From f4b4941fecd2c65ddd0785d6df9ffdba40470b73 Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Fri, 8 Aug 2025 20:56:29 +0200 Subject: [PATCH 01/23] Fixed horrible kernel_manager singleton design and cleaned up all circular include chains --- .../guidelines/coding_guidelines_cpp.rst | 632 ++++++++---- .../spatially_structured_networks.rst | 1 - libnestutil/dict_util.h | 2 +- models/CMakeLists.txt | 8 +- models/ac_generator.cpp | 4 +- models/aeif_cond_alpha.cpp | 2 - models/aeif_cond_alpha_astro.cpp | 2 - models/aeif_cond_alpha_multisynapse.cpp | 2 - models/aeif_cond_beta_multisynapse.cpp | 2 - models/aeif_cond_exp.cpp | 2 - models/aeif_psc_alpha.cpp | 2 - models/aeif_psc_delta.cpp | 2 - models/aeif_psc_delta_clopath.cpp | 2 - models/aeif_psc_exp.cpp | 2 - models/amat2_psc_exp.cpp | 2 - models/astrocyte_lr_1994.cpp | 2 - models/bernoulli_synapse.cpp | 3 - models/binary_neuron.h | 1 - models/clopath_synapse.cpp | 3 - models/cm_compartmentcurrents.h | 2 + models/cm_default.cpp | 3 - models/cm_tree.cpp | 3 + models/cm_tree.h | 1 - models/cont_delay_synapse.cpp | 4 - models/cont_delay_synapse.h | 68 ++ models/cont_delay_synapse_impl.h | 109 --- models/correlation_detector.cpp | 4 - models/correlomatrix_detector.cpp | 2 - models/correlospinmatrix_detector.cpp | 2 - models/dc_generator.cpp | 3 - models/diffusion_connection.cpp | 3 - models/eprop_iaf.cpp | 3 - models/eprop_iaf.h | 1 - models/eprop_iaf_adapt.cpp | 3 - models/eprop_iaf_adapt.h | 1 - models/eprop_iaf_adapt_bsshslm_2020.cpp | 3 - models/eprop_iaf_adapt_bsshslm_2020.h | 1 - models/eprop_iaf_bsshslm_2020.cpp | 3 - models/eprop_iaf_bsshslm_2020.h | 1 - models/eprop_iaf_psc_delta.cpp | 3 - models/eprop_iaf_psc_delta.h | 1 - models/eprop_iaf_psc_delta_adapt.cpp | 3 - models/eprop_iaf_psc_delta_adapt.h | 1 - models/eprop_learning_signal_connection.cpp | 1 - ...earning_signal_connection_bsshslm_2020.cpp | 1 - models/eprop_readout.cpp | 2 - models/eprop_readout.h | 1 - models/eprop_readout_bsshslm_2020.cpp | 2 - 
models/eprop_readout_bsshslm_2020.h | 1 - models/eprop_synapse.cpp | 1 - models/eprop_synapse_bsshslm_2020.cpp | 1 - models/erfc_neuron.cpp | 3 - models/gamma_sup_generator.cpp | 7 +- models/gap_junction.cpp | 3 - models/gauss_rate.cpp | 2 - models/gauss_rate.h | 2 - models/gif_cond_exp.cpp | 2 - models/gif_cond_exp_multisynapse.cpp | 2 - models/gif_pop_psc_exp.cpp | 6 - models/gif_psc_exp.cpp | 7 +- models/gif_psc_exp_multisynapse.cpp | 2 - models/ginzburg_neuron.cpp | 3 - models/glif_cond.cpp | 6 +- models/glif_psc.cpp | 10 - models/glif_psc.h | 6 + models/glif_psc_double_alpha.cpp | 6 +- models/hh_cond_beta_gap_traub.cpp | 2 - models/hh_cond_exp_traub.cpp | 2 - models/hh_psc_alpha.cpp | 3 - models/hh_psc_alpha_clopath.cpp | 3 - models/hh_psc_alpha_gap.cpp | 2 - models/ht_neuron.cpp | 2 - models/ht_synapse.cpp | 3 - models/iaf_bw_2001.cpp | 6 +- models/iaf_bw_2001_exact.cpp | 6 +- models/iaf_chs_2007.cpp | 2 - models/iaf_chxk_2008.cpp | 2 - models/iaf_cond_alpha.cpp | 2 - models/iaf_cond_alpha_mc.cpp | 2 - models/iaf_cond_beta.cpp | 2 - models/iaf_cond_exp.cpp | 2 - models/iaf_cond_exp_sfa_rr.cpp | 2 - models/iaf_psc_alpha.cpp | 3 - models/iaf_psc_alpha_multisynapse.cpp | 2 - models/iaf_psc_alpha_ps.cpp | 2 - models/iaf_psc_delta.cpp | 2 - models/iaf_psc_delta_ps.cpp | 2 - models/iaf_psc_exp.cpp | 3 - models/iaf_psc_exp_htum.cpp | 2 - models/iaf_psc_exp_multisynapse.cpp | 2 - models/iaf_psc_exp_ps.cpp | 2 - models/iaf_psc_exp_ps_lossless.cpp | 2 - models/iaf_tum_2000.cpp | 3 - models/ignore_and_fire.cpp | 4 - models/ignore_and_fire.h | 24 +- models/inhomogeneous_poisson_generator.cpp | 3 - models/izhikevich.cpp | 3 - models/jonke_synapse.cpp | 3 - models/lin_rate.cpp | 4 +- models/lin_rate.h | 3 - models/mat2_psc_exp.cpp | 2 - models/mcculloch_pitts_neuron.cpp | 3 - models/mip_generator.cpp | 3 +- models/multimeter.cpp | 6 +- models/music_cont_in_proxy.cpp | 1 - models/music_cont_out_proxy.cpp | 2 - models/music_event_in_proxy.cpp | 9 +- 
models/music_event_out_proxy.cpp | 4 +- models/music_message_in_proxy.cpp | 1 - models/music_rate_in_proxy.cpp | 2 - models/music_rate_out_proxy.cpp | 1 - models/noise_generator.cpp | 4 +- models/noise_generator.h | 2 - models/parrot_neuron.cpp | 9 - models/parrot_neuron_ps.cpp | 2 - models/poisson_generator.cpp | 3 +- models/poisson_generator_ps.cpp | 3 +- models/pp_cond_exp_mc_urbanczik.cpp | 3 - models/pp_cond_exp_mc_urbanczik.h | 1 - models/pp_psc_delta.cpp | 2 - models/ppd_sup_generator.cpp | 3 +- models/pulsepacket_generator.cpp | 4 +- models/quantal_stp_synapse.cpp | 4 - models/quantal_stp_synapse.h | 64 ++ models/quantal_stp_synapse_impl.h | 105 -- models/rate_connection_delayed.cpp | 3 - models/rate_connection_instantaneous.cpp | 3 - models/rate_neuron_ipn.h | 435 ++++++++- models/rate_neuron_ipn_impl.h | 479 --------- models/rate_neuron_opn.h | 411 +++++++- models/rate_neuron_opn_impl.h | 452 --------- models/rate_transformer_node.h | 262 +++++ models/rate_transformer_node_impl.h | 319 ------ models/sic_connection.cpp | 3 - models/siegert_neuron.cpp | 4 - models/sigmoid_rate.cpp | 5 +- models/sigmoid_rate.h | 2 - models/sigmoid_rate_gg_1998.cpp | 5 +- models/sigmoid_rate_gg_1998.h | 2 - models/sinusoidal_gamma_generator.cpp | 4 +- models/sinusoidal_gamma_generator.h | 1 - models/sinusoidal_poisson_generator.cpp | 4 +- models/spike_dilutor.cpp | 3 - models/spike_generator.cpp | 3 +- models/spike_recorder.cpp | 5 +- models/spike_train_injector.cpp | 4 +- models/spin_detector.cpp | 11 +- models/static_synapse.cpp | 3 - models/static_synapse.h | 2 + models/static_synapse_hom_w.cpp | 3 - models/stdp_dopamine_synapse.cpp | 3 - models/stdp_facetshw_synapse_hom.cpp | 4 - models/stdp_facetshw_synapse_hom.h | 296 ++++++ models/stdp_facetshw_synapse_hom_impl.h | 339 ------- models/stdp_nn_pre_centered_synapse.cpp | 3 - models/stdp_nn_restr_synapse.cpp | 3 - models/stdp_nn_symm_synapse.cpp | 3 - models/stdp_pl_synapse_hom.cpp | 2 - models/stdp_synapse.cpp | 3 - 
models/stdp_synapse_hom.cpp | 2 - models/stdp_triplet_synapse.cpp | 3 - models/step_current_generator.cpp | 3 - models/step_rate_generator.cpp | 3 - models/tanh_rate.cpp | 5 +- models/tanh_rate.h | 3 - models/threshold_lin_rate.cpp | 5 +- models/threshold_lin_rate.h | 3 - models/tsodyks2_synapse.cpp | 3 - models/tsodyks_synapse.cpp | 3 - models/tsodyks_synapse_hom.cpp | 4 - models/urbanczik_synapse.cpp | 3 - models/vogels_sprekeler_synapse.cpp | 3 - models/volume_transmitter.cpp | 4 - models/weight_recorder.cpp | 5 +- models/weight_recorder.h | 6 +- nestkernel/CMakeLists.txt | 46 +- nestkernel/archiving_node.cpp | 1 + nestkernel/buffer_resize_log.cpp | 2 + nestkernel/clopath_archiving_node.cpp | 1 + nestkernel/common_properties_hom_w.h | 2 + nestkernel/common_synapse_properties.cpp | 3 +- nestkernel/common_synapse_properties.h | 8 +- nestkernel/conn_builder.cpp | 7 +- nestkernel/conn_builder.h | 17 +- nestkernel/conn_builder_impl.h | 51 - nestkernel/connection.h | 1 - nestkernel/connection_creator.h | 867 +++++++++++++++++ nestkernel/connection_creator_impl.h | 907 ------------------ nestkernel/connection_label.h | 2 +- nestkernel/connection_manager.cpp | 170 ++-- nestkernel/connection_manager.h | 44 +- nestkernel/connection_manager_impl.h | 85 -- nestkernel/connector_base.cpp | 50 + nestkernel/connector_base.h | 44 +- nestkernel/connector_base_impl.h | 67 -- nestkernel/connector_model.cpp | 7 + nestkernel/connector_model.h | 8 +- nestkernel/connector_model_impl.h | 43 +- nestkernel/delay_checker.cpp | 7 + nestkernel/eprop_archiving_node.h | 155 +++ nestkernel/eprop_archiving_node_impl.h | 194 ---- nestkernel/eprop_archiving_node_recurrent.h | 266 +++++ .../eprop_archiving_node_recurrent_impl.h | 303 ------ nestkernel/event.cpp | 2 +- nestkernel/event_delivery_manager.cpp | 32 +- nestkernel/event_delivery_manager.h | 147 ++- nestkernel/event_delivery_manager_impl.h | 181 ---- nestkernel/free_layer.h | 2 +- nestkernel/genericmodel.h | 22 +- 
nestkernel/genericmodel_impl.h | 52 - nestkernel/io_manager.cpp | 3 +- nestkernel/io_manager.h | 49 +- nestkernel/io_manager_impl.h | 61 -- nestkernel/kernel_manager.cpp | 118 +-- nestkernel/kernel_manager.h | 109 +-- nestkernel/layer.cpp | 2 - nestkernel/layer.h | 173 +--- nestkernel/layer_impl.h | 180 +++- nestkernel/mask.h | 482 ++++++++++ nestkernel/mask_impl.h | 515 ---------- nestkernel/model.cpp | 1 + nestkernel/model_manager.cpp | 5 +- nestkernel/model_manager.h | 97 +- nestkernel/model_manager_impl.h | 126 --- nestkernel/modelrange_manager.cpp | 1 + nestkernel/mpi_manager.cpp | 12 +- nestkernel/mpi_manager.h | 50 +- nestkernel/mpi_manager_impl.h | 94 -- nestkernel/music_event_handler.cpp | 2 + nestkernel/music_manager.cpp | 3 + nestkernel/music_rate_in_handler.cpp | 4 +- nestkernel/nest.cpp | 9 +- nestkernel/nest.h | 22 +- nestkernel/nest_extension_interface.h | 7 - nestkernel/nestmodule.cpp | 13 +- nestkernel/node.cpp | 2 + nestkernel/node.h | 2 - nestkernel/node_collection.cpp | 6 +- nestkernel/node_manager.cpp | 8 +- nestkernel/ntree.h | 495 ++++++++++ nestkernel/ntree_impl.h | 532 ---------- nestkernel/parameter.cpp | 5 +- nestkernel/per_thread_bool_indicator.cpp | 19 +- nestkernel/proxynode.cpp | 1 + nestkernel/random_manager.cpp | 5 +- nestkernel/recording_backend_ascii.cpp | 8 +- nestkernel/recording_backend_memory.cpp | 5 +- nestkernel/recording_backend_mpi.cpp | 3 + nestkernel/recording_backend_sionlib.cpp | 4 +- nestkernel/recording_device.cpp | 5 +- nestkernel/ring_buffer.cpp | 1 + nestkernel/ring_buffer.h | 31 + nestkernel/ring_buffer_impl.h | 58 -- nestkernel/secondary_event.h | 25 + nestkernel/secondary_event_impl.h | 51 - nestkernel/simulation_manager.cpp | 63 +- nestkernel/simulation_manager.h | 24 + nestkernel/slice_ring_buffer.cpp | 3 + nestkernel/slice_ring_buffer.h | 2 +- nestkernel/sonata_connector.cpp | 1 - nestkernel/source_table.cpp | 15 +- nestkernel/sp_manager.cpp | 6 +- nestkernel/sp_manager.h | 12 + nestkernel/sp_manager_impl.h 
| 52 - nestkernel/sparse_node_array.cpp | 1 - nestkernel/spatial.cpp | 1 - nestkernel/stimulation_backend_mpi.cpp | 4 + nestkernel/stimulation_device.cpp | 1 + nestkernel/stopwatch.h | 91 +- nestkernel/stopwatch_impl.h | 112 --- nestkernel/synaptic_element.cpp | 2 + nestkernel/target_identifier.h | 2 + nestkernel/target_table.cpp | 3 + nestkernel/target_table_devices.cpp | 71 +- nestkernel/target_table_devices.h | 66 +- nestkernel/target_table_devices_impl.h | 138 --- nestkernel/universal_data_logger.h | 442 ++++++++- nestkernel/universal_data_logger_impl.h | 436 --------- nestkernel/urbanczik_archiving_node.h | 96 ++ nestkernel/urbanczik_archiving_node_impl.h | 131 --- nestkernel/vp_manager.cpp | 7 +- nestkernel/vp_manager.h | 133 ++- nestkernel/vp_manager_impl.h | 133 --- 283 files changed, 6368 insertions(+), 7358 deletions(-) delete mode 100644 models/cont_delay_synapse_impl.h delete mode 100644 models/rate_neuron_ipn_impl.h delete mode 100644 models/rate_neuron_opn_impl.h delete mode 100644 models/rate_transformer_node_impl.h delete mode 100644 models/stdp_facetshw_synapse_hom_impl.h delete mode 100644 nestkernel/conn_builder_impl.h delete mode 100644 nestkernel/connection_creator_impl.h delete mode 100644 nestkernel/connection_manager_impl.h create mode 100644 nestkernel/connector_base.cpp delete mode 100644 nestkernel/eprop_archiving_node_impl.h delete mode 100644 nestkernel/eprop_archiving_node_recurrent_impl.h delete mode 100644 nestkernel/event_delivery_manager_impl.h delete mode 100644 nestkernel/genericmodel_impl.h delete mode 100644 nestkernel/io_manager_impl.h delete mode 100644 nestkernel/mask_impl.h delete mode 100644 nestkernel/mpi_manager_impl.h delete mode 100644 nestkernel/ntree_impl.h delete mode 100644 nestkernel/ring_buffer_impl.h delete mode 100644 nestkernel/secondary_event_impl.h delete mode 100644 nestkernel/sp_manager_impl.h delete mode 100644 nestkernel/stopwatch_impl.h delete mode 100644 nestkernel/target_table_devices_impl.h delete 
mode 100644 nestkernel/universal_data_logger_impl.h delete mode 100644 nestkernel/urbanczik_archiving_node_impl.h delete mode 100644 nestkernel/vp_manager_impl.h diff --git a/doc/htmldoc/developer_space/guidelines/coding_guidelines_cpp.rst b/doc/htmldoc/developer_space/guidelines/coding_guidelines_cpp.rst index f6dceba360..e906a612b7 100644 --- a/doc/htmldoc/developer_space/guidelines/coding_guidelines_cpp.rst +++ b/doc/htmldoc/developer_space/guidelines/coding_guidelines_cpp.rst @@ -371,171 +371,175 @@ For example, the ``stopwatch.h`` file could look like: #ifndef STOPWATCH_H #define STOPWATCH_H - // C includes: - #include - // C++ includes: - #include + #include #include + #include + + // Includes from nestkernel: + #include "arraydatum.h" + #include "dictdatum.h" + #include "dictutils.h" + #include "exceptions.h" + #include "kernel_manager.h" + #include "vp_manager.h" namespace nest { - /*********************************************************************** - * Stopwatch * - * Accumulates time between start and stop, and provides * - * the elapsed time with different time units. * - * * - * Partly inspired by com.google.common.base.Stopwatch.java * - * Not thread-safe: - Do not share stopwatches among threads. * - * - Let each thread have its own stopwatch. * - * * - * Usage example: * - * Stopwatch x; * - * x.start(); * - * // ... do computations for 15.34 sec * - * x.stop(); // only pauses stopwatch * - * x.print("Time needed "); // > Time needed 15.34 sec. * - * x.start(); // resumes stopwatch * - * // ... next computations for 11.22 sec * - * x.stop(); * - * x.print("Time needed "); // > Time needed 26,56 sec. * - * x.reset(); // reset to default values * - * x.start(); // starts the stopwatch from 0 * - * // ... computation 5.7 sec * - * x.print("Time "); // > Time 5.7 sec. * - * // ^ intermediate timing without stopping the stopwatch * - * // ... 
more computations 1.7643 min * - * x.stop(); * - * x.print("Time needed ", Stopwatch::MINUTES, std::cerr); * - * // > Time needed 1,8593 min. (on cerr) * - * // other units and output streams possible * - ***********************************************************************/ - class Stopwatch + #ifdef TIMER_DETAILED + constexpr bool use_detailed_timers = true; + #else + constexpr bool use_detailed_timers = false; + #endif + #ifdef THREADED_TIMERS + constexpr bool use_threaded_timers = true; + #else + constexpr bool use_threaded_timers = false; + #endif + + enum class StopwatchGranularity { - public: - typedef size_t timestamp_t; - typedef size_t timeunit_t; + Normal, // + class Stopwatch; + + /******************************************************************************** + * Stopwatch * + * Accumulates time between start and stop, and provides the elapsed time * + * with different time units. Either runs multi-threaded or only on master. * + * * + * Usage example: * + * Stopwatch< StopwatchGranularity::Normal, StopwatchParallelism::MasterOnly > x; * + * x.start(); * + * // ... do computations for 15.34 sec * + * x.stop(); // only pauses stopwatch * + * x.print("Time needed "); // > Time needed 15.34 sec. * + * x.start(); // resumes stopwatch * + * // ... next computations for 11.22 sec * + * x.stop(); * + * x.print("Time needed "); // > Time needed 26,56 sec. * + * x.reset(); // reset to default values * + * x.start(); // starts the stopwatch from 0 * + * // ... computation 5.7 sec * + * x.print("Time "); // > Time 5.7 sec. * + * // ^ intermediate timing without stopping the stopwatch * + * // ... more computations 1.7643 min * + * x.stop(); * + * x.print("Time needed ", timeunit_t::MINUTES, std::cerr); * + * // > Time needed 1,8593 min. 
(on cerr) * + * // other units and output streams possible * + ********************************************************************************/ + namespace timers + { + enum class timeunit_t : size_t + { + NANOSEC = 1, + MICROSEC = NANOSEC * 1000, + MILLISEC = MICROSEC * 1000, + SECONDS = MILLISEC * 1000, + MINUTES = SECONDS * 60, + HOURS = MINUTES * 60, + DAYS = HOURS * 24 + }; - /** - * Creates a stopwatch that is not running. - */ - Stopwatch() + /** This class represents a single timer which measures the execution time of a single thread for a given clock type. + * Typical clocks are monotonic wall-time clocks or clocks just measuring cpu time. + */ + template < clockid_t clock_type > + class StopwatchTimer + { + template < StopwatchGranularity, StopwatchParallelism, typename > + friend class nest::Stopwatch; + + public: + typedef size_t timestamp_t; + + //! Creates a stopwatch that is not running. + StopwatchTimer() { reset(); } - /** - * Starts or resumes the stopwatch, if it is not running already. - */ + //! Starts or resumes the stopwatch, if it is not running already. void start(); - /** - * Stops the stopwatch, if it is not stopped already. - */ + //! Stops the stopwatch, if it is not stopped already. void stop(); /** - * Returns, whether the stopwatch is running. - */ - bool isRunning() const; - - /** - * Returns the time elapsed between the start and stop of the - * stopwatch. If it is running, it returns the time from start - * until now. If the stopwatch is run previously, the previous - * runtime is added. If you want only the last measurment, you - * have to reset the timer, before stating the measurment. - * Does not change the running state. - */ - double elapsed( timeunit_t timeunit = SECONDS ) const; - - /** - * Returns the time elapsed between the start and stop of the - * stopwatch. If it is running, it returns the time from start - * until now. If the stopwatch is run previously, the previous - * runtime is added. 
If you want only the last measurment, you - * have to reset the timer, before stating the measurment. + * Returns the time elapsed between the start and stop of the stopwatch in the given unit. If it is running, it + * returns the time from start until now. If the stopwatch is run previously, the previous runtime is added. If you + * want only the last measurement, you have to reset the timer, before stating the measurement. * Does not change the running state. - * In contrast to Stopwatch::elapsed(), only the timestamp is returned, - * that is the number if microseconds as an integer. */ - timestamp_t elapsed_timestamp() const; + double elapsed( timeunit_t timeunit = timeunit_t::SECONDS ) const; - /** - * Resets the stopwatch. - */ + //! Resets the stopwatch. void reset(); - /** - * This method prints out the currently elapsed time. - */ - void print( const char* msg = "", timeunit_t timeunit = SECONDS, std::ostream& os = std::cout ) const; - - /** - * Convenient method for writing time in seconds - * to some ostream. - */ - friend std::ostream& operator<<( std::ostream& os, const Stopwatch& stopwatch ); + //! This method prints out the currently elapsed time. + void + print( const std::string& msg = "", timeunit_t timeunit = timeunit_t::SECONDS, std::ostream& os = std::cout ) const; private: + //! Returns, whether the stopwatch is running. + bool is_running_() const; + #ifndef DISABLE_TIMING timestamp_t _beg, _end; size_t _prev_elapsed; bool _running; #endif - /** - * Returns current time in microseconds since EPOCH. - */ - static timestamp_t get_timestamp(); + //! Returns current time in microseconds since EPOCH. 
+ static size_t get_current_time(); }; - inline bool - Stopwatch::correct_timeunit( timeunit_t t ) - { - return t == MICROSEC || t == MILLISEC || t == SECONDS || t == MINUTES || t == HOURS || t == DAYS; - } - + template < clockid_t clock_type > inline void - nest::Stopwatch::start() + StopwatchTimer< clock_type >::start() { #ifndef DISABLE_TIMING - if ( not isRunning() ) + if ( not is_running_() ) { - _prev_elapsed += _end - _beg; // store prev. time, if we resume - _end = _beg = get_timestamp(); // invariant: _end >= _beg - _running = true; // we start running + _prev_elapsed += _end - _beg; // store prev. time, if we resume + _end = _beg = get_current_time(); // invariant: _end >= _beg + _running = true; // we start running } #endif } + template < clockid_t clock_type > inline void - nest::Stopwatch::stop() + StopwatchTimer< clock_type >::stop() { #ifndef DISABLE_TIMING - if ( isRunning() ) + if ( is_running_() ) { - _end = get_timestamp(); // invariant: _end >= _beg - _running = false; // we stopped running + _end = get_current_time(); // invariant: _end >= _beg + _running = false; // we stopped running } #endif } + template < clockid_t clock_type > inline bool - nest::Stopwatch::isRunning() const + StopwatchTimer< clock_type >::is_running_() const { #ifndef DISABLE_TIMING return _running; @@ -544,38 +548,31 @@ For example, the ``stopwatch.h`` file could look like: #endif } + template < clockid_t clock_type > inline double - nest::Stopwatch::elapsed( timeunit_t timeunit ) const - { - #ifndef DISABLE_TIMING - assert( correct_timeunit( timeunit ) ); - return 1.0 * elapsed_timestamp() / timeunit; - #else - return 0.0; - #endif - } - - inline nest::Stopwatch::timestamp_t - nest::Stopwatch::elapsed_us() const + StopwatchTimer< clock_type >::elapsed( timeunit_t timeunit ) const { #ifndef DISABLE_TIMING - if ( isRunning() ) + size_t time_elapsed; + if ( is_running_() ) { // get intermediate elapsed time; do not change _end, to be const - return get_timestamp() - _beg + 
_prev_elapsed; + time_elapsed = get_current_time() - _beg + _prev_elapsed; } else { // stopped before, get time of current measurement + last measurements - return _end - _beg + _prev_elapsed; + time_elapsed = _end - _beg + _prev_elapsed; } + return static_cast< double >( time_elapsed ) / static_cast< double >( timeunit ); #else - return ( timestamp_t ) 0; + return 0.; #endif } + template < clockid_t clock_type > inline void - nest::Stopwatch::reset() + StopwatchTimer< clock_type >::reset() { #ifndef DISABLE_TIMING _beg = 0; // invariant: _end >= _beg @@ -585,33 +582,37 @@ For example, the ``stopwatch.h`` file could look like: #endif } + template < clockid_t clock_type > inline void - nest::Stopwatch::print( const char* msg, timeunit_t timeunit, std::ostream& os ) const + StopwatchTimer< clock_type >::print( const std::string& msg, timeunit_t timeunit, std::ostream& os ) const { #ifndef DISABLE_TIMING - assert( correct_timeunit( timeunit ) ); double e = elapsed( timeunit ); os << msg << e; switch ( timeunit ) { - case MICROSEC: + case timeunit_t::NANOSEC: + os << " nanosec."; + case timeunit_t::MICROSEC: os << " microsec."; break; - case MILLISEC: + case timeunit_t::MILLISEC: os << " millisec."; break; - case SECONDS: + case timeunit_t::SECONDS: os << " sec."; break; - case MINUTES: + case timeunit_t::MINUTES: os << " min."; break; - case HOURS: + case timeunit_t::HOURS: os << " h."; break; - case DAYS: + case timeunit_t::DAYS: os << " days."; break; + default: + throw BadParameter( "Invalid timeunit provided to stopwatch." ); } #ifdef DEBUG os << " (running: " << ( _running ? 
"true" : "false" ) << ", begin: " << _beg << ", end: " << _end @@ -621,55 +622,336 @@ For example, the ``stopwatch.h`` file could look like: #endif } - inline nest::Stopwatch::timestamp_t - nest::Stopwatch::get_current_time() + template < clockid_t clock_type > + inline size_t + StopwatchTimer< clock_type >::get_current_time() { - // works with: - // * hambach (Linux 2.6.32 x86_64) - // * JuQueen (BG/Q) - // * MacOS 10.9 - struct timeval now; - gettimeofday( &now, ( struct timezone* ) 0 ); - return ( nest::Stopwatch::timestamp_t ) now.tv_usec - + ( nest::Stopwatch::timestamp_t ) now.tv_sec * nest::Stopwatch::SECONDS; + timespec now; + clock_gettime( clock_type, &now ); + return now.tv_nsec + now.tv_sec * static_cast< long >( timeunit_t::SECONDS ); } - } /* namespace timer */ - #endif /* STOPWATCH_H */ + template < clockid_t clock_type > + inline std::ostream& + operator<<( std::ostream& os, const StopwatchTimer< clock_type >& stopwatch ) + { + stopwatch.print( "", timeunit_t::SECONDS, os ); + return os; + } -And the corresponding ``stopwatch_impl.h``: + } // namespace timers -.. code:: cpp - /* - * stopwatch_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. + /** This is the base template for all Stopwatch specializations. * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * This template will be specialized in case detailed timers are deactivated or the timer is supposed to be run by + * multiple threads. 
If the timer should be deactivated, because detailed timers are disabled, the template + * specialization will be empty and optimized away by the compiler. + * This base template only measures a single timer, owned by the master thread, which applies for both detailed and + * regular timers. Detailed, master-only timers that are deactivated when detailed timers are turned off are handled + * by one of the template specializations below. * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . + * The template has three template arguments of which two act as actual parameters that need to be specified for each + * stopwatch instance. The first one "detailed_timer" controls the granularity of the stopwatch, i.e., if the timer is + * considered a normal or detailed timer. The second one "threaded_timer" defines if the timer is supposed to be + * measured by each thread individually. In case a timer is specified as threaded, but threaded timers are turned off + * globally, the stopwatch will run in master-only mode instead. The third template argument is used to enable or + * disable certain template specializations based on compiler flags (i.e., detailed timers and threaded timers). * + * In all cases, both the (monotonic) wall-time and cpu time are measured. 
*/ + template < StopwatchGranularity detailed_timer, StopwatchParallelism threaded_timer, typename = void > + class Stopwatch + { + public: + void + start() + { + #pragma omp master + { + walltime_timer_.start(); + cputime_timer_.start(); + } + } - #include "stopwatch.h" + void + stop() + { + #pragma omp master + { + walltime_timer_.stop(); + cputime_timer_.stop(); + } + } - namespace nest + double + elapsed( timers::timeunit_t timeunit = timers::timeunit_t::SECONDS ) const + { + double elapsed = 0.; + #pragma omp master + { + elapsed = walltime_timer_.elapsed( timeunit ); + }; + return elapsed; + } + + void + reset() + { + #pragma omp master + { + walltime_timer_.reset(); + cputime_timer_.reset(); + } + } + + void + print( const std::string& msg = "", + timers::timeunit_t timeunit = timers::timeunit_t::SECONDS, + std::ostream& os = std::cout ) const + { + #pragma omp master + walltime_timer_.print( msg, timeunit, os ); + } + + void + get_status( DictionaryDatum& d, const Name& walltime_name, const Name& cputime_name ) const + { + def< double >( d, walltime_name, walltime_timer_.elapsed() ); + def< double >( d, cputime_name, cputime_timer_.elapsed() ); + } + + private: + bool + is_running_() const + { + bool is_running_ = false; + #pragma omp master + { + is_running_ = walltime_timer_.is_running_(); + }; + return is_running_; + } + + // We use a monotonic timer to make sure the stopwatch is not influenced by time jumps (e.g. summer/winter time). + timers::StopwatchTimer< CLOCK_MONOTONIC > walltime_timer_; + timers::StopwatchTimer< CLOCK_THREAD_CPUTIME_ID > cputime_timer_; + }; + + //! Stopwatch template specialization for detailed, master-only timer instances if detailed timers are deactivated. 
+ template <> + class Stopwatch< StopwatchGranularity::Detailed, + StopwatchParallelism::MasterOnly, + std::enable_if< not use_detailed_timers > > { - std::ostream& operator<<( std::ostream& os, const Stopwatch& stopwatch ) + public: + void + start() + { + } + void + stop() + { + } + double + elapsed( timers::timeunit_t = timers::timeunit_t::SECONDS ) const + { + return 0; + } + void + reset() + { + } + void + print( const std::string& = "", timers::timeunit_t = timers::timeunit_t::SECONDS, std::ostream& = std::cout ) const + { + } + void + get_status( DictionaryDatum&, const Name&, const Name& ) const + { + } + + private: + bool + is_running_() const + { + return false; + } + }; + + //! Stopwatch template specialization for detailed, threaded timer instances if detailed timers are deactivated. + template < StopwatchGranularity detailed_timer > + class Stopwatch< detailed_timer, + StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Detailed and not use_detailed_timers ) > > { - stopwatch.print( "", Stopwatch::SECONDS, os ); - return os; + public: + void + start() + { + } + void + stop() + { + } + double + elapsed( timers::timeunit_t = timers::timeunit_t::SECONDS ) const + { + return 0; + } + void + reset() + { + } + void + print( const std::string& = "", timers::timeunit_t = timers::timeunit_t::SECONDS, std::ostream& = std::cout ) const + { + } + void + get_status( DictionaryDatum&, const Name&, const Name& ) const + { + } + + private: + bool + is_running_() const + { + return false; + } + }; + + /** Stopwatch template specialization for threaded timer instances if the timer is a detailed one and detailed timers + * are activated or the timer is not a detailed one in the first place. 
+ */ + template < StopwatchGranularity detailed_timer > + class Stopwatch< detailed_timer, + StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > > + { + public: + void start(); + + void stop(); + + double elapsed( timers::timeunit_t timeunit = timers::timeunit_t::SECONDS ) const; + + void reset(); + + void print( const std::string& msg = "", + timers::timeunit_t timeunit = timers::timeunit_t::SECONDS, + std::ostream& os = std::cout ) const; + + void + get_status( DictionaryDatum& d, const Name& walltime_name, const Name& cputime_name ) const + { + std::vector< double > wall_times( walltime_timers_.size() ); + std::transform( walltime_timers_.begin(), + walltime_timers_.end(), + wall_times.begin(), + []( const timers::StopwatchTimer< CLOCK_MONOTONIC >& timer ) { return timer.elapsed(); } ); + def< ArrayDatum >( d, walltime_name, ArrayDatum( wall_times ) ); + + std::vector< double > cpu_times( cputime_timers_.size() ); + std::transform( cputime_timers_.begin(), + cputime_timers_.end(), + cpu_times.begin(), + []( const timers::StopwatchTimer< CLOCK_THREAD_CPUTIME_ID >& timer ) { return timer.elapsed(); } ); + def< ArrayDatum >( d, cputime_name, ArrayDatum( cpu_times ) ); + } + + private: + bool is_running_() const; + + // We use a monotonic timer to make sure the stopwatch is not influenced by time jumps (e.g. summer/winter time). 
+ std::vector< timers::StopwatchTimer< CLOCK_MONOTONIC > > walltime_timers_; + std::vector< timers::StopwatchTimer< CLOCK_THREAD_CPUTIME_ID > > cputime_timers_; + }; + + template < StopwatchGranularity detailed_timer > + void + Stopwatch< detailed_timer, + StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::start() + { + kernel().vp_manager.assert_thread_parallel(); + + walltime_timers_[ kernel().vp_manager.get_thread_id() ].start(); + cputime_timers_[ kernel().vp_manager.get_thread_id() ].start(); } + + template < StopwatchGranularity detailed_timer > + void + Stopwatch< detailed_timer, + StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::stop() + { + kernel().vp_manager.assert_thread_parallel(); + + walltime_timers_[ kernel().vp_manager.get_thread_id() ].stop(); + cputime_timers_[ kernel().vp_manager.get_thread_id() ].stop(); + } + + template < StopwatchGranularity detailed_timer > + bool + Stopwatch< detailed_timer, + StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::is_running_() const + { + kernel().vp_manager.assert_thread_parallel(); + + return walltime_timers_[ kernel().vp_manager.get_thread_id() ].is_running_(); + } + + template < StopwatchGranularity detailed_timer > + double + Stopwatch< detailed_timer, + StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::elapsed( timers::timeunit_t + timeunit ) const + { + kernel().vp_manager.assert_thread_parallel(); + + return walltime_timers_[ kernel().vp_manager.get_thread_id() ].elapsed( timeunit ); } + + template < StopwatchGranularity detailed_timer > + void + Stopwatch< detailed_timer, + 
StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::print( const std::string& msg, + timers::timeunit_t timeunit, + std::ostream& os ) const + { + kernel().vp_manager.assert_thread_parallel(); + + walltime_timers_[ kernel().vp_manager.get_thread_id() ].print( msg, timeunit, os ); + } + + template < StopwatchGranularity detailed_timer > + void + Stopwatch< detailed_timer, + StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::reset() + { + kernel().vp_manager.assert_single_threaded(); + + const size_t num_threads = kernel().vp_manager.get_num_threads(); + walltime_timers_.resize( num_threads ); + cputime_timers_.resize( num_threads ); + for ( size_t i = 0; i < num_threads; ++i ) + { + walltime_timers_[ i ].reset(); + cputime_timers_[ i ].reset(); + } + } + + } /* namespace nest */ + #endif /* STOPWATCH_H */ diff --git a/doc/htmldoc/networks/spatially_structured_networks.rst b/doc/htmldoc/networks/spatially_structured_networks.rst index c982239e53..198378b9c9 100644 --- a/doc/htmldoc/networks/spatially_structured_networks.rst +++ b/doc/htmldoc/networks/spatially_structured_networks.rst @@ -1516,7 +1516,6 @@ files for the ``Mask`` parent class: .. 
code:: c #include "mask.h" - #include "mask_impl.h" The ``Mask`` class has a few methods that must be overridden: diff --git a/libnestutil/dict_util.h b/libnestutil/dict_util.h index eeb954b5f9..93582046fe 100644 --- a/libnestutil/dict_util.h +++ b/libnestutil/dict_util.h @@ -25,8 +25,8 @@ // Includes from nestkernel: #include "kernel_manager.h" +#include "nest.h" #include "nest_datums.h" -#include "vp_manager_impl.h" // Includes from sli: #include "dictdatum.h" diff --git a/models/CMakeLists.txt b/models/CMakeLists.txt index 4b861b13f3..3746c98cf0 100644 --- a/models/CMakeLists.txt +++ b/models/CMakeLists.txt @@ -23,10 +23,10 @@ set(models_sources weight_recorder.h weight_recorder.cpp # Required by CommonSynapseProperties cm_compartmentcurrents.h cm_compartmentcurrents.cpp cm_tree.h cm_tree.cpp - rate_neuron_ipn.h rate_neuron_ipn_impl.h - rate_neuron_opn.h rate_neuron_opn_impl.h - rate_transformer_node.h rate_transformer_node_impl.h - weight_optimizer.h weight_optimizer.cpp + rate_neuron_ipn.h + rate_neuron_opn.h + rate_transformer_node.h + weight_optimizer.h weight_optimizer.cpp ${MODELS_SOURCES_GENERATED} ) diff --git a/models/ac_generator.cpp b/models/ac_generator.cpp index d7c5ea7751..fe9d069e01 100644 --- a/models/ac_generator.cpp +++ b/models/ac_generator.cpp @@ -30,10 +30,8 @@ #include "numerics.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" +#include "event_delivery_manager.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/aeif_cond_alpha.cpp b/models/aeif_cond_alpha.cpp index dc8298bc3e..94d8b19c9e 100644 --- a/models/aeif_cond_alpha.cpp +++ b/models/aeif_cond_alpha.cpp @@ -37,9 +37,7 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" #include "nest_names.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git 
a/models/aeif_cond_alpha_astro.cpp b/models/aeif_cond_alpha_astro.cpp index 9b073b5487..f6da7c1c6e 100644 --- a/models/aeif_cond_alpha_astro.cpp +++ b/models/aeif_cond_alpha_astro.cpp @@ -37,9 +37,7 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" #include "nest_names.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/aeif_cond_alpha_multisynapse.cpp b/models/aeif_cond_alpha_multisynapse.cpp index f027713973..64c2707926 100644 --- a/models/aeif_cond_alpha_multisynapse.cpp +++ b/models/aeif_cond_alpha_multisynapse.cpp @@ -34,8 +34,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/aeif_cond_beta_multisynapse.cpp b/models/aeif_cond_beta_multisynapse.cpp index 5ef7e10717..20646dc575 100644 --- a/models/aeif_cond_beta_multisynapse.cpp +++ b/models/aeif_cond_beta_multisynapse.cpp @@ -35,8 +35,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/aeif_cond_exp.cpp b/models/aeif_cond_exp.cpp index cb0eb86d0c..5fdcc39845 100644 --- a/models/aeif_cond_exp.cpp +++ b/models/aeif_cond_exp.cpp @@ -37,9 +37,7 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" #include "nest_names.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/aeif_psc_alpha.cpp b/models/aeif_psc_alpha.cpp index b4a1b9c86e..8a60a66f2d 100644 --- a/models/aeif_psc_alpha.cpp +++ b/models/aeif_psc_alpha.cpp @@ -37,9 +37,7 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" #include "nest_names.h" -#include 
"universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/aeif_psc_delta.cpp b/models/aeif_psc_delta.cpp index 7cafe3ad64..c5762b8461 100644 --- a/models/aeif_psc_delta.cpp +++ b/models/aeif_psc_delta.cpp @@ -37,9 +37,7 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" #include "nest_names.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/aeif_psc_delta_clopath.cpp b/models/aeif_psc_delta_clopath.cpp index c45158ee77..3ef0c5747c 100644 --- a/models/aeif_psc_delta_clopath.cpp +++ b/models/aeif_psc_delta_clopath.cpp @@ -37,9 +37,7 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" #include "nest_names.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/aeif_psc_exp.cpp b/models/aeif_psc_exp.cpp index 6b8aec869c..3ea33218ef 100644 --- a/models/aeif_psc_exp.cpp +++ b/models/aeif_psc_exp.cpp @@ -37,9 +37,7 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" #include "nest_names.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/amat2_psc_exp.cpp b/models/amat2_psc_exp.cpp index 06bce3bd22..cda1eab17c 100644 --- a/models/amat2_psc_exp.cpp +++ b/models/amat2_psc_exp.cpp @@ -30,8 +30,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/astrocyte_lr_1994.cpp b/models/astrocyte_lr_1994.cpp index cd32bf5bc5..d5fb3738e1 100644 --- a/models/astrocyte_lr_1994.cpp +++ b/models/astrocyte_lr_1994.cpp @@ -37,8 +37,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include 
"universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/bernoulli_synapse.cpp b/models/bernoulli_synapse.cpp index 95174cf2ef..06f354f196 100644 --- a/models/bernoulli_synapse.cpp +++ b/models/bernoulli_synapse.cpp @@ -22,9 +22,6 @@ #include "bernoulli_synapse.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_bernoulli_synapse( const std::string& name ) { diff --git a/models/binary_neuron.h b/models/binary_neuron.h index d3854d650e..d9afac52a7 100644 --- a/models/binary_neuron.h +++ b/models/binary_neuron.h @@ -35,7 +35,6 @@ #include "archiving_node.h" #include "connection.h" #include "event.h" -#include "event_delivery_manager_impl.h" #include "exceptions.h" #include "kernel_manager.h" #include "nest_timeconverter.h" diff --git a/models/clopath_synapse.cpp b/models/clopath_synapse.cpp index 73a2cbd1fc..07af3f894d 100644 --- a/models/clopath_synapse.cpp +++ b/models/clopath_synapse.cpp @@ -22,9 +22,6 @@ #include "clopath_synapse.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_clopath_synapse( const std::string& name ) { diff --git a/models/cm_compartmentcurrents.h b/models/cm_compartmentcurrents.h index 87135ffe07..8153f1b290 100644 --- a/models/cm_compartmentcurrents.h +++ b/models/cm_compartmentcurrents.h @@ -24,6 +24,8 @@ #include +#include "logging.h" +#include "logging_manager.h" #include "ring_buffer.h" namespace nest diff --git a/models/cm_default.cpp b/models/cm_default.cpp index 37b714929b..d85159a1d4 100644 --- a/models/cm_default.cpp +++ b/models/cm_default.cpp @@ -22,9 +22,6 @@ #include "cm_default.h" -// Includes from nestkernel: -#include "nest_impl.h" - namespace nest { void diff --git a/models/cm_tree.cpp b/models/cm_tree.cpp index 52121ca2fe..dc85163687 100644 --- a/models/cm_tree.cpp +++ b/models/cm_tree.cpp @@ -21,6 +21,9 @@ */ #include "cm_tree.h" +#include "logging.h" +#include "logging_manager.h" + nest::Compartment::Compartment( const long 
compartment_index, const long parent_index ) : xx_( 0.0 ) diff --git a/models/cm_tree.h b/models/cm_tree.h index 6f5898729e..b4884eb5d2 100644 --- a/models/cm_tree.h +++ b/models/cm_tree.h @@ -38,7 +38,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/cont_delay_synapse.cpp b/models/cont_delay_synapse.cpp index a0a3e7afb7..6d37a6ee94 100644 --- a/models/cont_delay_synapse.cpp +++ b/models/cont_delay_synapse.cpp @@ -21,10 +21,6 @@ */ #include "cont_delay_synapse.h" -#include "cont_delay_synapse_impl.h" - -// Includes from nestkernel: -#include "nest_impl.h" void nest::register_cont_delay_synapse( const std::string& name ) diff --git a/models/cont_delay_synapse.h b/models/cont_delay_synapse.h index a5759c2622..057a69dcc8 100644 --- a/models/cont_delay_synapse.h +++ b/models/cont_delay_synapse.h @@ -248,6 +248,74 @@ cont_delay_synapse< targetidentifierT >::send( Event& e, size_t t, const CommonS template < typename targetidentifierT > constexpr ConnectionModelProperties cont_delay_synapse< targetidentifierT >::properties; +template < typename targetidentifierT > +cont_delay_synapse< targetidentifierT >::cont_delay_synapse() + : ConnectionBase() + , weight_( 1.0 ) + , delay_offset_( 0.0 ) +{ +} + +template < typename targetidentifierT > +void +cont_delay_synapse< targetidentifierT >::get_status( DictionaryDatum& d ) const +{ + ConnectionBase::get_status( d ); + + def< double >( d, names::weight, weight_ ); + def< double >( d, names::delay, Time( Time::step( get_delay_steps() ) ).get_ms() - delay_offset_ ); + def< long >( d, names::size_of, sizeof( *this ) ); +} + +template < typename targetidentifierT > +void +cont_delay_synapse< targetidentifierT >::set_status( const DictionaryDatum& d, ConnectorModel& cm ) +{ + ConnectionBase::set_status( d, cm ); + + updateValue< double >( d, names::weight, weight_ ); + + // set delay if mentioned 
+ double delay; + + if ( updateValue< double >( d, names::delay, delay ) ) + { + + const double h = Time::get_resolution().get_ms(); + + double int_delay; + const double frac_delay = std::modf( delay / h, &int_delay ); + + if ( frac_delay == 0 ) + { + kernel().connection_manager.get_delay_checker().assert_valid_delay_ms( delay ); + set_delay_steps( Time::delay_ms_to_steps( delay ) ); + delay_offset_ = 0.0; + } + else + { + const long lowerbound = static_cast< long >( int_delay ); + kernel().connection_manager.get_delay_checker().assert_two_valid_delays_steps( lowerbound, lowerbound + 1 ); + set_delay_steps( lowerbound + 1 ); + delay_offset_ = h * ( 1.0 - frac_delay ); + } + } +} + +template < typename targetidentifierT > +void +cont_delay_synapse< targetidentifierT >::check_synapse_params( const DictionaryDatum& syn_spec ) const +{ + if ( syn_spec->known( names::delay ) ) + { + LOG( M_WARNING, + "Connect", + "The delay will be rounded to the next multiple of the time step. " + "To use a more precise time delay it needs to be defined within " + "the synapse, e.g. with CopyModel()." ); + } +} + } // of namespace nest #endif // of #ifndef CONT_DELAY_SYNAPSE_H diff --git a/models/cont_delay_synapse_impl.h b/models/cont_delay_synapse_impl.h deleted file mode 100644 index 4a029b82a8..0000000000 --- a/models/cont_delay_synapse_impl.h +++ /dev/null @@ -1,109 +0,0 @@ -/* - * cont_delay_synapse_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#ifndef CONT_DELAY_SYNAPSE_IMPL_H -#define CONT_DELAY_SYNAPSE_IMPL_H - -#include "cont_delay_synapse.h" - -// Includes from nestkernel: -#include "common_synapse_properties.h" -#include "connector_model.h" -#include "event.h" - -// Includes from sli: -#include "dictdatum.h" - -namespace nest -{ - -template < typename targetidentifierT > -cont_delay_synapse< targetidentifierT >::cont_delay_synapse() - : ConnectionBase() - , weight_( 1.0 ) - , delay_offset_( 0.0 ) -{ -} - -template < typename targetidentifierT > -void -cont_delay_synapse< targetidentifierT >::get_status( DictionaryDatum& d ) const -{ - ConnectionBase::get_status( d ); - - def< double >( d, names::weight, weight_ ); - def< double >( d, names::delay, Time( Time::step( get_delay_steps() ) ).get_ms() - delay_offset_ ); - def< long >( d, names::size_of, sizeof( *this ) ); -} - -template < typename targetidentifierT > -void -cont_delay_synapse< targetidentifierT >::set_status( const DictionaryDatum& d, ConnectorModel& cm ) -{ - ConnectionBase::set_status( d, cm ); - - updateValue< double >( d, names::weight, weight_ ); - - // set delay if mentioned - double delay; - - if ( updateValue< double >( d, names::delay, delay ) ) - { - - const double h = Time::get_resolution().get_ms(); - - double int_delay; - const double frac_delay = std::modf( delay / h, &int_delay ); - - if ( frac_delay == 0 ) - { - kernel().connection_manager.get_delay_checker().assert_valid_delay_ms( delay ); - set_delay_steps( Time::delay_ms_to_steps( delay ) ); - delay_offset_ = 0.0; - } - else - { - const long lowerbound = static_cast< long >( int_delay ); - kernel().connection_manager.get_delay_checker().assert_two_valid_delays_steps( lowerbound, lowerbound + 1 ); - set_delay_steps( lowerbound + 1 ); - delay_offset_ = h * ( 1.0 - frac_delay ); - } - } -} - -template < typename targetidentifierT > -void 
-cont_delay_synapse< targetidentifierT >::check_synapse_params( const DictionaryDatum& syn_spec ) const -{ - if ( syn_spec->known( names::delay ) ) - { - LOG( M_WARNING, - "Connect", - "The delay will be rounded to the next multiple of the time step. " - "To use a more precise time delay it needs to be defined within " - "the synapse, e.g. with CopyModel()." ); - } -} - -} // of namespace nest - -#endif // #ifndef CONT_DELAY_SYNAPSE_IMPL_H diff --git a/models/correlation_detector.cpp b/models/correlation_detector.cpp index 3d7b09b8ae..9846ffac50 100644 --- a/models/correlation_detector.cpp +++ b/models/correlation_detector.cpp @@ -31,10 +31,6 @@ #include "dict_util.h" #include "logging.h" -// Includes from nestkernel: -#include "model_manager_impl.h" -#include "nest_impl.h" - // Includes from sli: #include "arraydatum.h" #include "dict.h" diff --git a/models/correlomatrix_detector.cpp b/models/correlomatrix_detector.cpp index bab3a03043..7fb08887c7 100644 --- a/models/correlomatrix_detector.cpp +++ b/models/correlomatrix_detector.cpp @@ -33,8 +33,6 @@ // Includes from nestkernel: #include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" // Includes from sli: #include "arraydatum.h" diff --git a/models/correlospinmatrix_detector.cpp b/models/correlospinmatrix_detector.cpp index ec270da576..d49d82c0c3 100644 --- a/models/correlospinmatrix_detector.cpp +++ b/models/correlospinmatrix_detector.cpp @@ -33,8 +33,6 @@ // Includes from nestkernel: #include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" // Includes from sli: #include "arraydatum.h" diff --git a/models/dc_generator.cpp b/models/dc_generator.cpp index 7c5389914f..606a7e2355 100644 --- a/models/dc_generator.cpp +++ b/models/dc_generator.cpp @@ -23,10 +23,7 @@ #include "dc_generator.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // 
Includes from libnestutil: #include "dict_util.h" diff --git a/models/diffusion_connection.cpp b/models/diffusion_connection.cpp index bd23f5dfe1..9bfd6f6449 100644 --- a/models/diffusion_connection.cpp +++ b/models/diffusion_connection.cpp @@ -22,9 +22,6 @@ #include "diffusion_connection.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_diffusion_connection( const std::string& name ) { diff --git a/models/eprop_iaf.cpp b/models/eprop_iaf.cpp index fb336a40f4..adf74577af 100644 --- a/models/eprop_iaf.cpp +++ b/models/eprop_iaf.cpp @@ -31,11 +31,8 @@ #include "numerics.h" // nestkernel -#include "eprop_archiving_node_recurrent_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // sli #include "dictutils.h" diff --git a/models/eprop_iaf.h b/models/eprop_iaf.h index 225de65333..a9966fd16a 100644 --- a/models/eprop_iaf.h +++ b/models/eprop_iaf.h @@ -25,7 +25,6 @@ // nestkernel #include "connection.h" -#include "eprop_archiving_node_impl.h" #include "eprop_archiving_node_recurrent.h" #include "eprop_synapse.h" #include "event.h" diff --git a/models/eprop_iaf_adapt.cpp b/models/eprop_iaf_adapt.cpp index 10cb9ff224..7f52ed0554 100644 --- a/models/eprop_iaf_adapt.cpp +++ b/models/eprop_iaf_adapt.cpp @@ -31,11 +31,8 @@ #include "numerics.h" // nestkernel -#include "eprop_archiving_node_recurrent_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // sli #include "dictutils.h" diff --git a/models/eprop_iaf_adapt.h b/models/eprop_iaf_adapt.h index d404dcbd69..e6f1976fe6 100644 --- a/models/eprop_iaf_adapt.h +++ b/models/eprop_iaf_adapt.h @@ -25,7 +25,6 @@ // nestkernel #include "connection.h" -#include "eprop_archiving_node_impl.h" #include "eprop_archiving_node_recurrent.h" #include "eprop_synapse.h" #include "event.h" diff --git a/models/eprop_iaf_adapt_bsshslm_2020.cpp 
b/models/eprop_iaf_adapt_bsshslm_2020.cpp index f37f2203a0..b665212e0c 100644 --- a/models/eprop_iaf_adapt_bsshslm_2020.cpp +++ b/models/eprop_iaf_adapt_bsshslm_2020.cpp @@ -31,11 +31,8 @@ #include "numerics.h" // nestkernel -#include "eprop_archiving_node_recurrent_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // sli #include "dictutils.h" diff --git a/models/eprop_iaf_adapt_bsshslm_2020.h b/models/eprop_iaf_adapt_bsshslm_2020.h index f8d8009ba2..844ca7cf3c 100644 --- a/models/eprop_iaf_adapt_bsshslm_2020.h +++ b/models/eprop_iaf_adapt_bsshslm_2020.h @@ -25,7 +25,6 @@ // nestkernel #include "connection.h" -#include "eprop_archiving_node_impl.h" #include "eprop_archiving_node_recurrent.h" #include "event.h" #include "nest_types.h" diff --git a/models/eprop_iaf_bsshslm_2020.cpp b/models/eprop_iaf_bsshslm_2020.cpp index a4c52557ff..2a6a4f9aac 100644 --- a/models/eprop_iaf_bsshslm_2020.cpp +++ b/models/eprop_iaf_bsshslm_2020.cpp @@ -31,11 +31,8 @@ #include "numerics.h" // nestkernel -#include "eprop_archiving_node_recurrent_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // sli #include "dictutils.h" diff --git a/models/eprop_iaf_bsshslm_2020.h b/models/eprop_iaf_bsshslm_2020.h index 34cc9a575d..1eca5799a7 100644 --- a/models/eprop_iaf_bsshslm_2020.h +++ b/models/eprop_iaf_bsshslm_2020.h @@ -25,7 +25,6 @@ // nestkernel #include "connection.h" -#include "eprop_archiving_node_impl.h" #include "eprop_archiving_node_recurrent.h" #include "event.h" #include "nest_types.h" diff --git a/models/eprop_iaf_psc_delta.cpp b/models/eprop_iaf_psc_delta.cpp index a8b13ac4e4..bcc83788b5 100644 --- a/models/eprop_iaf_psc_delta.cpp +++ b/models/eprop_iaf_psc_delta.cpp @@ -31,11 +31,8 @@ #include "numerics.h" // nestkernel -#include "eprop_archiving_node_recurrent_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include 
"nest_impl.h" -#include "universal_data_logger_impl.h" // sli #include "dictutils.h" diff --git a/models/eprop_iaf_psc_delta.h b/models/eprop_iaf_psc_delta.h index c36066a984..60a7ee4235 100644 --- a/models/eprop_iaf_psc_delta.h +++ b/models/eprop_iaf_psc_delta.h @@ -25,7 +25,6 @@ // nestkernel #include "connection.h" -#include "eprop_archiving_node_impl.h" #include "eprop_archiving_node_recurrent.h" #include "eprop_synapse.h" #include "event.h" diff --git a/models/eprop_iaf_psc_delta_adapt.cpp b/models/eprop_iaf_psc_delta_adapt.cpp index 9ffe2a8b69..f720140773 100644 --- a/models/eprop_iaf_psc_delta_adapt.cpp +++ b/models/eprop_iaf_psc_delta_adapt.cpp @@ -31,11 +31,8 @@ #include "numerics.h" // nestkernel -#include "eprop_archiving_node_recurrent_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // sli #include "dictutils.h" diff --git a/models/eprop_iaf_psc_delta_adapt.h b/models/eprop_iaf_psc_delta_adapt.h index 3c0949de3a..50d65bae46 100644 --- a/models/eprop_iaf_psc_delta_adapt.h +++ b/models/eprop_iaf_psc_delta_adapt.h @@ -25,7 +25,6 @@ // nestkernel #include "connection.h" -#include "eprop_archiving_node_impl.h" #include "eprop_archiving_node_recurrent.h" #include "eprop_synapse.h" #include "event.h" diff --git a/models/eprop_learning_signal_connection.cpp b/models/eprop_learning_signal_connection.cpp index 7e537ed5c0..38230ac394 100644 --- a/models/eprop_learning_signal_connection.cpp +++ b/models/eprop_learning_signal_connection.cpp @@ -23,7 +23,6 @@ #include "eprop_learning_signal_connection.h" // nestkernel -#include "nest_impl.h" void nest::register_eprop_learning_signal_connection( const std::string& name ) diff --git a/models/eprop_learning_signal_connection_bsshslm_2020.cpp b/models/eprop_learning_signal_connection_bsshslm_2020.cpp index fe29c2a84c..b5477ef5ec 100644 --- a/models/eprop_learning_signal_connection_bsshslm_2020.cpp +++ 
b/models/eprop_learning_signal_connection_bsshslm_2020.cpp @@ -23,7 +23,6 @@ #include "eprop_learning_signal_connection_bsshslm_2020.h" // nestkernel -#include "nest_impl.h" void nest::register_eprop_learning_signal_connection_bsshslm_2020( const std::string& name ) diff --git a/models/eprop_readout.cpp b/models/eprop_readout.cpp index b2f740d70e..66011a4d60 100644 --- a/models/eprop_readout.cpp +++ b/models/eprop_readout.cpp @@ -33,8 +33,6 @@ // nestkernel #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // sli #include "dictutils.h" diff --git a/models/eprop_readout.h b/models/eprop_readout.h index b96c007542..edaa2ae39b 100644 --- a/models/eprop_readout.h +++ b/models/eprop_readout.h @@ -25,7 +25,6 @@ // nestkernel #include "connection.h" -#include "eprop_archiving_node_impl.h" #include "eprop_archiving_node_readout.h" #include "eprop_synapse.h" #include "event.h" diff --git a/models/eprop_readout_bsshslm_2020.cpp b/models/eprop_readout_bsshslm_2020.cpp index 79f059fb59..f2e903aa2b 100644 --- a/models/eprop_readout_bsshslm_2020.cpp +++ b/models/eprop_readout_bsshslm_2020.cpp @@ -33,8 +33,6 @@ // nestkernel #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // sli #include "dictutils.h" diff --git a/models/eprop_readout_bsshslm_2020.h b/models/eprop_readout_bsshslm_2020.h index 36ae6e7134..38e5ac95be 100644 --- a/models/eprop_readout_bsshslm_2020.h +++ b/models/eprop_readout_bsshslm_2020.h @@ -25,7 +25,6 @@ // nestkernel #include "connection.h" -#include "eprop_archiving_node_impl.h" #include "eprop_archiving_node_readout.h" #include "event.h" #include "nest_types.h" diff --git a/models/eprop_synapse.cpp b/models/eprop_synapse.cpp index f167592024..59b8c40fba 100644 --- a/models/eprop_synapse.cpp +++ b/models/eprop_synapse.cpp @@ -23,7 +23,6 @@ #include "eprop_synapse.h" // nestkernel -#include "nest_impl.h" namespace nest { diff 
--git a/models/eprop_synapse_bsshslm_2020.cpp b/models/eprop_synapse_bsshslm_2020.cpp index ceb1dba4d1..de41bd993f 100644 --- a/models/eprop_synapse_bsshslm_2020.cpp +++ b/models/eprop_synapse_bsshslm_2020.cpp @@ -23,7 +23,6 @@ #include "eprop_synapse_bsshslm_2020.h" // nestkernel -#include "nest_impl.h" namespace nest { diff --git a/models/erfc_neuron.cpp b/models/erfc_neuron.cpp index 891a7cfc1f..ec25c34c05 100644 --- a/models/erfc_neuron.cpp +++ b/models/erfc_neuron.cpp @@ -24,9 +24,6 @@ // Includes from nestkernel #include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" namespace nest { diff --git a/models/gamma_sup_generator.cpp b/models/gamma_sup_generator.cpp index 5817c36f7f..96c59098a4 100644 --- a/models/gamma_sup_generator.cpp +++ b/models/gamma_sup_generator.cpp @@ -22,17 +22,12 @@ #include "gamma_sup_generator.h" -// C++ includes: -#include - // Includes from libnestutil: #include "dict_util.h" -#include "numerics.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" +#include "event_delivery_manager.h" #include "kernel_manager.h" -#include "nest_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/gap_junction.cpp b/models/gap_junction.cpp index 4dd0886c6c..63fce5f4f4 100644 --- a/models/gap_junction.cpp +++ b/models/gap_junction.cpp @@ -22,9 +22,6 @@ #include "gap_junction.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_gap_junction( const std::string& name ) { diff --git a/models/gauss_rate.cpp b/models/gauss_rate.cpp index 520f0228e9..6a68d8357a 100644 --- a/models/gauss_rate.cpp +++ b/models/gauss_rate.cpp @@ -24,8 +24,6 @@ // Includes from nestkernel #include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" namespace nest { diff --git a/models/gauss_rate.h b/models/gauss_rate.h index 8d51447775..71cdee6f50 100644 --- a/models/gauss_rate.h +++ b/models/gauss_rate.h @@ -28,9 +28,7 @@ // 
Includes from models: #include "rate_neuron_ipn.h" -#include "rate_neuron_ipn_impl.h" #include "rate_transformer_node.h" -#include "rate_transformer_node_impl.h" // Includes from libnestutil: #include "dict_util.h" diff --git a/models/gif_cond_exp.cpp b/models/gif_cond_exp.cpp index 43e7d96d19..9bb5ef88c4 100644 --- a/models/gif_cond_exp.cpp +++ b/models/gif_cond_exp.cpp @@ -35,8 +35,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/gif_cond_exp_multisynapse.cpp b/models/gif_cond_exp_multisynapse.cpp index 5a31ba490f..d3921d7484 100644 --- a/models/gif_cond_exp_multisynapse.cpp +++ b/models/gif_cond_exp_multisynapse.cpp @@ -36,8 +36,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/gif_pop_psc_exp.cpp b/models/gif_pop_psc_exp.cpp index 43299c877f..32e3ffabb0 100644 --- a/models/gif_pop_psc_exp.cpp +++ b/models/gif_pop_psc_exp.cpp @@ -37,12 +37,6 @@ #include "compose.hpp" #include "dict_util.h" -// Includes from nestkernel: -#include "model_manager_impl.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" - - #ifdef HAVE_GSL namespace nest diff --git a/models/gif_psc_exp.cpp b/models/gif_psc_exp.cpp index 0731dc8175..fd6c5ca2eb 100644 --- a/models/gif_psc_exp.cpp +++ b/models/gif_psc_exp.cpp @@ -23,10 +23,10 @@ #include "gif_psc_exp.h" // Includes from nestkernel: +#include "compose.hpp" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" +#include "numerics.h" // Includes from libnestutil: #include "dict_util.h" @@ -36,9 +36,6 @@ #include "dict.h" #include "dictutils.h" -#include "compose.hpp" -#include "numerics.h" - namespace nest { void diff --git 
a/models/gif_psc_exp_multisynapse.cpp b/models/gif_psc_exp_multisynapse.cpp index cfeca96ea4..8502c0db22 100644 --- a/models/gif_psc_exp_multisynapse.cpp +++ b/models/gif_psc_exp_multisynapse.cpp @@ -30,8 +30,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/ginzburg_neuron.cpp b/models/ginzburg_neuron.cpp index 36b94176cb..0c331630f4 100644 --- a/models/ginzburg_neuron.cpp +++ b/models/ginzburg_neuron.cpp @@ -24,9 +24,6 @@ // Includes from nestkernel #include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" namespace nest { diff --git a/models/glif_cond.cpp b/models/glif_cond.cpp index a1434786b9..3592cc281e 100644 --- a/models/glif_cond.cpp +++ b/models/glif_cond.cpp @@ -36,8 +36,6 @@ #include "exceptions.h" #include "kernel_manager.h" #include "name.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" @@ -802,8 +800,8 @@ nest::glif_cond::handle( CurrentEvent& e ) e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); } -// Do not move this function as inline to h-file. It depends on -// universal_data_logger_impl.h being included here. +// TODO JV +// Do not move this function as inline to h-file. It depends on universal_data_logger.h being included here. 
void nest::glif_cond::handle( DataLoggingRequest& e ) { diff --git a/models/glif_psc.cpp b/models/glif_psc.cpp index b60dafc0ec..c0844856c3 100644 --- a/models/glif_psc.cpp +++ b/models/glif_psc.cpp @@ -31,8 +31,6 @@ #include "exceptions.h" #include "iaf_propagator.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" @@ -627,11 +625,3 @@ nest::glif_psc::handle( CurrentEvent& e ) B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); } - -// Do not move this function as inline to h-file. It depends on -// universal_data_logger_impl.h being included here. -void -nest::glif_psc::handle( DataLoggingRequest& e ) -{ - B_.logger_.handle( e ); // the logger does this for us -} diff --git a/models/glif_psc.h b/models/glif_psc.h index 63d51b93e8..d07e3a60aa 100644 --- a/models/glif_psc.h +++ b/models/glif_psc.h @@ -461,6 +461,12 @@ glif_psc::set_status( const DictionaryDatum& d ) S_ = stmp; } +void +nest::glif_psc::handle( DataLoggingRequest& e ) +{ + B_.logger_.handle( e ); // the logger does this for us +} + } // namespace nest #endif diff --git a/models/glif_psc_double_alpha.cpp b/models/glif_psc_double_alpha.cpp index f79de012d3..c7168ae582 100644 --- a/models/glif_psc_double_alpha.cpp +++ b/models/glif_psc_double_alpha.cpp @@ -31,8 +31,6 @@ #include "exceptions.h" #include "iaf_propagator.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" @@ -698,8 +696,8 @@ nest::glif_psc_double_alpha::handle( CurrentEvent& e ) e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); } -// Do not move this function as inline to h-file. It depends on -// universal_data_logger_impl.h being included here. +// TODO JV +// Do not move this function as inline to h-file. 
It depends on universal_data_logger.h being included here. void nest::glif_psc_double_alpha::handle( DataLoggingRequest& e ) { diff --git a/models/hh_cond_beta_gap_traub.cpp b/models/hh_cond_beta_gap_traub.cpp index 7907b051c8..958b3396c7 100644 --- a/models/hh_cond_beta_gap_traub.cpp +++ b/models/hh_cond_beta_gap_traub.cpp @@ -41,8 +41,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/hh_cond_exp_traub.cpp b/models/hh_cond_exp_traub.cpp index 5aa908a15d..86aae90aba 100644 --- a/models/hh_cond_exp_traub.cpp +++ b/models/hh_cond_exp_traub.cpp @@ -38,8 +38,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/hh_psc_alpha.cpp b/models/hh_psc_alpha.cpp index 29e953b810..d80b410956 100644 --- a/models/hh_psc_alpha.cpp +++ b/models/hh_psc_alpha.cpp @@ -33,11 +33,8 @@ #include "numerics.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/hh_psc_alpha_clopath.cpp b/models/hh_psc_alpha_clopath.cpp index e0c35e830f..287f8ec5c6 100644 --- a/models/hh_psc_alpha_clopath.cpp +++ b/models/hh_psc_alpha_clopath.cpp @@ -33,11 +33,8 @@ #include "numerics.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/hh_psc_alpha_gap.cpp b/models/hh_psc_alpha_gap.cpp index b26176784f..10076c0507 100644 --- a/models/hh_psc_alpha_gap.cpp +++ b/models/hh_psc_alpha_gap.cpp @@ -37,8 
+37,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/ht_neuron.cpp b/models/ht_neuron.cpp index 047ca61fa3..48bf8575a6 100644 --- a/models/ht_neuron.cpp +++ b/models/ht_neuron.cpp @@ -33,8 +33,6 @@ // Includes from nestkernel: #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" namespace nest { diff --git a/models/ht_synapse.cpp b/models/ht_synapse.cpp index b92c297138..968e4fb785 100644 --- a/models/ht_synapse.cpp +++ b/models/ht_synapse.cpp @@ -22,9 +22,6 @@ #include "ht_synapse.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_ht_synapse( const std::string& name ) { diff --git a/models/iaf_bw_2001.cpp b/models/iaf_bw_2001.cpp index e030579183..34c4ce0974 100644 --- a/models/iaf_bw_2001.cpp +++ b/models/iaf_bw_2001.cpp @@ -33,8 +33,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" @@ -486,8 +484,8 @@ nest::iaf_bw_2001::update( Time const& origin, const long from, const long to ) } } -// Do not move this function as inline to h-file. It depends on -// universal_data_logger_impl.h being included here. +// TODO JV +// Do not move this function as inline to h-file. It depends on universal_data_logger.h being included here. 
void nest::iaf_bw_2001::handle( DataLoggingRequest& e ) { diff --git a/models/iaf_bw_2001_exact.cpp b/models/iaf_bw_2001_exact.cpp index 3855c27a53..537a21e325 100644 --- a/models/iaf_bw_2001_exact.cpp +++ b/models/iaf_bw_2001_exact.cpp @@ -32,8 +32,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" @@ -517,8 +515,8 @@ nest::iaf_bw_2001_exact::update( Time const& origin, const long from, const long } } -// Do not move this function as inline to h-file. It depends on -// universal_data_logger_impl.h being included here. +// TODO JV +// Do not move this function as inline to h-file. It depends on universal_data_logger.h being included here. void nest::iaf_bw_2001_exact::handle( DataLoggingRequest& e ) { diff --git a/models/iaf_chs_2007.cpp b/models/iaf_chs_2007.cpp index b5c183d7b0..e947b03bef 100644 --- a/models/iaf_chs_2007.cpp +++ b/models/iaf_chs_2007.cpp @@ -29,8 +29,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/iaf_chxk_2008.cpp b/models/iaf_chxk_2008.cpp index 62559536ae..fb847e3d8e 100644 --- a/models/iaf_chxk_2008.cpp +++ b/models/iaf_chxk_2008.cpp @@ -34,8 +34,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/iaf_cond_alpha.cpp b/models/iaf_cond_alpha.cpp index 7cbff968ee..ff30457d89 100644 --- a/models/iaf_cond_alpha.cpp +++ b/models/iaf_cond_alpha.cpp @@ -36,8 +36,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git 
a/models/iaf_cond_alpha_mc.cpp b/models/iaf_cond_alpha_mc.cpp index 2b99b81025..711bf921f2 100644 --- a/models/iaf_cond_alpha_mc.cpp +++ b/models/iaf_cond_alpha_mc.cpp @@ -36,8 +36,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/iaf_cond_beta.cpp b/models/iaf_cond_beta.cpp index a0f5d59c2e..8a1a537b69 100644 --- a/models/iaf_cond_beta.cpp +++ b/models/iaf_cond_beta.cpp @@ -37,8 +37,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/iaf_cond_exp.cpp b/models/iaf_cond_exp.cpp index c777d686fd..860853d149 100644 --- a/models/iaf_cond_exp.cpp +++ b/models/iaf_cond_exp.cpp @@ -36,8 +36,6 @@ #include "event.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/iaf_cond_exp_sfa_rr.cpp b/models/iaf_cond_exp_sfa_rr.cpp index d37b461e06..96c3d66f58 100644 --- a/models/iaf_cond_exp_sfa_rr.cpp +++ b/models/iaf_cond_exp_sfa_rr.cpp @@ -36,8 +36,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/iaf_psc_alpha.cpp b/models/iaf_psc_alpha.cpp index 053a30a22c..1ea6ce344e 100644 --- a/models/iaf_psc_alpha.cpp +++ b/models/iaf_psc_alpha.cpp @@ -30,10 +30,7 @@ #include "exceptions.h" #include "iaf_propagator.h" #include "kernel_manager.h" -#include "nest_impl.h" #include "numerics.h" -#include "ring_buffer_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/iaf_psc_alpha_multisynapse.cpp 
b/models/iaf_psc_alpha_multisynapse.cpp index cee69d2889..5a55757292 100644 --- a/models/iaf_psc_alpha_multisynapse.cpp +++ b/models/iaf_psc_alpha_multisynapse.cpp @@ -30,9 +30,7 @@ #include "exceptions.h" #include "iaf_propagator.h" #include "kernel_manager.h" -#include "nest_impl.h" #include "numerics.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/iaf_psc_alpha_ps.cpp b/models/iaf_psc_alpha_ps.cpp index 885d1f2051..cf040eb540 100644 --- a/models/iaf_psc_alpha_ps.cpp +++ b/models/iaf_psc_alpha_ps.cpp @@ -34,8 +34,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/iaf_psc_delta.cpp b/models/iaf_psc_delta.cpp index 4de329ed60..4d405c18e2 100644 --- a/models/iaf_psc_delta.cpp +++ b/models/iaf_psc_delta.cpp @@ -34,8 +34,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/iaf_psc_delta_ps.cpp b/models/iaf_psc_delta_ps.cpp index 1da42f3dcc..513715967c 100644 --- a/models/iaf_psc_delta_ps.cpp +++ b/models/iaf_psc_delta_ps.cpp @@ -35,8 +35,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/iaf_psc_exp.cpp b/models/iaf_psc_exp.cpp index 48385040db..e6b3a61e55 100644 --- a/models/iaf_psc_exp.cpp +++ b/models/iaf_psc_exp.cpp @@ -32,10 +32,7 @@ #include "exceptions.h" #include "iaf_propagator.h" #include "kernel_manager.h" -#include "nest_impl.h" #include "numerics.h" -#include "ring_buffer_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/iaf_psc_exp_htum.cpp 
b/models/iaf_psc_exp_htum.cpp index dd38c565cd..85d430b38f 100644 --- a/models/iaf_psc_exp_htum.cpp +++ b/models/iaf_psc_exp_htum.cpp @@ -28,9 +28,7 @@ #include "exceptions.h" #include "iaf_propagator.h" #include "kernel_manager.h" -#include "nest_impl.h" #include "numerics.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/iaf_psc_exp_multisynapse.cpp b/models/iaf_psc_exp_multisynapse.cpp index 7c53921537..3d39df7889 100644 --- a/models/iaf_psc_exp_multisynapse.cpp +++ b/models/iaf_psc_exp_multisynapse.cpp @@ -27,9 +27,7 @@ #include "exceptions.h" #include "iaf_propagator.h" #include "kernel_manager.h" -#include "nest_impl.h" #include "numerics.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/iaf_psc_exp_ps.cpp b/models/iaf_psc_exp_ps.cpp index 71f83acc78..109861db27 100644 --- a/models/iaf_psc_exp_ps.cpp +++ b/models/iaf_psc_exp_ps.cpp @@ -34,8 +34,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/iaf_psc_exp_ps_lossless.cpp b/models/iaf_psc_exp_ps_lossless.cpp index dcbf6435bf..9561747875 100644 --- a/models/iaf_psc_exp_ps_lossless.cpp +++ b/models/iaf_psc_exp_ps_lossless.cpp @@ -27,8 +27,6 @@ // Includes from nestkernel: #include "exceptions.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from libnestutil: #include "dict_util.h" diff --git a/models/iaf_tum_2000.cpp b/models/iaf_tum_2000.cpp index 29ae084558..b1510bf0fe 100644 --- a/models/iaf_tum_2000.cpp +++ b/models/iaf_tum_2000.cpp @@ -31,10 +31,7 @@ #include "exceptions.h" #include "iaf_propagator.h" #include "kernel_manager.h" -#include "nest_impl.h" #include "numerics.h" -#include "ring_buffer_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git 
a/models/ignore_and_fire.cpp b/models/ignore_and_fire.cpp index 82b61785da..5e6368bebf 100644 --- a/models/ignore_and_fire.cpp +++ b/models/ignore_and_fire.cpp @@ -27,15 +27,11 @@ // Includes from nestkernel: #include "exceptions.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from libnestutil: #include "dict_util.h" #include "iaf_propagator.h" #include "kernel_manager.h" -#include "numerics.h" -#include "ring_buffer_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/ignore_and_fire.h b/models/ignore_and_fire.h index 857e596ce9..b74ea3a7a4 100644 --- a/models/ignore_and_fire.h +++ b/models/ignore_and_fire.h @@ -116,24 +116,24 @@ class ignore_and_fire : public ArchivingNode using Node::handle; using Node::handles_test_event; - size_t send_test_event( Node&, size_t, synindex, bool ); + size_t send_test_event( Node&, size_t, synindex, bool ) override; - void handle( SpikeEvent& ); - void handle( CurrentEvent& ); - void handle( DataLoggingRequest& ); + void handle( SpikeEvent& ) override; + void handle( CurrentEvent& ) override; + void handle( DataLoggingRequest& ) override; - size_t handles_test_event( SpikeEvent&, size_t ); - size_t handles_test_event( CurrentEvent&, size_t ); - size_t handles_test_event( DataLoggingRequest&, size_t ); + size_t handles_test_event( SpikeEvent&, size_t ) override; + size_t handles_test_event( CurrentEvent&, size_t ) override; + size_t handles_test_event( DataLoggingRequest&, size_t ) override; - void get_status( DictionaryDatum& ) const; - void set_status( const DictionaryDatum& ); + void get_status( DictionaryDatum& ) const override; + void set_status( const DictionaryDatum& ) override; private: - void init_buffers_(); - void pre_run_hook(); + void init_buffers_() override; + void pre_run_hook() override; - void update( Time const&, const long, const long ); + void update( Time const&, const long, const long ) override; // The next two classes need to be friends to access the 
State_ class/member friend class RecordablesMap< ignore_and_fire >; diff --git a/models/inhomogeneous_poisson_generator.cpp b/models/inhomogeneous_poisson_generator.cpp index 8f0b5f58ba..cb5459ea3c 100644 --- a/models/inhomogeneous_poisson_generator.cpp +++ b/models/inhomogeneous_poisson_generator.cpp @@ -29,11 +29,8 @@ #include "numerics.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "arraydatum.h" diff --git a/models/izhikevich.cpp b/models/izhikevich.cpp index 9865f74427..71558f17e3 100644 --- a/models/izhikevich.cpp +++ b/models/izhikevich.cpp @@ -30,11 +30,8 @@ #include "numerics.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/jonke_synapse.cpp b/models/jonke_synapse.cpp index 3aea1fb292..532ecf40cb 100644 --- a/models/jonke_synapse.cpp +++ b/models/jonke_synapse.cpp @@ -22,9 +22,6 @@ #include "jonke_synapse.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_jonke_synapse( const std::string& name ) { diff --git a/models/lin_rate.cpp b/models/lin_rate.cpp index 2db3fcafd1..2c524a2afd 100644 --- a/models/lin_rate.cpp +++ b/models/lin_rate.cpp @@ -24,8 +24,8 @@ // Includes from nestkernel #include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" + +#include namespace nest { diff --git a/models/lin_rate.h b/models/lin_rate.h index 5482c62aae..e11e025c0e 100644 --- a/models/lin_rate.h +++ b/models/lin_rate.h @@ -25,11 +25,8 @@ // Includes from models: #include "rate_neuron_ipn.h" -#include "rate_neuron_ipn_impl.h" #include "rate_neuron_opn.h" -#include "rate_neuron_opn_impl.h" #include "rate_transformer_node.h" -#include 
"rate_transformer_node_impl.h" namespace nest { diff --git a/models/mat2_psc_exp.cpp b/models/mat2_psc_exp.cpp index 104d20af0f..2c42c5a458 100644 --- a/models/mat2_psc_exp.cpp +++ b/models/mat2_psc_exp.cpp @@ -30,8 +30,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/mcculloch_pitts_neuron.cpp b/models/mcculloch_pitts_neuron.cpp index 2f8b15a6d9..9032171e44 100644 --- a/models/mcculloch_pitts_neuron.cpp +++ b/models/mcculloch_pitts_neuron.cpp @@ -24,9 +24,6 @@ // Includes from nestkernel #include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" namespace nest { diff --git a/models/mip_generator.cpp b/models/mip_generator.cpp index bf60516e97..03ce07134e 100644 --- a/models/mip_generator.cpp +++ b/models/mip_generator.cpp @@ -27,10 +27,9 @@ #include "dict_util.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" +#include "event_delivery_manager.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" void nest::register_mip_generator( const std::string& name ) diff --git a/models/multimeter.cpp b/models/multimeter.cpp index 2943c45850..1318dad50a 100644 --- a/models/multimeter.cpp +++ b/models/multimeter.cpp @@ -22,13 +22,9 @@ #include "multimeter.h" -// Includes from nestkernel: -#include "event_delivery_manager_impl.h" -#include "model_manager_impl.h" -#include "nest_impl.h" - // Includes from libnestutil: #include "dict_util.h" +#include "event_delivery_manager.h" namespace nest { diff --git a/models/music_cont_in_proxy.cpp b/models/music_cont_in_proxy.cpp index b0301aa8e4..41dbb9269a 100644 --- a/models/music_cont_in_proxy.cpp +++ b/models/music_cont_in_proxy.cpp @@ -38,7 +38,6 @@ // Includes from nestkernel: #include "kernel_manager.h" -#include "nest_impl.h" void 
nest::register_music_cont_in_proxy( const std::string& name ) diff --git a/models/music_cont_out_proxy.cpp b/models/music_cont_out_proxy.cpp index a3d57d4f58..1eef0f015c 100644 --- a/models/music_cont_out_proxy.cpp +++ b/models/music_cont_out_proxy.cpp @@ -29,10 +29,8 @@ #include // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "kernel_manager.h" #include "nest_datums.h" -#include "nest_impl.h" // Includes from libnestutil: #include "compose.hpp" diff --git a/models/music_event_in_proxy.cpp b/models/music_event_in_proxy.cpp index 331d195f46..7e182c0dcc 100644 --- a/models/music_event_in_proxy.cpp +++ b/models/music_event_in_proxy.cpp @@ -31,17 +31,10 @@ #include "arraydatum.h" #include "dict.h" #include "dictutils.h" -#include "doubledatum.h" -#include "integerdatum.h" - -// Includes from libnestutil: -#include "compose.hpp" -#include "logging.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "kernel_manager.h" -#include "nest_impl.h" +#include "music_manager.h" void nest::register_music_event_in_proxy( const std::string& name ) diff --git a/models/music_event_out_proxy.cpp b/models/music_event_out_proxy.cpp index 7c30ccd47d..34e6c5094c 100644 --- a/models/music_event_out_proxy.cpp +++ b/models/music_event_out_proxy.cpp @@ -31,7 +31,6 @@ #include "arraydatum.h" #include "dict.h" #include "dictutils.h" -#include "doubledatum.h" #include "integerdatum.h" // Includes from libnestutil: @@ -40,7 +39,8 @@ // Includes from nestkernel: #include "kernel_manager.h" -#include "nest_impl.h" +#include "music_manager.h" +#include "nest.h" void nest::register_music_event_out_proxy( const std::string& name ) diff --git a/models/music_message_in_proxy.cpp b/models/music_message_in_proxy.cpp index b0b7aa4f82..fd58c5947b 100644 --- a/models/music_message_in_proxy.cpp +++ b/models/music_message_in_proxy.cpp @@ -38,7 +38,6 @@ // Includes from nestkernel: #include "kernel_manager.h" -#include "nest_impl.h" void 
nest::register_music_message_in_proxy( const std::string& name ) diff --git a/models/music_rate_in_proxy.cpp b/models/music_rate_in_proxy.cpp index 20f6a23dd9..70028f3f9f 100644 --- a/models/music_rate_in_proxy.cpp +++ b/models/music_rate_in_proxy.cpp @@ -36,9 +36,7 @@ #include "logging.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "kernel_manager.h" -#include "nest_impl.h" void nest::register_music_rate_in_proxy( const std::string& name ) diff --git a/models/music_rate_out_proxy.cpp b/models/music_rate_out_proxy.cpp index 9753421447..e12631a0ef 100644 --- a/models/music_rate_out_proxy.cpp +++ b/models/music_rate_out_proxy.cpp @@ -40,7 +40,6 @@ // Includes from nestkernel: #include "kernel_manager.h" -#include "nest_impl.h" /* ---------------------------------------------------------------- * Default constructors defining default parameters and state diff --git a/models/noise_generator.cpp b/models/noise_generator.cpp index 7bc72369da..6f9fa3795c 100644 --- a/models/noise_generator.cpp +++ b/models/noise_generator.cpp @@ -29,10 +29,8 @@ #include "numerics.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" +#include "event_delivery_manager.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/noise_generator.h b/models/noise_generator.h index 8f73511ae3..e928af53ff 100644 --- a/models/noise_generator.h +++ b/models/noise_generator.h @@ -28,9 +28,7 @@ // Includes from nestkernel: #include "connection.h" -#include "device_node.h" #include "event.h" -#include "nest_timeconverter.h" #include "nest_types.h" #include "random_generators.h" #include "stimulation_device.h" diff --git a/models/parrot_neuron.cpp b/models/parrot_neuron.cpp index 6009758f02..961685dd72 100644 --- a/models/parrot_neuron.cpp +++ b/models/parrot_neuron.cpp @@ -23,17 +23,8 @@ #include "parrot_neuron.h" -// Includes from libnestutil: 
-#include "numerics.h" - // Includes from nestkernel: -#include "event_delivery_manager_impl.h" -#include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" - -// Includes from sli: -#include "dictutils.h" namespace nest { diff --git a/models/parrot_neuron_ps.cpp b/models/parrot_neuron_ps.cpp index 18140972a8..79eddaae4c 100644 --- a/models/parrot_neuron_ps.cpp +++ b/models/parrot_neuron_ps.cpp @@ -26,10 +26,8 @@ #include "numerics.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/models/poisson_generator.cpp b/models/poisson_generator.cpp index 516a886bca..e1de8bfe10 100644 --- a/models/poisson_generator.cpp +++ b/models/poisson_generator.cpp @@ -23,10 +23,9 @@ #include "poisson_generator.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" +#include "event_delivery_manager.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" // Includes from libnestutil: #include "dict_util.h" diff --git a/models/poisson_generator_ps.cpp b/models/poisson_generator_ps.cpp index 7a913d15bc..830d558c28 100644 --- a/models/poisson_generator_ps.cpp +++ b/models/poisson_generator_ps.cpp @@ -27,12 +27,11 @@ #include // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "kernel_manager.h" -#include "nest_impl.h" // Includes from libnestutil: #include "dict_util.h" +#include "event_delivery_manager.h" // Includes from sli: #include "dict.h" diff --git a/models/pp_cond_exp_mc_urbanczik.cpp b/models/pp_cond_exp_mc_urbanczik.cpp index 04d58df15f..fc6c477902 100644 --- a/models/pp_cond_exp_mc_urbanczik.cpp +++ b/models/pp_cond_exp_mc_urbanczik.cpp @@ -35,9 +35,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // 
Includes from sli: #include "dict.h" diff --git a/models/pp_cond_exp_mc_urbanczik.h b/models/pp_cond_exp_mc_urbanczik.h index 9a109247bb..8258549a1c 100644 --- a/models/pp_cond_exp_mc_urbanczik.h +++ b/models/pp_cond_exp_mc_urbanczik.h @@ -45,7 +45,6 @@ #include "ring_buffer.h" #include "universal_data_logger.h" #include "urbanczik_archiving_node.h" -#include "urbanczik_archiving_node_impl.h" // Includes from sli: #include "dictdatum.h" diff --git a/models/pp_psc_delta.cpp b/models/pp_psc_delta.cpp index 482fde1dff..96161ecb40 100644 --- a/models/pp_psc_delta.cpp +++ b/models/pp_psc_delta.cpp @@ -36,8 +36,6 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/ppd_sup_generator.cpp b/models/ppd_sup_generator.cpp index 32eb092145..5dddb142fb 100644 --- a/models/ppd_sup_generator.cpp +++ b/models/ppd_sup_generator.cpp @@ -30,9 +30,8 @@ #include "numerics.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" +#include "event_delivery_manager.h" #include "kernel_manager.h" -#include "nest_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/pulsepacket_generator.cpp b/models/pulsepacket_generator.cpp index 19b4db16c6..dfe2391dee 100644 --- a/models/pulsepacket_generator.cpp +++ b/models/pulsepacket_generator.cpp @@ -27,13 +27,11 @@ // Includes from libnestutil: #include "dict_util.h" -#include "numerics.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" +#include "event_delivery_manager.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/quantal_stp_synapse.cpp b/models/quantal_stp_synapse.cpp index 829579605f..d0d4be6946 100644 --- a/models/quantal_stp_synapse.cpp +++ b/models/quantal_stp_synapse.cpp @@ -21,10 +21,6 @@ */ #include "quantal_stp_synapse.h" -#include 
"quantal_stp_synapse_impl.h" - -// Includes from nestkernel: -#include "nest_impl.h" void nest::register_quantal_stp_synapse( const std::string& name ) diff --git a/models/quantal_stp_synapse.h b/models/quantal_stp_synapse.h index 41b2fbf51d..79f4892a12 100644 --- a/models/quantal_stp_synapse.h +++ b/models/quantal_stp_synapse.h @@ -259,6 +259,70 @@ quantal_stp_synapse< targetidentifierT >::send( Event& e, size_t t, const Common return send_spike; } +template < typename targetidentifierT > +quantal_stp_synapse< targetidentifierT >::quantal_stp_synapse() + : ConnectionBase() + , weight_( 1.0 ) + , U_( 0.5 ) + , u_( U_ ) + , tau_rec_( 800.0 ) + , tau_fac_( 0.0 ) + , n_( 1 ) + , a_( n_ ) + , t_lastspike_( -1.0 ) +{ +} + +template < typename targetidentifierT > +void +quantal_stp_synapse< targetidentifierT >::get_status( DictionaryDatum& d ) const +{ + ConnectionBase::get_status( d ); + def< double >( d, names::weight, weight_ ); + def< double >( d, names::dU, U_ ); + def< double >( d, names::u, u_ ); + def< double >( d, names::tau_rec, tau_rec_ ); + def< double >( d, names::tau_fac, tau_fac_ ); + def< int >( d, names::n, n_ ); + def< int >( d, names::a, a_ ); +} + + +template < typename targetidentifierT > +void +quantal_stp_synapse< targetidentifierT >::set_status( const DictionaryDatum& d, ConnectorModel& cm ) +{ + ConnectionBase::set_status( d, cm ); + updateValue< double >( d, names::weight, weight_ ); + + updateValue< double >( d, names::dU, U_ ); + if ( U_ > 1.0 or U_ < 0.0 ) + { + throw BadProperty( "'U' must be in [0,1]." ); + } + + updateValue< double >( d, names::u, u_ ); + if ( u_ > 1.0 or u_ < 0.0 ) + { + throw BadProperty( "'u' must be in [0,1]." ); + } + + updateValue< double >( d, names::tau_rec, tau_rec_ ); + if ( tau_rec_ <= 0.0 ) + { + throw BadProperty( "'tau_rec' must be > 0." ); + } + + updateValue< double >( d, names::tau_fac, tau_fac_ ); + if ( tau_fac_ < 0.0 ) + { + throw BadProperty( "'tau_fac' must be >= 0." 
); + } + + updateValue< long >( d, names::n, n_ ); + updateValue< long >( d, names::a, a_ ); +} + } // namespace #endif // QUANTAL_STP_SYNAPSE_H diff --git a/models/quantal_stp_synapse_impl.h b/models/quantal_stp_synapse_impl.h index 1df262df46..e69de29bb2 100644 --- a/models/quantal_stp_synapse_impl.h +++ b/models/quantal_stp_synapse_impl.h @@ -1,105 +0,0 @@ -/* - * quantal_stp_synapse_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . 
- * - */ - -#ifndef QUANTAL_STP_SYNAPSE_IMPL_H -#define QUANTAL_STP_SYNAPSE_IMPL_H - -#include "quantal_stp_synapse.h" - -// Includes from nestkernel: -#include "connection.h" -#include "connector_model.h" -#include "nest_names.h" - -// Includes from sli: -#include "dictutils.h" - -namespace nest -{ - -template < typename targetidentifierT > -quantal_stp_synapse< targetidentifierT >::quantal_stp_synapse() - : ConnectionBase() - , weight_( 1.0 ) - , U_( 0.5 ) - , u_( U_ ) - , tau_rec_( 800.0 ) - , tau_fac_( 0.0 ) - , n_( 1 ) - , a_( n_ ) - , t_lastspike_( -1.0 ) -{ -} - -template < typename targetidentifierT > -void -quantal_stp_synapse< targetidentifierT >::get_status( DictionaryDatum& d ) const -{ - ConnectionBase::get_status( d ); - def< double >( d, names::weight, weight_ ); - def< double >( d, names::dU, U_ ); - def< double >( d, names::u, u_ ); - def< double >( d, names::tau_rec, tau_rec_ ); - def< double >( d, names::tau_fac, tau_fac_ ); - def< int >( d, names::n, n_ ); - def< int >( d, names::a, a_ ); -} - - -template < typename targetidentifierT > -void -quantal_stp_synapse< targetidentifierT >::set_status( const DictionaryDatum& d, ConnectorModel& cm ) -{ - ConnectionBase::set_status( d, cm ); - updateValue< double >( d, names::weight, weight_ ); - - updateValue< double >( d, names::dU, U_ ); - if ( U_ > 1.0 or U_ < 0.0 ) - { - throw BadProperty( "'U' must be in [0,1]." ); - } - - updateValue< double >( d, names::u, u_ ); - if ( u_ > 1.0 or u_ < 0.0 ) - { - throw BadProperty( "'u' must be in [0,1]." ); - } - - updateValue< double >( d, names::tau_rec, tau_rec_ ); - if ( tau_rec_ <= 0.0 ) - { - throw BadProperty( "'tau_rec' must be > 0." ); - } - - updateValue< double >( d, names::tau_fac, tau_fac_ ); - if ( tau_fac_ < 0.0 ) - { - throw BadProperty( "'tau_fac' must be >= 0." 
); - } - - updateValue< long >( d, names::n, n_ ); - updateValue< long >( d, names::a, a_ ); -} - -} // of namespace nest - -#endif // #ifndef QUANTAL_STP_SYNAPSE_IMPL_H diff --git a/models/rate_connection_delayed.cpp b/models/rate_connection_delayed.cpp index 8e2c986a90..c6373b43f7 100644 --- a/models/rate_connection_delayed.cpp +++ b/models/rate_connection_delayed.cpp @@ -22,9 +22,6 @@ #include "rate_connection_delayed.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_rate_connection_delayed( const std::string& name ) { diff --git a/models/rate_connection_instantaneous.cpp b/models/rate_connection_instantaneous.cpp index 89c88c5bd5..d45bad767c 100644 --- a/models/rate_connection_instantaneous.cpp +++ b/models/rate_connection_instantaneous.cpp @@ -22,9 +22,6 @@ #include "rate_connection_instantaneous.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_rate_connection_instantaneous( const std::string& name ) { diff --git a/models/rate_neuron_ipn.h b/models/rate_neuron_ipn.h index 14c22920ac..5140deb72a 100644 --- a/models/rate_neuron_ipn.h +++ b/models/rate_neuron_ipn.h @@ -27,19 +27,29 @@ #include "config.h" // C++ includes: +#include // in case we need isnan() // fabs #include // Includes from nestkernel: #include "archiving_node.h" #include "connection.h" #include "event.h" -#include "nest_types.h" +#include "exceptions.h" +#include "kernel_manager.h" #include "node.h" #include "random_generators.h" #include "recordables_map.h" #include "ring_buffer.h" #include "universal_data_logger.h" +// Includes from libnestutil: +#include "dict_util.h" +#include "numerics.h" + +// Includes from sli: +#include "dict.h" +#include "dictutils.h" + namespace nest { @@ -385,6 +395,429 @@ rate_neuron_ipn< TNonlinearities >::set_status( const DictionaryDatum& d ) nonlinearities_.set( d, this ); } +/* ---------------------------------------------------------------- + * Recordables map + * 
---------------------------------------------------------------- */ + +template < class TNonlinearities > +RecordablesMap< rate_neuron_ipn< TNonlinearities > > rate_neuron_ipn< TNonlinearities >::recordablesMap_; + +/* ---------------------------------------------------------------- + * Default constructors defining default parameters and state + * ---------------------------------------------------------------- */ + +template < class TNonlinearities > +rate_neuron_ipn< TNonlinearities >::Parameters_::Parameters_() + : tau_( 10.0 ) // ms + , lambda_( 1.0 ) // ms + , sigma_( 1.0 ) + , mu_( 0.0 ) + , rectify_rate_( 0.0 ) + , linear_summation_( true ) + , rectify_output_( false ) + , mult_coupling_( false ) +{ + recordablesMap_.create(); +} + +template < class TNonlinearities > +rate_neuron_ipn< TNonlinearities >::State_::State_() + : rate_( 0.0 ) + , noise_( 0.0 ) +{ +} + +/* ---------------------------------------------------------------- + * Parameter and state extractions and manipulation functions + * ---------------------------------------------------------------- */ + +template < class TNonlinearities > +void +rate_neuron_ipn< TNonlinearities >::Parameters_::get( DictionaryDatum& d ) const +{ + def< double >( d, names::tau, tau_ ); + def< double >( d, names::lambda, lambda_ ); + def< double >( d, names::sigma, sigma_ ); + def< double >( d, names::mu, mu_ ); + def< double >( d, names::rectify_rate, rectify_rate_ ); + def< bool >( d, names::linear_summation, linear_summation_ ); + def< bool >( d, names::rectify_output, rectify_output_ ); + def< bool >( d, names::mult_coupling, mult_coupling_ ); + + // Also allow old names (to not break old scripts) + def< double >( d, names::std, sigma_ ); + def< double >( d, names::mean, mu_ ); +} + +template < class TNonlinearities > +void +rate_neuron_ipn< TNonlinearities >::Parameters_::set( const DictionaryDatum& d, Node* node ) +{ + updateValueParam< double >( d, names::tau, tau_, node ); + updateValueParam< double >( d, 
names::lambda, lambda_, node ); + updateValueParam< double >( d, names::mu, mu_, node ); + updateValueParam< double >( d, names::rectify_rate, rectify_rate_, node ); + updateValueParam< double >( d, names::sigma, sigma_, node ); + updateValueParam< bool >( d, names::linear_summation, linear_summation_, node ); + updateValueParam< bool >( d, names::rectify_output, rectify_output_, node ); + updateValueParam< bool >( d, names::mult_coupling, mult_coupling_, node ); + + // Check for old names + if ( updateValueParam< double >( d, names::mean, mu_, node ) ) + { + LOG( M_WARNING, + "rate_neuron_ipn< TNonlinearities >::Parameters_::set", + "The parameter mean has been renamed to mu. Please use the new " + "name from now on." ); + } + + if ( updateValueParam< double >( d, names::std, sigma_, node ) ) + { + LOG( M_WARNING, + "rate_neuron_ipn< TNonlinearities >::Parameters_::set", + "The parameter std has been renamed to sigma. Please use the new " + "name from now on." ); + } + + // Check for invalid parameters + if ( tau_ <= 0 ) + { + throw BadProperty( "Time constant must be > 0." ); + } + if ( lambda_ < 0 ) + { + throw BadProperty( "Passive decay rate must be >= 0." ); + } + if ( sigma_ < 0 ) + { + throw BadProperty( "Noise parameter must not be negative." ); + } + if ( rectify_rate_ < 0 ) + { + throw BadProperty( "Rectifying rate must not be negative." 
); + } +} + +template < class TNonlinearities > +void +rate_neuron_ipn< TNonlinearities >::State_::get( DictionaryDatum& d ) const +{ + def< double >( d, names::rate, rate_ ); // Rate + def< double >( d, names::noise, noise_ ); // Noise +} + +template < class TNonlinearities > +void +rate_neuron_ipn< TNonlinearities >::State_::set( const DictionaryDatum& d, Node* node ) +{ + updateValueParam< double >( d, names::rate, rate_, node ); // Rate +} + +template < class TNonlinearities > +rate_neuron_ipn< TNonlinearities >::Buffers_::Buffers_( rate_neuron_ipn< TNonlinearities >& n ) + : logger_( n ) +{ +} + +template < class TNonlinearities > +rate_neuron_ipn< TNonlinearities >::Buffers_::Buffers_( const Buffers_&, rate_neuron_ipn< TNonlinearities >& n ) + : logger_( n ) +{ +} + +/* ---------------------------------------------------------------- + * Default and copy constructor for node + * ---------------------------------------------------------------- */ + +template < class TNonlinearities > +rate_neuron_ipn< TNonlinearities >::rate_neuron_ipn() + : ArchivingNode() + , P_() + , S_() + , B_( *this ) +{ + recordablesMap_.create(); + Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); +} + +template < class TNonlinearities > +rate_neuron_ipn< TNonlinearities >::rate_neuron_ipn( const rate_neuron_ipn& n ) + : ArchivingNode( n ) + , nonlinearities_( n.nonlinearities_ ) + , P_( n.P_ ) + , S_( n.S_ ) + , B_( n.B_, *this ) +{ + Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); +} + +/* ---------------------------------------------------------------- + * Node initialization functions + * ---------------------------------------------------------------- */ + +template < class TNonlinearities > +void +rate_neuron_ipn< TNonlinearities >::init_buffers_() +{ + B_.delayed_rates_ex_.clear(); // includes resize + B_.delayed_rates_in_.clear(); // includes resize + + // resize buffers + const size_t buffer_size = kernel().connection_manager.get_min_delay(); 
+ B_.instant_rates_ex_.resize( buffer_size, 0.0 ); + B_.instant_rates_in_.resize( buffer_size, 0.0 ); + B_.last_y_values.resize( buffer_size, 0.0 ); + B_.random_numbers.resize( buffer_size, numerics::nan ); + + // initialize random numbers + for ( unsigned int i = 0; i < buffer_size; i++ ) + { + B_.random_numbers[ i ] = V_.normal_dist_( get_vp_specific_rng( get_thread() ) ); + } + + B_.logger_.reset(); // includes resize + ArchivingNode::clear_history(); +} + +template < class TNonlinearities > +void +rate_neuron_ipn< TNonlinearities >::pre_run_hook() +{ + B_.logger_.init(); // ensures initialization in case mm connected after Simulate + + const double h = Time::get_resolution().get_ms(); + + if ( P_.lambda_ > 0 ) + { + // use stochastic exponential Euler method + V_.P1_ = std::exp( -P_.lambda_ * h / P_.tau_ ); + V_.P2_ = -1.0 / P_.lambda_ * numerics::expm1( -P_.lambda_ * h / P_.tau_ ); + V_.input_noise_factor_ = std::sqrt( -0.5 / P_.lambda_ * numerics::expm1( -2. * P_.lambda_ * h / P_.tau_ ) ); + } + else + { + // use Euler-Maruyama method + V_.P1_ = 1; + V_.P2_ = h / P_.tau_; + V_.input_noise_factor_ = std::sqrt( h / P_.tau_ ); + } +} + +/* ---------------------------------------------------------------- + * Update and event handling functions + */ + +template < class TNonlinearities > +bool +rate_neuron_ipn< TNonlinearities >::update_( Time const& origin, + const long from, + const long to, + const bool called_from_wfr_update ) +{ + const size_t buffer_size = kernel().connection_manager.get_min_delay(); + const double wfr_tol = kernel().simulation_manager.get_wfr_tol(); + bool wfr_tol_exceeded = false; + + // allocate memory to store rates to be sent by rate events + std::vector< double > new_rates( buffer_size, 0.0 ); + + for ( long lag = from; lag < to; ++lag ) + { + // store rate + new_rates[ lag ] = S_.rate_; + // get noise + S_.noise_ = P_.sigma_ * B_.random_numbers[ lag ]; + // propagate rate to new time step (exponential integration) + S_.rate_ = V_.P1_ * 
new_rates[ lag ] + V_.P2_ * P_.mu_ + V_.input_noise_factor_ * S_.noise_; + + double delayed_rates_in = 0; + double delayed_rates_ex = 0; + if ( called_from_wfr_update ) + { + // use get_value_wfr_update to keep values in buffer + delayed_rates_in = B_.delayed_rates_in_.get_value_wfr_update( lag ); + delayed_rates_ex = B_.delayed_rates_ex_.get_value_wfr_update( lag ); + } + else + { + // use get_value to clear values in buffer after reading + delayed_rates_in = B_.delayed_rates_in_.get_value( lag ); + delayed_rates_ex = B_.delayed_rates_ex_.get_value( lag ); + } + double instant_rates_in = B_.instant_rates_in_[ lag ]; + double instant_rates_ex = B_.instant_rates_ex_[ lag ]; + double H_ex = 1.; // valid value for non-multiplicative coupling + double H_in = 1.; // valid value for non-multiplicative coupling + if ( P_.mult_coupling_ ) + { + H_ex = nonlinearities_.mult_coupling_ex( new_rates[ lag ] ); + H_in = nonlinearities_.mult_coupling_in( new_rates[ lag ] ); + } + + if ( P_.linear_summation_ ) + { + // In this case we explicitly need to distinguish the cases of + // multiplicative coupling and non-multiplicative coupling in + // order to compute input( ex + in ) instead of input(ex) + input(in) in + // the non-multiplicative case. + if ( P_.mult_coupling_ ) + { + S_.rate_ += V_.P2_ * H_ex * nonlinearities_.input( delayed_rates_ex + instant_rates_ex ); + S_.rate_ += V_.P2_ * H_in * nonlinearities_.input( delayed_rates_in + instant_rates_in ); + } + else + { + S_.rate_ += + V_.P2_ * nonlinearities_.input( delayed_rates_ex + instant_rates_ex + delayed_rates_in + instant_rates_in ); + } + } + else + { + // In this case multiplicative and non-multiplicative coupling + // can be handled with the same code. 
+ S_.rate_ += V_.P2_ * H_ex * ( delayed_rates_ex + instant_rates_ex ); + S_.rate_ += V_.P2_ * H_in * ( delayed_rates_in + instant_rates_in ); + } + + if ( P_.rectify_output_ and S_.rate_ < P_.rectify_rate_ ) + { + S_.rate_ = P_.rectify_rate_; + } + + if ( called_from_wfr_update ) + { + // check if deviation from last iteration exceeds wfr_tol + wfr_tol_exceeded = wfr_tol_exceeded or fabs( S_.rate_ - B_.last_y_values[ lag ] ) > wfr_tol; + // update last_y_values for next wfr iteration + B_.last_y_values[ lag ] = S_.rate_; + } + else + { + // rate logging + B_.logger_.record_data( origin.get_steps() + lag ); + } + } + + if ( not called_from_wfr_update ) + { + // Send delay-rate-neuron-event. This only happens in the final iteration + // to avoid accumulation in the buffers of the receiving neurons. + DelayedRateConnectionEvent drve; + drve.set_coeffarray( new_rates ); + kernel().event_delivery_manager.send_secondary( *this, drve ); + + // clear last_y_values + std::vector< double >( buffer_size, 0.0 ).swap( B_.last_y_values ); + + // modifiy new_rates for rate-neuron-event as proxy for next min_delay + for ( long temp = from; temp < to; ++temp ) + { + new_rates[ temp ] = S_.rate_; + } + + // create new random numbers + B_.random_numbers.resize( buffer_size, numerics::nan ); + for ( unsigned int i = 0; i < buffer_size; i++ ) + { + B_.random_numbers[ i ] = V_.normal_dist_( get_vp_specific_rng( get_thread() ) ); + } + } + + // Send rate-neuron-event + InstantaneousRateConnectionEvent rve; + rve.set_coeffarray( new_rates ); + kernel().event_delivery_manager.send_secondary( *this, rve ); + + // Reset variables + std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_ex_ ); + std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_in_ ); + + return wfr_tol_exceeded; +} + + +template < class TNonlinearities > +void +rate_neuron_ipn< TNonlinearities >::handle( InstantaneousRateConnectionEvent& e ) +{ + const double weight = e.get_weight(); + + size_t i = 
0; + std::vector< unsigned int >::iterator it = e.begin(); + // The call to get_coeffvalue( it ) in this loop also advances the iterator it + while ( it != e.end() ) + { + if ( P_.linear_summation_ ) + { + if ( weight >= 0.0 ) + { + B_.instant_rates_ex_[ i ] += weight * e.get_coeffvalue( it ); + } + else + { + B_.instant_rates_in_[ i ] += weight * e.get_coeffvalue( it ); + } + } + else + { + if ( weight >= 0.0 ) + { + B_.instant_rates_ex_[ i ] += weight * nonlinearities_.input( e.get_coeffvalue( it ) ); + } + else + { + B_.instant_rates_in_[ i ] += weight * nonlinearities_.input( e.get_coeffvalue( it ) ); + } + } + i++; + } +} + +template < class TNonlinearities > +void +rate_neuron_ipn< TNonlinearities >::handle( DelayedRateConnectionEvent& e ) +{ + const double weight = e.get_weight(); + const long delay = e.get_delay_steps() - kernel().connection_manager.get_min_delay(); + + size_t i = 0; + std::vector< unsigned int >::iterator it = e.begin(); + // The call to get_coeffvalue( it ) in this loop also advances the iterator it + while ( it != e.end() ) + { + if ( P_.linear_summation_ ) + { + if ( weight >= 0.0 ) + { + B_.delayed_rates_ex_.add_value( delay + i, weight * e.get_coeffvalue( it ) ); + } + else + { + B_.delayed_rates_in_.add_value( delay + i, weight * e.get_coeffvalue( it ) ); + } + } + else + { + if ( weight >= 0.0 ) + { + B_.delayed_rates_ex_.add_value( delay + i, weight * nonlinearities_.input( e.get_coeffvalue( it ) ) ); + } + else + { + B_.delayed_rates_in_.add_value( delay + i, weight * nonlinearities_.input( e.get_coeffvalue( it ) ) ); + } + } + ++i; + } +} + +template < class TNonlinearities > +void +rate_neuron_ipn< TNonlinearities >::handle( DataLoggingRequest& e ) +{ + B_.logger_.handle( e ); +} + } // namespace #endif /* #ifndef RATE_NEURON_IPN_H */ diff --git a/models/rate_neuron_ipn_impl.h b/models/rate_neuron_ipn_impl.h deleted file mode 100644 index 286c22c0af..0000000000 --- a/models/rate_neuron_ipn_impl.h +++ /dev/null @@ -1,479 +0,0 @@ 
-/* - * rate_neuron_ipn_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#ifndef RATE_NEURON_IPN_IMPL_H -#define RATE_NEURON_IPN_IMPL_H - -#include "rate_neuron_ipn.h" - -// C++ includes: -#include // in case we need isnan() // fabs -#include -#include -#include -#include -#include - -// Includes from libnestutil: -#include "dict_util.h" -#include "numerics.h" - -// Includes from nestkernel: -#include "exceptions.h" -#include "kernel_manager.h" -#include "universal_data_logger_impl.h" - -// Includes from sli: -#include "dict.h" -#include "dictutils.h" -#include "doubledatum.h" -#include "integerdatum.h" - -namespace nest -{ - -/* ---------------------------------------------------------------- - * Recordables map - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -RecordablesMap< rate_neuron_ipn< TNonlinearities > > rate_neuron_ipn< TNonlinearities >::recordablesMap_; - -/* ---------------------------------------------------------------- - * Default constructors defining default parameters and state - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -nest::rate_neuron_ipn< TNonlinearities >::Parameters_::Parameters_() - : tau_( 10.0 ) // ms - , lambda_( 1.0 ) // ms - , sigma_( 1.0 ) - , mu_( 0.0 ) - , 
rectify_rate_( 0.0 ) - , linear_summation_( true ) - , rectify_output_( false ) - , mult_coupling_( false ) -{ - recordablesMap_.create(); -} - -template < class TNonlinearities > -nest::rate_neuron_ipn< TNonlinearities >::State_::State_() - : rate_( 0.0 ) - , noise_( 0.0 ) -{ -} - -/* ---------------------------------------------------------------- - * Parameter and state extractions and manipulation functions - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -void -nest::rate_neuron_ipn< TNonlinearities >::Parameters_::get( DictionaryDatum& d ) const -{ - def< double >( d, names::tau, tau_ ); - def< double >( d, names::lambda, lambda_ ); - def< double >( d, names::sigma, sigma_ ); - def< double >( d, names::mu, mu_ ); - def< double >( d, names::rectify_rate, rectify_rate_ ); - def< bool >( d, names::linear_summation, linear_summation_ ); - def< bool >( d, names::rectify_output, rectify_output_ ); - def< bool >( d, names::mult_coupling, mult_coupling_ ); - - // Also allow old names (to not break old scripts) - def< double >( d, names::std, sigma_ ); - def< double >( d, names::mean, mu_ ); -} - -template < class TNonlinearities > -void -nest::rate_neuron_ipn< TNonlinearities >::Parameters_::set( const DictionaryDatum& d, Node* node ) -{ - updateValueParam< double >( d, names::tau, tau_, node ); - updateValueParam< double >( d, names::lambda, lambda_, node ); - updateValueParam< double >( d, names::mu, mu_, node ); - updateValueParam< double >( d, names::rectify_rate, rectify_rate_, node ); - updateValueParam< double >( d, names::sigma, sigma_, node ); - updateValueParam< bool >( d, names::linear_summation, linear_summation_, node ); - updateValueParam< bool >( d, names::rectify_output, rectify_output_, node ); - updateValueParam< bool >( d, names::mult_coupling, mult_coupling_, node ); - - // Check for old names - if ( updateValueParam< double >( d, names::mean, mu_, node ) ) - { - LOG( M_WARNING, - 
"rate_neuron_ipn< TNonlinearities >::Parameters_::set", - "The parameter mean has been renamed to mu. Please use the new " - "name from now on." ); - } - - if ( updateValueParam< double >( d, names::std, sigma_, node ) ) - { - LOG( M_WARNING, - "rate_neuron_ipn< TNonlinearities >::Parameters_::set", - "The parameter std has been renamed to sigma. Please use the new " - "name from now on." ); - } - - // Check for invalid parameters - if ( tau_ <= 0 ) - { - throw BadProperty( "Time constant must be > 0." ); - } - if ( lambda_ < 0 ) - { - throw BadProperty( "Passive decay rate must be >= 0." ); - } - if ( sigma_ < 0 ) - { - throw BadProperty( "Noise parameter must not be negative." ); - } - if ( rectify_rate_ < 0 ) - { - throw BadProperty( "Rectifying rate must not be negative." ); - } -} - -template < class TNonlinearities > -void -nest::rate_neuron_ipn< TNonlinearities >::State_::get( DictionaryDatum& d ) const -{ - def< double >( d, names::rate, rate_ ); // Rate - def< double >( d, names::noise, noise_ ); // Noise -} - -template < class TNonlinearities > -void -nest::rate_neuron_ipn< TNonlinearities >::State_::set( const DictionaryDatum& d, Node* node ) -{ - updateValueParam< double >( d, names::rate, rate_, node ); // Rate -} - -template < class TNonlinearities > -nest::rate_neuron_ipn< TNonlinearities >::Buffers_::Buffers_( rate_neuron_ipn< TNonlinearities >& n ) - : logger_( n ) -{ -} - -template < class TNonlinearities > -nest::rate_neuron_ipn< TNonlinearities >::Buffers_::Buffers_( const Buffers_&, rate_neuron_ipn< TNonlinearities >& n ) - : logger_( n ) -{ -} - -/* ---------------------------------------------------------------- - * Default and copy constructor for node - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -nest::rate_neuron_ipn< TNonlinearities >::rate_neuron_ipn() - : ArchivingNode() - , P_() - , S_() - , B_( *this ) -{ - recordablesMap_.create(); - Node::set_node_uses_wfr( 
kernel().simulation_manager.use_wfr() ); -} - -template < class TNonlinearities > -nest::rate_neuron_ipn< TNonlinearities >::rate_neuron_ipn( const rate_neuron_ipn& n ) - : ArchivingNode( n ) - , nonlinearities_( n.nonlinearities_ ) - , P_( n.P_ ) - , S_( n.S_ ) - , B_( n.B_, *this ) -{ - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); -} - -/* ---------------------------------------------------------------- - * Node initialization functions - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -void -nest::rate_neuron_ipn< TNonlinearities >::init_buffers_() -{ - B_.delayed_rates_ex_.clear(); // includes resize - B_.delayed_rates_in_.clear(); // includes resize - - // resize buffers - const size_t buffer_size = kernel().connection_manager.get_min_delay(); - B_.instant_rates_ex_.resize( buffer_size, 0.0 ); - B_.instant_rates_in_.resize( buffer_size, 0.0 ); - B_.last_y_values.resize( buffer_size, 0.0 ); - B_.random_numbers.resize( buffer_size, numerics::nan ); - - // initialize random numbers - for ( unsigned int i = 0; i < buffer_size; i++ ) - { - B_.random_numbers[ i ] = V_.normal_dist_( get_vp_specific_rng( get_thread() ) ); - } - - B_.logger_.reset(); // includes resize - ArchivingNode::clear_history(); -} - -template < class TNonlinearities > -void -nest::rate_neuron_ipn< TNonlinearities >::pre_run_hook() -{ - B_.logger_.init(); // ensures initialization in case mm connected after Simulate - - const double h = Time::get_resolution().get_ms(); - - if ( P_.lambda_ > 0 ) - { - // use stochastic exponential Euler method - V_.P1_ = std::exp( -P_.lambda_ * h / P_.tau_ ); - V_.P2_ = -1.0 / P_.lambda_ * numerics::expm1( -P_.lambda_ * h / P_.tau_ ); - V_.input_noise_factor_ = std::sqrt( -0.5 / P_.lambda_ * numerics::expm1( -2. 
* P_.lambda_ * h / P_.tau_ ) ); - } - else - { - // use Euler-Maruyama method - V_.P1_ = 1; - V_.P2_ = h / P_.tau_; - V_.input_noise_factor_ = std::sqrt( h / P_.tau_ ); - } -} - -/* ---------------------------------------------------------------- - * Update and event handling functions - */ - -template < class TNonlinearities > -bool -nest::rate_neuron_ipn< TNonlinearities >::update_( Time const& origin, - const long from, - const long to, - const bool called_from_wfr_update ) -{ - const size_t buffer_size = kernel().connection_manager.get_min_delay(); - const double wfr_tol = kernel().simulation_manager.get_wfr_tol(); - bool wfr_tol_exceeded = false; - - // allocate memory to store rates to be sent by rate events - std::vector< double > new_rates( buffer_size, 0.0 ); - - for ( long lag = from; lag < to; ++lag ) - { - // store rate - new_rates[ lag ] = S_.rate_; - // get noise - S_.noise_ = P_.sigma_ * B_.random_numbers[ lag ]; - // propagate rate to new time step (exponential integration) - S_.rate_ = V_.P1_ * new_rates[ lag ] + V_.P2_ * P_.mu_ + V_.input_noise_factor_ * S_.noise_; - - double delayed_rates_in = 0; - double delayed_rates_ex = 0; - if ( called_from_wfr_update ) - { - // use get_value_wfr_update to keep values in buffer - delayed_rates_in = B_.delayed_rates_in_.get_value_wfr_update( lag ); - delayed_rates_ex = B_.delayed_rates_ex_.get_value_wfr_update( lag ); - } - else - { - // use get_value to clear values in buffer after reading - delayed_rates_in = B_.delayed_rates_in_.get_value( lag ); - delayed_rates_ex = B_.delayed_rates_ex_.get_value( lag ); - } - double instant_rates_in = B_.instant_rates_in_[ lag ]; - double instant_rates_ex = B_.instant_rates_ex_[ lag ]; - double H_ex = 1.; // valid value for non-multiplicative coupling - double H_in = 1.; // valid value for non-multiplicative coupling - if ( P_.mult_coupling_ ) - { - H_ex = nonlinearities_.mult_coupling_ex( new_rates[ lag ] ); - H_in = nonlinearities_.mult_coupling_in( new_rates[ lag ] ); 
- } - - if ( P_.linear_summation_ ) - { - // In this case we explicitly need to distinguish the cases of - // multiplicative coupling and non-multiplicative coupling in - // order to compute input( ex + in ) instead of input(ex) + input(in) in - // the non-multiplicative case. - if ( P_.mult_coupling_ ) - { - S_.rate_ += V_.P2_ * H_ex * nonlinearities_.input( delayed_rates_ex + instant_rates_ex ); - S_.rate_ += V_.P2_ * H_in * nonlinearities_.input( delayed_rates_in + instant_rates_in ); - } - else - { - S_.rate_ += - V_.P2_ * nonlinearities_.input( delayed_rates_ex + instant_rates_ex + delayed_rates_in + instant_rates_in ); - } - } - else - { - // In this case multiplicative and non-multiplicative coupling - // can be handled with the same code. - S_.rate_ += V_.P2_ * H_ex * ( delayed_rates_ex + instant_rates_ex ); - S_.rate_ += V_.P2_ * H_in * ( delayed_rates_in + instant_rates_in ); - } - - if ( P_.rectify_output_ and S_.rate_ < P_.rectify_rate_ ) - { - S_.rate_ = P_.rectify_rate_; - } - - if ( called_from_wfr_update ) - { - // check if deviation from last iteration exceeds wfr_tol - wfr_tol_exceeded = wfr_tol_exceeded or fabs( S_.rate_ - B_.last_y_values[ lag ] ) > wfr_tol; - // update last_y_values for next wfr iteration - B_.last_y_values[ lag ] = S_.rate_; - } - else - { - // rate logging - B_.logger_.record_data( origin.get_steps() + lag ); - } - } - - if ( not called_from_wfr_update ) - { - // Send delay-rate-neuron-event. This only happens in the final iteration - // to avoid accumulation in the buffers of the receiving neurons. 
- DelayedRateConnectionEvent drve; - drve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, drve ); - - // clear last_y_values - std::vector< double >( buffer_size, 0.0 ).swap( B_.last_y_values ); - - // modifiy new_rates for rate-neuron-event as proxy for next min_delay - for ( long temp = from; temp < to; ++temp ) - { - new_rates[ temp ] = S_.rate_; - } - - // create new random numbers - B_.random_numbers.resize( buffer_size, numerics::nan ); - for ( unsigned int i = 0; i < buffer_size; i++ ) - { - B_.random_numbers[ i ] = V_.normal_dist_( get_vp_specific_rng( get_thread() ) ); - } - } - - // Send rate-neuron-event - InstantaneousRateConnectionEvent rve; - rve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, rve ); - - // Reset variables - std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_ex_ ); - std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_in_ ); - - return wfr_tol_exceeded; -} - - -template < class TNonlinearities > -void -nest::rate_neuron_ipn< TNonlinearities >::handle( InstantaneousRateConnectionEvent& e ) -{ - const double weight = e.get_weight(); - - size_t i = 0; - std::vector< unsigned int >::iterator it = e.begin(); - // The call to get_coeffvalue( it ) in this loop also advances the iterator it - while ( it != e.end() ) - { - if ( P_.linear_summation_ ) - { - if ( weight >= 0.0 ) - { - B_.instant_rates_ex_[ i ] += weight * e.get_coeffvalue( it ); - } - else - { - B_.instant_rates_in_[ i ] += weight * e.get_coeffvalue( it ); - } - } - else - { - if ( weight >= 0.0 ) - { - B_.instant_rates_ex_[ i ] += weight * nonlinearities_.input( e.get_coeffvalue( it ) ); - } - else - { - B_.instant_rates_in_[ i ] += weight * nonlinearities_.input( e.get_coeffvalue( it ) ); - } - } - i++; - } -} - -template < class TNonlinearities > -void -nest::rate_neuron_ipn< TNonlinearities >::handle( DelayedRateConnectionEvent& e ) -{ - const double weight = 
e.get_weight(); - const long delay = e.get_delay_steps() - kernel().connection_manager.get_min_delay(); - - size_t i = 0; - std::vector< unsigned int >::iterator it = e.begin(); - // The call to get_coeffvalue( it ) in this loop also advances the iterator it - while ( it != e.end() ) - { - if ( P_.linear_summation_ ) - { - if ( weight >= 0.0 ) - { - B_.delayed_rates_ex_.add_value( delay + i, weight * e.get_coeffvalue( it ) ); - } - else - { - B_.delayed_rates_in_.add_value( delay + i, weight * e.get_coeffvalue( it ) ); - } - } - else - { - if ( weight >= 0.0 ) - { - B_.delayed_rates_ex_.add_value( delay + i, weight * nonlinearities_.input( e.get_coeffvalue( it ) ) ); - } - else - { - B_.delayed_rates_in_.add_value( delay + i, weight * nonlinearities_.input( e.get_coeffvalue( it ) ) ); - } - } - ++i; - } -} - -template < class TNonlinearities > -void -nest::rate_neuron_ipn< TNonlinearities >::handle( DataLoggingRequest& e ) -{ - B_.logger_.handle( e ); -} - -} // namespace - -#endif /* #ifndef RATE_NEURON_IPN_IMPL_H */ diff --git a/models/rate_neuron_opn.h b/models/rate_neuron_opn.h index 1d4232c111..e08448ced6 100644 --- a/models/rate_neuron_opn.h +++ b/models/rate_neuron_opn.h @@ -23,17 +23,23 @@ #ifndef RATE_NEURON_OPN_H #define RATE_NEURON_OPN_H -// Generated includes: -#include "config.h" - // C++ includes: +#include // in case we need isnan() // fabs #include +// Includes from libnestutil: +#include "numerics.h" + +// Includes from sli: +#include "dict.h" +#include "dictutils.h" + // Includes from nestkernel: #include "archiving_node.h" #include "connection.h" #include "event.h" -#include "nest_types.h" +#include "exceptions.h" +#include "kernel_manager.h" #include "node.h" #include "random_generators.h" #include "recordables_map.h" @@ -383,6 +389,403 @@ rate_neuron_opn< TNonlinearities >::set_status( const DictionaryDatum& d ) nonlinearities_.set( d, this ); } +/* ---------------------------------------------------------------- + * Recordables map + * 
---------------------------------------------------------------- */ + +template < class TNonlinearities > +RecordablesMap< rate_neuron_opn< TNonlinearities > > rate_neuron_opn< TNonlinearities >::recordablesMap_; + + +/* ---------------------------------------------------------------- + * Default constructors defining default parameters and state + * ---------------------------------------------------------------- */ + +template < class TNonlinearities > +nest::rate_neuron_opn< TNonlinearities >::Parameters_::Parameters_() + : tau_( 10.0 ) // ms + , sigma_( 1.0 ) + , mu_( 0.0 ) + , linear_summation_( true ) + , mult_coupling_( false ) +{ + recordablesMap_.create(); +} + +template < class TNonlinearities > +nest::rate_neuron_opn< TNonlinearities >::State_::State_() + : rate_( 0.0 ) + , noise_( 0.0 ) + , noisy_rate_( 0.0 ) +{ +} + +/* ---------------------------------------------------------------- + * Parameter and state extractions and manipulation functions + * ---------------------------------------------------------------- */ + +template < class TNonlinearities > +void +nest::rate_neuron_opn< TNonlinearities >::Parameters_::get( DictionaryDatum& d ) const +{ + def< double >( d, names::tau, tau_ ); + def< double >( d, names::sigma, sigma_ ); + def< double >( d, names::mu, mu_ ); + def< bool >( d, names::linear_summation, linear_summation_ ); + def< bool >( d, names::mult_coupling, mult_coupling_ ); + + // Also allow old names (to not break old scripts) + def< double >( d, names::std, sigma_ ); + def< double >( d, names::mean, mu_ ); +} + +template < class TNonlinearities > +void +nest::rate_neuron_opn< TNonlinearities >::Parameters_::set( const DictionaryDatum& d, Node* node ) +{ + updateValueParam< double >( d, names::tau, tau_, node ); + updateValueParam< double >( d, names::mu, mu_, node ); + updateValueParam< double >( d, names::sigma, sigma_, node ); + updateValueParam< bool >( d, names::linear_summation, linear_summation_, node ); + updateValueParam< bool 
>( d, names::mult_coupling, mult_coupling_, node ); + + // Check for old names + if ( updateValueParam< double >( d, names::mean, mu_, node ) ) + { + LOG( M_WARNING, + "rate_neuron_opn< TNonlinearities >::Parameters_::set", + "The parameter mean has been renamed to mu. Please use the new " + "name from now on." ); + } + + if ( updateValueParam< double >( d, names::std, sigma_, node ) ) + { + LOG( M_WARNING, + "rate_neuron_opn< TNonlinearities >::Parameters_::set", + "The parameter std has been renamed to sigma. Please use the new " + "name from now on." ); + } + + // Check for invalid parameters + if ( tau_ <= 0 ) + { + throw BadProperty( "Time constant must be > 0." ); + } + if ( sigma_ < 0 ) + { + throw BadProperty( "Noise parameter must not be negative." ); + } +} + +template < class TNonlinearities > +void +nest::rate_neuron_opn< TNonlinearities >::State_::get( DictionaryDatum& d ) const +{ + def< double >( d, names::rate, rate_ ); // Rate + def< double >( d, names::noise, noise_ ); // Noise + def< double >( d, names::noisy_rate, noisy_rate_ ); // Noisy rate +} + +template < class TNonlinearities > +void +nest::rate_neuron_opn< TNonlinearities >::State_::set( const DictionaryDatum& d, Node* node ) +{ + updateValueParam< double >( d, names::rate, rate_, node ); // Rate +} + +template < class TNonlinearities > +nest::rate_neuron_opn< TNonlinearities >::Buffers_::Buffers_( rate_neuron_opn< TNonlinearities >& n ) + : logger_( n ) +{ +} + +template < class TNonlinearities > +nest::rate_neuron_opn< TNonlinearities >::Buffers_::Buffers_( const Buffers_&, rate_neuron_opn< TNonlinearities >& n ) + : logger_( n ) +{ +} + +/* ---------------------------------------------------------------- + * Default and copy constructor for node + * ---------------------------------------------------------------- */ + +template < class TNonlinearities > +nest::rate_neuron_opn< TNonlinearities >::rate_neuron_opn() + : ArchivingNode() + , P_() + , S_() + , B_( *this ) +{ + 
recordablesMap_.create(); + Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); +} + +template < class TNonlinearities > +nest::rate_neuron_opn< TNonlinearities >::rate_neuron_opn( const rate_neuron_opn& n ) + : ArchivingNode( n ) + , P_( n.P_ ) + , S_( n.S_ ) + , B_( n.B_, *this ) +{ + Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); +} + +/* ---------------------------------------------------------------- + * Node initialization functions + * ---------------------------------------------------------------- */ + +template < class TNonlinearities > +void +nest::rate_neuron_opn< TNonlinearities >::init_buffers_() +{ + B_.delayed_rates_ex_.clear(); // includes resize + B_.delayed_rates_in_.clear(); // includes resize + + // resize buffers + const size_t buffer_size = kernel().connection_manager.get_min_delay(); + B_.instant_rates_ex_.resize( buffer_size, 0.0 ); + B_.instant_rates_in_.resize( buffer_size, 0.0 ); + B_.last_y_values.resize( buffer_size, 0.0 ); + B_.random_numbers.resize( buffer_size, numerics::nan ); + + // initialize random numbers + for ( unsigned int i = 0; i < buffer_size; i++ ) + { + B_.random_numbers[ i ] = V_.normal_dist_( get_vp_specific_rng( get_thread() ) ); + } + + B_.logger_.reset(); // includes resize + ArchivingNode::clear_history(); +} + +template < class TNonlinearities > +void +nest::rate_neuron_opn< TNonlinearities >::pre_run_hook() +{ + B_.logger_.init(); // ensures initialization in case mm connected after Simulate + + const double h = Time::get_resolution().get_ms(); + + // propagators + V_.P1_ = std::exp( -h / P_.tau_ ); + V_.P2_ = -numerics::expm1( -h / P_.tau_ ); + + // Gaussian white noise approximated by piecewise constant value + V_.output_noise_factor_ = std::sqrt( P_.tau_ / h ); +} + +/* ---------------------------------------------------------------- + * Update and event handling functions + */ + +template < class TNonlinearities > +bool +nest::rate_neuron_opn< TNonlinearities >::update_( Time 
const& origin, + const long from, + const long to, + const bool called_from_wfr_update ) +{ + const size_t buffer_size = kernel().connection_manager.get_min_delay(); + const double wfr_tol = kernel().simulation_manager.get_wfr_tol(); + bool wfr_tol_exceeded = false; + + // allocate memory to store rates to be sent by rate events + std::vector< double > new_rates( buffer_size, 0.0 ); + + for ( long lag = from; lag < to; ++lag ) + { + // get noise + S_.noise_ = P_.sigma_ * B_.random_numbers[ lag ]; + // the noise is added to the noisy_rate variable + S_.noisy_rate_ = S_.rate_ + V_.output_noise_factor_ * S_.noise_; + // store rate + new_rates[ lag ] = S_.noisy_rate_; + // propagate rate to new time step (exponential integration) + S_.rate_ = V_.P1_ * S_.rate_ + V_.P2_ * P_.mu_; + + double delayed_rates_in = 0; + double delayed_rates_ex = 0; + if ( called_from_wfr_update ) + { + // use get_value_wfr_update to keep values in buffer + delayed_rates_in = B_.delayed_rates_in_.get_value_wfr_update( lag ); + delayed_rates_ex = B_.delayed_rates_ex_.get_value_wfr_update( lag ); + } + else + { + // use get_value to clear values in buffer after reading + delayed_rates_in = B_.delayed_rates_in_.get_value( lag ); + delayed_rates_ex = B_.delayed_rates_ex_.get_value( lag ); + } + double instant_rates_in = B_.instant_rates_in_[ lag ]; + double instant_rates_ex = B_.instant_rates_ex_[ lag ]; + double H_ex = 1.; // valid value for non-multiplicative coupling + double H_in = 1.; // valid value for non-multiplicative coupling + if ( P_.mult_coupling_ ) + { + H_ex = nonlinearities_.mult_coupling_ex( new_rates[ lag ] ); + H_in = nonlinearities_.mult_coupling_in( new_rates[ lag ] ); + } + + if ( P_.linear_summation_ ) + { + // In this case we explicitly need to distinguish the cases of + // multiplicative coupling and non-multiplicative coupling in + // order to compute input( ex + in ) instead of input(ex) + input(in) in + // the non-multiplicative case. 
+ if ( P_.mult_coupling_ ) + { + S_.rate_ += V_.P2_ * H_ex * nonlinearities_.input( delayed_rates_ex + instant_rates_ex ); + S_.rate_ += V_.P2_ * H_in * nonlinearities_.input( delayed_rates_in + instant_rates_in ); + } + else + { + S_.rate_ += + V_.P2_ * nonlinearities_.input( delayed_rates_ex + instant_rates_ex + delayed_rates_in + instant_rates_in ); + } + } + else + { + // In this case multiplicative and non-multiplicative coupling + // can be handled with the same code. + S_.rate_ += V_.P2_ * H_ex * ( delayed_rates_ex + instant_rates_ex ); + S_.rate_ += V_.P2_ * H_in * ( delayed_rates_in + instant_rates_in ); + } + + if ( called_from_wfr_update ) + { + // check if deviation from last iteration exceeds wfr_tol + wfr_tol_exceeded = wfr_tol_exceeded or fabs( S_.rate_ - B_.last_y_values[ lag ] ) > wfr_tol; + // update last_y_values for next wfr iteration + B_.last_y_values[ lag ] = S_.rate_; + } + else + { + // rate logging + B_.logger_.record_data( origin.get_steps() + lag ); + } + } + + if ( not called_from_wfr_update ) + { + // Send delay-rate-neuron-event. This only happens in the final iteration + // to avoid accumulation in the buffers of the receiving neurons. 
+ DelayedRateConnectionEvent drve; + drve.set_coeffarray( new_rates ); + kernel().event_delivery_manager.send_secondary( *this, drve ); + + // clear last_y_values + std::vector< double >( buffer_size, 0.0 ).swap( B_.last_y_values ); + + // modify new_rates for rate-neuron-event as proxy for next min_delay + for ( long temp = from; temp < to; ++temp ) + { + new_rates[ temp ] = S_.noisy_rate_; + } + + // create new random numbers + B_.random_numbers.resize( buffer_size, numerics::nan ); + for ( unsigned int i = 0; i < buffer_size; i++ ) + { + B_.random_numbers[ i ] = V_.normal_dist_( get_vp_specific_rng( get_thread() ) ); + } + } + + // Send rate-neuron-event + InstantaneousRateConnectionEvent rve; + rve.set_coeffarray( new_rates ); + kernel().event_delivery_manager.send_secondary( *this, rve ); + + // Reset variables + std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_ex_ ); + std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_in_ ); + + return wfr_tol_exceeded; +} + + +template < class TNonlinearities > +void +nest::rate_neuron_opn< TNonlinearities >::handle( InstantaneousRateConnectionEvent& e ) +{ + const double weight = e.get_weight(); + + size_t i = 0; + std::vector< unsigned int >::iterator it = e.begin(); + // The call to get_coeffvalue( it ) in this loop also advances the iterator it + while ( it != e.end() ) + { + if ( P_.linear_summation_ ) + { + if ( weight >= 0.0 ) + { + B_.instant_rates_ex_[ i ] += weight * e.get_coeffvalue( it ); + } + else + { + B_.instant_rates_in_[ i ] += weight * e.get_coeffvalue( it ); + } + } + else + { + if ( weight >= 0.0 ) + { + B_.instant_rates_ex_[ i ] += weight * nonlinearities_.input( e.get_coeffvalue( it ) ); + } + else + { + B_.instant_rates_in_[ i ] += weight * nonlinearities_.input( e.get_coeffvalue( it ) ); + } + } + i++; + } +} + +template < class TNonlinearities > +void +nest::rate_neuron_opn< TNonlinearities >::handle( DelayedRateConnectionEvent& e ) +{ + const double weight = 
e.get_weight(); + const long delay = e.get_delay_steps() - kernel().connection_manager.get_min_delay(); + + size_t i = 0; + std::vector< unsigned int >::iterator it = e.begin(); + // The call to get_coeffvalue( it ) in this loop also advances the iterator it + while ( it != e.end() ) + { + if ( P_.linear_summation_ ) + { + if ( weight >= 0.0 ) + { + B_.delayed_rates_ex_.add_value( delay + i, weight * e.get_coeffvalue( it ) ); + } + else + { + B_.delayed_rates_in_.add_value( delay + i, weight * e.get_coeffvalue( it ) ); + } + } + else + { + if ( weight >= 0.0 ) + { + B_.delayed_rates_ex_.add_value( delay + i, weight * nonlinearities_.input( e.get_coeffvalue( it ) ) ); + } + else + { + B_.delayed_rates_in_.add_value( delay + i, weight * nonlinearities_.input( e.get_coeffvalue( it ) ) ); + } + } + ++i; + } +} + +template < class TNonlinearities > +void +nest::rate_neuron_opn< TNonlinearities >::handle( DataLoggingRequest& e ) +{ + B_.logger_.handle( e ); +} + } // namespace #endif /* #ifndef RATE_NEURON_OPN_H */ diff --git a/models/rate_neuron_opn_impl.h b/models/rate_neuron_opn_impl.h deleted file mode 100644 index 2479fe8c23..0000000000 --- a/models/rate_neuron_opn_impl.h +++ /dev/null @@ -1,452 +0,0 @@ -/* - * rate_neuron_opn_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . 
- * - */ - -#ifndef RATE_NEURON_OPN_IMPL_H -#define RATE_NEURON_OPN_IMPL_H - -#include "rate_neuron_opn.h" - -// C++ includes: -#include // in case we need isnan() // fabs -#include -#include -#include -#include -#include - -// Includes from libnestutil: -#include "numerics.h" - -// Includes from nestkernel: -#include "exceptions.h" -#include "kernel_manager.h" -#include "universal_data_logger_impl.h" - -// Includes from sli: -#include "dict.h" -#include "dictutils.h" -#include "doubledatum.h" -#include "integerdatum.h" - -namespace nest -{ - -/* ---------------------------------------------------------------- - * Recordables map - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -RecordablesMap< rate_neuron_opn< TNonlinearities > > rate_neuron_opn< TNonlinearities >::recordablesMap_; - - -/* ---------------------------------------------------------------- - * Default constructors defining default parameters and state - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -nest::rate_neuron_opn< TNonlinearities >::Parameters_::Parameters_() - : tau_( 10.0 ) // ms - , sigma_( 1.0 ) - , mu_( 0.0 ) - , linear_summation_( true ) - , mult_coupling_( false ) -{ - recordablesMap_.create(); -} - -template < class TNonlinearities > -nest::rate_neuron_opn< TNonlinearities >::State_::State_() - : rate_( 0.0 ) - , noise_( 0.0 ) - , noisy_rate_( 0.0 ) -{ -} - -/* ---------------------------------------------------------------- - * Parameter and state extractions and manipulation functions - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -void -nest::rate_neuron_opn< TNonlinearities >::Parameters_::get( DictionaryDatum& d ) const -{ - def< double >( d, names::tau, tau_ ); - def< double >( d, names::sigma, sigma_ ); - def< double >( d, names::mu, mu_ ); - def< bool >( d, names::linear_summation, 
linear_summation_ ); - def< bool >( d, names::mult_coupling, mult_coupling_ ); - - // Also allow old names (to not break old scripts) - def< double >( d, names::std, sigma_ ); - def< double >( d, names::mean, mu_ ); -} - -template < class TNonlinearities > -void -nest::rate_neuron_opn< TNonlinearities >::Parameters_::set( const DictionaryDatum& d, Node* node ) -{ - updateValueParam< double >( d, names::tau, tau_, node ); - updateValueParam< double >( d, names::mu, mu_, node ); - updateValueParam< double >( d, names::sigma, sigma_, node ); - updateValueParam< bool >( d, names::linear_summation, linear_summation_, node ); - updateValueParam< bool >( d, names::mult_coupling, mult_coupling_, node ); - - // Check for old names - if ( updateValueParam< double >( d, names::mean, mu_, node ) ) - { - LOG( M_WARNING, - "rate_neuron_opn< TNonlinearities >::Parameters_::set", - "The parameter mean has been renamed to mu. Please use the new " - "name from now on." ); - } - - if ( updateValueParam< double >( d, names::std, sigma_, node ) ) - { - LOG( M_WARNING, - "rate_neuron_opn< TNonlinearities >::Parameters_::set", - "The parameter std has been renamed to sigma. Please use the new " - "name from now on." ); - } - - // Check for invalid parameters - if ( tau_ <= 0 ) - { - throw BadProperty( "Time constant must be > 0." ); - } - if ( sigma_ < 0 ) - { - throw BadProperty( "Noise parameter must not be negative." 
); - } -} - -template < class TNonlinearities > -void -nest::rate_neuron_opn< TNonlinearities >::State_::get( DictionaryDatum& d ) const -{ - def< double >( d, names::rate, rate_ ); // Rate - def< double >( d, names::noise, noise_ ); // Noise - def< double >( d, names::noisy_rate, noisy_rate_ ); // Noisy rate -} - -template < class TNonlinearities > -void -nest::rate_neuron_opn< TNonlinearities >::State_::set( const DictionaryDatum& d, Node* node ) -{ - updateValueParam< double >( d, names::rate, rate_, node ); // Rate -} - -template < class TNonlinearities > -nest::rate_neuron_opn< TNonlinearities >::Buffers_::Buffers_( rate_neuron_opn< TNonlinearities >& n ) - : logger_( n ) -{ -} - -template < class TNonlinearities > -nest::rate_neuron_opn< TNonlinearities >::Buffers_::Buffers_( const Buffers_&, rate_neuron_opn< TNonlinearities >& n ) - : logger_( n ) -{ -} - -/* ---------------------------------------------------------------- - * Default and copy constructor for node - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -nest::rate_neuron_opn< TNonlinearities >::rate_neuron_opn() - : ArchivingNode() - , P_() - , S_() - , B_( *this ) -{ - recordablesMap_.create(); - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); -} - -template < class TNonlinearities > -nest::rate_neuron_opn< TNonlinearities >::rate_neuron_opn( const rate_neuron_opn& n ) - : ArchivingNode( n ) - , P_( n.P_ ) - , S_( n.S_ ) - , B_( n.B_, *this ) -{ - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); -} - -/* ---------------------------------------------------------------- - * Node initialization functions - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -void -nest::rate_neuron_opn< TNonlinearities >::init_buffers_() -{ - B_.delayed_rates_ex_.clear(); // includes resize - B_.delayed_rates_in_.clear(); // includes resize - - // resize buffers - 
const size_t buffer_size = kernel().connection_manager.get_min_delay(); - B_.instant_rates_ex_.resize( buffer_size, 0.0 ); - B_.instant_rates_in_.resize( buffer_size, 0.0 ); - B_.last_y_values.resize( buffer_size, 0.0 ); - B_.random_numbers.resize( buffer_size, numerics::nan ); - - // initialize random numbers - for ( unsigned int i = 0; i < buffer_size; i++ ) - { - B_.random_numbers[ i ] = V_.normal_dist_( get_vp_specific_rng( get_thread() ) ); - } - - B_.logger_.reset(); // includes resize - ArchivingNode::clear_history(); -} - -template < class TNonlinearities > -void -nest::rate_neuron_opn< TNonlinearities >::pre_run_hook() -{ - B_.logger_.init(); // ensures initialization in case mm connected after Simulate - - const double h = Time::get_resolution().get_ms(); - - // propagators - V_.P1_ = std::exp( -h / P_.tau_ ); - V_.P2_ = -numerics::expm1( -h / P_.tau_ ); - - // Gaussian white noise approximated by piecewise constant value - V_.output_noise_factor_ = std::sqrt( P_.tau_ / h ); -} - -/* ---------------------------------------------------------------- - * Update and event handling functions - */ - -template < class TNonlinearities > -bool -nest::rate_neuron_opn< TNonlinearities >::update_( Time const& origin, - const long from, - const long to, - const bool called_from_wfr_update ) -{ - const size_t buffer_size = kernel().connection_manager.get_min_delay(); - const double wfr_tol = kernel().simulation_manager.get_wfr_tol(); - bool wfr_tol_exceeded = false; - - // allocate memory to store rates to be sent by rate events - std::vector< double > new_rates( buffer_size, 0.0 ); - - for ( long lag = from; lag < to; ++lag ) - { - // get noise - S_.noise_ = P_.sigma_ * B_.random_numbers[ lag ]; - // the noise is added to the noisy_rate variable - S_.noisy_rate_ = S_.rate_ + V_.output_noise_factor_ * S_.noise_; - // store rate - new_rates[ lag ] = S_.noisy_rate_; - // propagate rate to new time step (exponential integration) - S_.rate_ = V_.P1_ * S_.rate_ + V_.P2_ * 
P_.mu_; - - double delayed_rates_in = 0; - double delayed_rates_ex = 0; - if ( called_from_wfr_update ) - { - // use get_value_wfr_update to keep values in buffer - delayed_rates_in = B_.delayed_rates_in_.get_value_wfr_update( lag ); - delayed_rates_ex = B_.delayed_rates_ex_.get_value_wfr_update( lag ); - } - else - { - // use get_value to clear values in buffer after reading - delayed_rates_in = B_.delayed_rates_in_.get_value( lag ); - delayed_rates_ex = B_.delayed_rates_ex_.get_value( lag ); - } - double instant_rates_in = B_.instant_rates_in_[ lag ]; - double instant_rates_ex = B_.instant_rates_ex_[ lag ]; - double H_ex = 1.; // valid value for non-multiplicative coupling - double H_in = 1.; // valid value for non-multiplicative coupling - if ( P_.mult_coupling_ ) - { - H_ex = nonlinearities_.mult_coupling_ex( new_rates[ lag ] ); - H_in = nonlinearities_.mult_coupling_in( new_rates[ lag ] ); - } - - if ( P_.linear_summation_ ) - { - // In this case we explicitly need to distinguish the cases of - // multiplicative coupling and non-multiplicative coupling in - // order to compute input( ex + in ) instead of input(ex) + input(in) in - // the non-multiplicative case. - if ( P_.mult_coupling_ ) - { - S_.rate_ += V_.P2_ * H_ex * nonlinearities_.input( delayed_rates_ex + instant_rates_ex ); - S_.rate_ += V_.P2_ * H_in * nonlinearities_.input( delayed_rates_in + instant_rates_in ); - } - else - { - S_.rate_ += - V_.P2_ * nonlinearities_.input( delayed_rates_ex + instant_rates_ex + delayed_rates_in + instant_rates_in ); - } - } - else - { - // In this case multiplicative and non-multiplicative coupling - // can be handled with the same code. 
- S_.rate_ += V_.P2_ * H_ex * ( delayed_rates_ex + instant_rates_ex ); - S_.rate_ += V_.P2_ * H_in * ( delayed_rates_in + instant_rates_in ); - } - - if ( called_from_wfr_update ) - { - // check if deviation from last iteration exceeds wfr_tol - wfr_tol_exceeded = wfr_tol_exceeded or fabs( S_.rate_ - B_.last_y_values[ lag ] ) > wfr_tol; - // update last_y_values for next wfr iteration - B_.last_y_values[ lag ] = S_.rate_; - } - else - { - // rate logging - B_.logger_.record_data( origin.get_steps() + lag ); - } - } - - if ( not called_from_wfr_update ) - { - // Send delay-rate-neuron-event. This only happens in the final iteration - // to avoid accumulation in the buffers of the receiving neurons. - DelayedRateConnectionEvent drve; - drve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, drve ); - - // clear last_y_values - std::vector< double >( buffer_size, 0.0 ).swap( B_.last_y_values ); - - // modify new_rates for rate-neuron-event as proxy for next min_delay - for ( long temp = from; temp < to; ++temp ) - { - new_rates[ temp ] = S_.noisy_rate_; - } - - // create new random numbers - B_.random_numbers.resize( buffer_size, numerics::nan ); - for ( unsigned int i = 0; i < buffer_size; i++ ) - { - B_.random_numbers[ i ] = V_.normal_dist_( get_vp_specific_rng( get_thread() ) ); - } - } - - // Send rate-neuron-event - InstantaneousRateConnectionEvent rve; - rve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, rve ); - - // Reset variables - std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_ex_ ); - std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_in_ ); - - return wfr_tol_exceeded; -} - - -template < class TNonlinearities > -void -nest::rate_neuron_opn< TNonlinearities >::handle( InstantaneousRateConnectionEvent& e ) -{ - const double weight = e.get_weight(); - - size_t i = 0; - std::vector< unsigned int >::iterator it = e.begin(); - // The call to get_coeffvalue( 
it ) in this loop also advances the iterator it - while ( it != e.end() ) - { - if ( P_.linear_summation_ ) - { - if ( weight >= 0.0 ) - { - B_.instant_rates_ex_[ i ] += weight * e.get_coeffvalue( it ); - } - else - { - B_.instant_rates_in_[ i ] += weight * e.get_coeffvalue( it ); - } - } - else - { - if ( weight >= 0.0 ) - { - B_.instant_rates_ex_[ i ] += weight * nonlinearities_.input( e.get_coeffvalue( it ) ); - } - else - { - B_.instant_rates_in_[ i ] += weight * nonlinearities_.input( e.get_coeffvalue( it ) ); - } - } - i++; - } -} - -template < class TNonlinearities > -void -nest::rate_neuron_opn< TNonlinearities >::handle( DelayedRateConnectionEvent& e ) -{ - const double weight = e.get_weight(); - const long delay = e.get_delay_steps() - kernel().connection_manager.get_min_delay(); - - size_t i = 0; - std::vector< unsigned int >::iterator it = e.begin(); - // The call to get_coeffvalue( it ) in this loop also advances the iterator it - while ( it != e.end() ) - { - if ( P_.linear_summation_ ) - { - if ( weight >= 0.0 ) - { - B_.delayed_rates_ex_.add_value( delay + i, weight * e.get_coeffvalue( it ) ); - } - else - { - B_.delayed_rates_in_.add_value( delay + i, weight * e.get_coeffvalue( it ) ); - } - } - else - { - if ( weight >= 0.0 ) - { - B_.delayed_rates_ex_.add_value( delay + i, weight * nonlinearities_.input( e.get_coeffvalue( it ) ) ); - } - else - { - B_.delayed_rates_in_.add_value( delay + i, weight * nonlinearities_.input( e.get_coeffvalue( it ) ) ); - } - } - ++i; - } -} - -template < class TNonlinearities > -void -nest::rate_neuron_opn< TNonlinearities >::handle( DataLoggingRequest& e ) -{ - B_.logger_.handle( e ); -} - -} // namespace - -#endif /* #ifndef RATE_NEURON_OPN_IMPL_H */ diff --git a/models/rate_transformer_node.h b/models/rate_transformer_node.h index 50383f18f7..7f3390c9a9 100644 --- a/models/rate_transformer_node.h +++ b/models/rate_transformer_node.h @@ -327,6 +327,268 @@ rate_transformer_node< TNonlinearities >::set_status( const 
DictionaryDatum& d ) nonlinearities_.set( d, this ); } +/* ---------------------------------------------------------------- + * Recordables map + * ---------------------------------------------------------------- */ + +template < class TNonlinearities > +RecordablesMap< rate_transformer_node< TNonlinearities > > rate_transformer_node< TNonlinearities >::recordablesMap_; + +/* ---------------------------------------------------------------- + * Default constructors defining default parameters and state + * ---------------------------------------------------------------- */ + +template < class TNonlinearities > +nest::rate_transformer_node< TNonlinearities >::Parameters_::Parameters_() + : linear_summation_( true ) +{ +} + +template < class TNonlinearities > +nest::rate_transformer_node< TNonlinearities >::State_::State_() + : rate_( 0.0 ) +{ +} + +/* ---------------------------------------------------------------- + * Parameter and state extractions and manipulation functions + * ---------------------------------------------------------------- */ + +template < class TNonlinearities > +void +nest::rate_transformer_node< TNonlinearities >::Parameters_::get( DictionaryDatum& d ) const +{ + def< bool >( d, names::linear_summation, linear_summation_ ); +} + +template < class TNonlinearities > +void +nest::rate_transformer_node< TNonlinearities >::Parameters_::set( const DictionaryDatum& d, Node* node ) +{ + updateValueParam< bool >( d, names::linear_summation, linear_summation_, node ); +} + +template < class TNonlinearities > +void +nest::rate_transformer_node< TNonlinearities >::State_::get( DictionaryDatum& d ) const +{ + def< double >( d, names::rate, rate_ ); // Rate +} + +template < class TNonlinearities > +void +nest::rate_transformer_node< TNonlinearities >::State_::set( const DictionaryDatum& d, Node* node ) +{ + updateValueParam< double >( d, names::rate, rate_, node ); // Rate +} + +template < class TNonlinearities > +nest::rate_transformer_node< 
TNonlinearities >::Buffers_::Buffers_( rate_transformer_node< TNonlinearities >& n ) + : logger_( n ) +{ +} + +template < class TNonlinearities > +nest::rate_transformer_node< TNonlinearities >::Buffers_::Buffers_( const Buffers_&, + rate_transformer_node< TNonlinearities >& n ) + : logger_( n ) +{ +} + +/* ---------------------------------------------------------------- + * Default and copy constructor for node + * ---------------------------------------------------------------- */ + +template < class TNonlinearities > +nest::rate_transformer_node< TNonlinearities >::rate_transformer_node() + : ArchivingNode() + , S_() + , B_( *this ) +{ + recordablesMap_.create(); + Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); +} + +template < class TNonlinearities > +nest::rate_transformer_node< TNonlinearities >::rate_transformer_node( const rate_transformer_node& n ) + : ArchivingNode( n ) + , nonlinearities_( n.nonlinearities_ ) + , S_( n.S_ ) + , B_( n.B_, *this ) +{ + Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); +} + +/* ---------------------------------------------------------------- + * Node initialization functions + * ---------------------------------------------------------------- */ + +template < class TNonlinearities > +void +nest::rate_transformer_node< TNonlinearities >::init_buffers_() +{ + B_.delayed_rates_.clear(); // includes resize + + // resize buffers + const size_t buffer_size = kernel().connection_manager.get_min_delay(); + B_.instant_rates_.resize( buffer_size, 0.0 ); + B_.last_y_values.resize( buffer_size, 0.0 ); + + B_.logger_.reset(); // includes resize + ArchivingNode::clear_history(); +} + +template < class TNonlinearities > +void +nest::rate_transformer_node< TNonlinearities >::pre_run_hook() +{ + B_.logger_.init(); // ensures initialization in case mm connected after Simulate +} + +/* ---------------------------------------------------------------- + * Update and event handling functions + */ + +template < 
class TNonlinearities > +bool +nest::rate_transformer_node< TNonlinearities >::update_( Time const& origin, + const long from, + const long to, + const bool called_from_wfr_update ) +{ + const size_t buffer_size = kernel().connection_manager.get_min_delay(); + const double wfr_tol = kernel().simulation_manager.get_wfr_tol(); + bool wfr_tol_exceeded = false; + + // allocate memory to store rates to be sent by rate events + std::vector< double > new_rates( buffer_size, 0.0 ); + + for ( long lag = from; lag < to; ++lag ) + { + // store rate + new_rates[ lag ] = S_.rate_; + // reinitialize output rate + S_.rate_ = 0.0; + + double delayed_rates = 0; + if ( called_from_wfr_update ) + { + // use get_value_wfr_update to keep values in buffer + delayed_rates = B_.delayed_rates_.get_value_wfr_update( lag ); + } + else + { + // use get_value to clear values in buffer after reading + delayed_rates = B_.delayed_rates_.get_value( lag ); + } + + if ( P_.linear_summation_ ) + { + S_.rate_ += nonlinearities_.input( delayed_rates + B_.instant_rates_[ lag ] ); + } + else + { + S_.rate_ += delayed_rates + B_.instant_rates_[ lag ]; + } + + if ( called_from_wfr_update ) + { + // check if deviation from last iteration exceeds wfr_tol + wfr_tol_exceeded = wfr_tol_exceeded or fabs( S_.rate_ - B_.last_y_values[ lag ] ) > wfr_tol; + // update last_y_values for next wfr iteration + B_.last_y_values[ lag ] = S_.rate_; + } + else + { + // rate logging + B_.logger_.record_data( origin.get_steps() + lag ); + } + } + + if ( not called_from_wfr_update ) + { + // Send delay-rate-neuron-event. This only happens in the final iteration + // to avoid accumulation in the buffers of the receiving neurons. 
+ DelayedRateConnectionEvent drve; + drve.set_coeffarray( new_rates ); + kernel().event_delivery_manager.send_secondary( *this, drve ); + + // clear last_y_values + std::vector< double >( buffer_size, 0.0 ).swap( B_.last_y_values ); + + // modify new_rates for rate-neuron-event as proxy for next min_delay + for ( long temp = from; temp < to; ++temp ) + { + new_rates[ temp ] = S_.rate_; + } + } + + // Send rate-neuron-event + InstantaneousRateConnectionEvent rve; + rve.set_coeffarray( new_rates ); + kernel().event_delivery_manager.send_secondary( *this, rve ); + + // Reset variables + std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_ ); + + return wfr_tol_exceeded; +} + + +template < class TNonlinearities > +void +nest::rate_transformer_node< TNonlinearities >::handle( InstantaneousRateConnectionEvent& e ) +{ + const double weight = e.get_weight(); + + size_t i = 0; + std::vector< unsigned int >::iterator it = e.begin(); + // The call to get_coeffvalue( it ) in this loop also advances the iterator it + while ( it != e.end() ) + { + if ( P_.linear_summation_ ) + { + B_.instant_rates_[ i ] += weight * e.get_coeffvalue( it ); + } + else + { + B_.instant_rates_[ i ] += weight * nonlinearities_.input( e.get_coeffvalue( it ) ); + } + ++i; + } +} + +template < class TNonlinearities > +void +nest::rate_transformer_node< TNonlinearities >::handle( DelayedRateConnectionEvent& e ) +{ + const double weight = e.get_weight(); + const long delay = e.get_delay_steps() - kernel().connection_manager.get_min_delay(); + + size_t i = 0; + std::vector< unsigned int >::iterator it = e.begin(); + // The call to get_coeffvalue( it ) in this loop also advances the iterator it + while ( it != e.end() ) + { + if ( P_.linear_summation_ ) + { + B_.delayed_rates_.add_value( delay + i, weight * e.get_coeffvalue( it ) ); + } + else + { + B_.delayed_rates_.add_value( delay + i, weight * nonlinearities_.input( e.get_coeffvalue( it ) ) ); + } + ++i; + } +} + +template < class 
TNonlinearities > +void +nest::rate_transformer_node< TNonlinearities >::handle( DataLoggingRequest& e ) +{ + B_.logger_.handle( e ); +} + } // namespace #endif /* #ifndef RATE_TRANSFORMER_NODE_H */ diff --git a/models/rate_transformer_node_impl.h b/models/rate_transformer_node_impl.h deleted file mode 100644 index 2809b97130..0000000000 --- a/models/rate_transformer_node_impl.h +++ /dev/null @@ -1,319 +0,0 @@ -/* - * rate_transformer_node_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . 
- * - */ - -#ifndef RATE_TRANSFORMER_NODE_IMPL_H -#define RATE_TRANSFORMER_NODE_IMPL_H - -#include "rate_transformer_node.h" - -// C++ includes: -#include // in case we need isnan() // fabs -#include -#include -#include -#include -#include - -// Includes from libnestutil: -#include "dict_util.h" -#include "numerics.h" - - -// Includes from nestkernel: -#include "exceptions.h" -#include "kernel_manager.h" -#include "universal_data_logger_impl.h" - -// Includes from sli: -#include "dict.h" -#include "dictutils.h" -#include "doubledatum.h" -#include "integerdatum.h" - -namespace nest -{ - -/* ---------------------------------------------------------------- - * Recordables map - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -RecordablesMap< rate_transformer_node< TNonlinearities > > rate_transformer_node< TNonlinearities >::recordablesMap_; - -/* ---------------------------------------------------------------- - * Default constructors defining default parameters and state - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -nest::rate_transformer_node< TNonlinearities >::Parameters_::Parameters_() - : linear_summation_( true ) -{ -} - -template < class TNonlinearities > -nest::rate_transformer_node< TNonlinearities >::State_::State_() - : rate_( 0.0 ) -{ -} - -/* ---------------------------------------------------------------- - * Parameter and state extractions and manipulation functions - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -void -nest::rate_transformer_node< TNonlinearities >::Parameters_::get( DictionaryDatum& d ) const -{ - def< bool >( d, names::linear_summation, linear_summation_ ); -} - -template < class TNonlinearities > -void -nest::rate_transformer_node< TNonlinearities >::Parameters_::set( const DictionaryDatum& d, Node* node ) -{ - updateValueParam< bool >( d, 
names::linear_summation, linear_summation_, node ); -} - -template < class TNonlinearities > -void -nest::rate_transformer_node< TNonlinearities >::State_::get( DictionaryDatum& d ) const -{ - def< double >( d, names::rate, rate_ ); // Rate -} - -template < class TNonlinearities > -void -nest::rate_transformer_node< TNonlinearities >::State_::set( const DictionaryDatum& d, Node* node ) -{ - updateValueParam< double >( d, names::rate, rate_, node ); // Rate -} - -template < class TNonlinearities > -nest::rate_transformer_node< TNonlinearities >::Buffers_::Buffers_( rate_transformer_node< TNonlinearities >& n ) - : logger_( n ) -{ -} - -template < class TNonlinearities > -nest::rate_transformer_node< TNonlinearities >::Buffers_::Buffers_( const Buffers_&, - rate_transformer_node< TNonlinearities >& n ) - : logger_( n ) -{ -} - -/* ---------------------------------------------------------------- - * Default and copy constructor for node - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -nest::rate_transformer_node< TNonlinearities >::rate_transformer_node() - : ArchivingNode() - , S_() - , B_( *this ) -{ - recordablesMap_.create(); - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); -} - -template < class TNonlinearities > -nest::rate_transformer_node< TNonlinearities >::rate_transformer_node( const rate_transformer_node& n ) - : ArchivingNode( n ) - , nonlinearities_( n.nonlinearities_ ) - , S_( n.S_ ) - , B_( n.B_, *this ) -{ - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); -} - -/* ---------------------------------------------------------------- - * Node initialization functions - * ---------------------------------------------------------------- */ - -template < class TNonlinearities > -void -nest::rate_transformer_node< TNonlinearities >::init_buffers_() -{ - B_.delayed_rates_.clear(); // includes resize - - // resize buffers - const size_t buffer_size = 
kernel().connection_manager.get_min_delay(); - B_.instant_rates_.resize( buffer_size, 0.0 ); - B_.last_y_values.resize( buffer_size, 0.0 ); - - B_.logger_.reset(); // includes resize - ArchivingNode::clear_history(); -} - -template < class TNonlinearities > -void -nest::rate_transformer_node< TNonlinearities >::pre_run_hook() -{ - B_.logger_.init(); // ensures initialization in case mm connected after Simulate -} - -/* ---------------------------------------------------------------- - * Update and event handling functions - */ - -template < class TNonlinearities > -bool -nest::rate_transformer_node< TNonlinearities >::update_( Time const& origin, - const long from, - const long to, - const bool called_from_wfr_update ) -{ - const size_t buffer_size = kernel().connection_manager.get_min_delay(); - const double wfr_tol = kernel().simulation_manager.get_wfr_tol(); - bool wfr_tol_exceeded = false; - - // allocate memory to store rates to be sent by rate events - std::vector< double > new_rates( buffer_size, 0.0 ); - - for ( long lag = from; lag < to; ++lag ) - { - // store rate - new_rates[ lag ] = S_.rate_; - // reinitialize output rate - S_.rate_ = 0.0; - - double delayed_rates = 0; - if ( called_from_wfr_update ) - { - // use get_value_wfr_update to keep values in buffer - delayed_rates = B_.delayed_rates_.get_value_wfr_update( lag ); - } - else - { - // use get_value to clear values in buffer after reading - delayed_rates = B_.delayed_rates_.get_value( lag ); - } - - if ( P_.linear_summation_ ) - { - S_.rate_ += nonlinearities_.input( delayed_rates + B_.instant_rates_[ lag ] ); - } - else - { - S_.rate_ += delayed_rates + B_.instant_rates_[ lag ]; - } - - if ( called_from_wfr_update ) - { - // check if deviation from last iteration exceeds wfr_tol - wfr_tol_exceeded = wfr_tol_exceeded or fabs( S_.rate_ - B_.last_y_values[ lag ] ) > wfr_tol; - // update last_y_values for next wfr iteration - B_.last_y_values[ lag ] = S_.rate_; - } - else - { - // rate logging - 
B_.logger_.record_data( origin.get_steps() + lag ); - } - } - - if ( not called_from_wfr_update ) - { - // Send delay-rate-neuron-event. This only happens in the final iteration - // to avoid accumulation in the buffers of the receiving neurons. - DelayedRateConnectionEvent drve; - drve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, drve ); - - // clear last_y_values - std::vector< double >( buffer_size, 0.0 ).swap( B_.last_y_values ); - - // modifiy new_rates for rate-neuron-event as proxy for next min_delay - for ( long temp = from; temp < to; ++temp ) - { - new_rates[ temp ] = S_.rate_; - } - } - - // Send rate-neuron-event - InstantaneousRateConnectionEvent rve; - rve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, rve ); - - // Reset variables - std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_ ); - - return wfr_tol_exceeded; -} - - -template < class TNonlinearities > -void -nest::rate_transformer_node< TNonlinearities >::handle( InstantaneousRateConnectionEvent& e ) -{ - const double weight = e.get_weight(); - - size_t i = 0; - std::vector< unsigned int >::iterator it = e.begin(); - // The call to get_coeffvalue( it ) in this loop also advances the iterator it - while ( it != e.end() ) - { - if ( P_.linear_summation_ ) - { - B_.instant_rates_[ i ] += weight * e.get_coeffvalue( it ); - } - else - { - B_.instant_rates_[ i ] += weight * nonlinearities_.input( e.get_coeffvalue( it ) ); - } - ++i; - } -} - -template < class TNonlinearities > -void -nest::rate_transformer_node< TNonlinearities >::handle( DelayedRateConnectionEvent& e ) -{ - const double weight = e.get_weight(); - const long delay = e.get_delay_steps() - kernel().connection_manager.get_min_delay(); - - size_t i = 0; - std::vector< unsigned int >::iterator it = e.begin(); - // The call to get_coeffvalue( it ) in this loop also advances the iterator it - while ( it != e.end() ) - { - if ( P_.linear_summation_ 
) - { - B_.delayed_rates_.add_value( delay + i, weight * e.get_coeffvalue( it ) ); - } - else - { - B_.delayed_rates_.add_value( delay + i, weight * nonlinearities_.input( e.get_coeffvalue( it ) ) ); - } - ++i; - } -} - -template < class TNonlinearities > -void -nest::rate_transformer_node< TNonlinearities >::handle( DataLoggingRequest& e ) -{ - B_.logger_.handle( e ); -} - -} // namespace - -#endif /* #ifndef RATE_TRANSFORMER_NODE_IMPL_H */ diff --git a/models/sic_connection.cpp b/models/sic_connection.cpp index d10b3b412b..25edb56841 100644 --- a/models/sic_connection.cpp +++ b/models/sic_connection.cpp @@ -22,9 +22,6 @@ #include "sic_connection.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_sic_connection( const std::string& name ) { diff --git a/models/siegert_neuron.cpp b/models/siegert_neuron.cpp index ec39ab5cec..1dd35eb371 100644 --- a/models/siegert_neuron.cpp +++ b/models/siegert_neuron.cpp @@ -26,7 +26,6 @@ // C++ includes: #include // in case we need isnan() // fabs -#include #include // Includes from libnestutil: @@ -36,11 +35,8 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: -#include "dict.h" #include "dictutils.h" struct my_params diff --git a/models/sigmoid_rate.cpp b/models/sigmoid_rate.cpp index b50e5b5867..c201130cb4 100644 --- a/models/sigmoid_rate.cpp +++ b/models/sigmoid_rate.cpp @@ -22,10 +22,7 @@ #include "sigmoid_rate.h" -// Includes from nestkernel -#include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" +#include namespace nest { diff --git a/models/sigmoid_rate.h b/models/sigmoid_rate.h index 634ce53f9f..a675c77937 100644 --- a/models/sigmoid_rate.h +++ b/models/sigmoid_rate.h @@ -28,9 +28,7 @@ // Includes from models: #include "rate_neuron_ipn.h" -#include "rate_neuron_ipn_impl.h" #include "rate_transformer_node.h" -#include "rate_transformer_node_impl.h" 
namespace nest diff --git a/models/sigmoid_rate_gg_1998.cpp b/models/sigmoid_rate_gg_1998.cpp index 2fcea61a4a..d2772cd441 100644 --- a/models/sigmoid_rate_gg_1998.cpp +++ b/models/sigmoid_rate_gg_1998.cpp @@ -22,10 +22,7 @@ #include "sigmoid_rate_gg_1998.h" -// Includes from nestkernel -#include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" +#include namespace nest { diff --git a/models/sigmoid_rate_gg_1998.h b/models/sigmoid_rate_gg_1998.h index 6039d9692b..b12c2c7df7 100644 --- a/models/sigmoid_rate_gg_1998.h +++ b/models/sigmoid_rate_gg_1998.h @@ -28,9 +28,7 @@ // Includes from models: #include "rate_neuron_ipn.h" -#include "rate_neuron_ipn_impl.h" #include "rate_transformer_node.h" -#include "rate_transformer_node_impl.h" namespace nest { diff --git a/models/sinusoidal_gamma_generator.cpp b/models/sinusoidal_gamma_generator.cpp index 30c49fe411..18c7321c32 100644 --- a/models/sinusoidal_gamma_generator.cpp +++ b/models/sinusoidal_gamma_generator.cpp @@ -35,11 +35,9 @@ #include "numerics.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" +#include "event_delivery_manager.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "booldatum.h" diff --git a/models/sinusoidal_gamma_generator.h b/models/sinusoidal_gamma_generator.h index 2a570caa6b..f51956d5b0 100644 --- a/models/sinusoidal_gamma_generator.h +++ b/models/sinusoidal_gamma_generator.h @@ -33,7 +33,6 @@ // Includes from nestkernel: #include "connection.h" -#include "device_node.h" #include "event.h" #include "nest_types.h" #include "stimulation_device.h" diff --git a/models/sinusoidal_poisson_generator.cpp b/models/sinusoidal_poisson_generator.cpp index 6a24cfae5f..85248c4cb4 100644 --- a/models/sinusoidal_poisson_generator.cpp +++ b/models/sinusoidal_poisson_generator.cpp @@ -31,11 +31,9 @@ #include "numerics.h" // Includes from nestkernel: -#include 
"event_delivery_manager_impl.h" +#include "event_delivery_manager.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "booldatum.h" diff --git a/models/spike_dilutor.cpp b/models/spike_dilutor.cpp index b4d105a70d..906aeba557 100644 --- a/models/spike_dilutor.cpp +++ b/models/spike_dilutor.cpp @@ -26,11 +26,8 @@ #include "dict_util.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" // Includes from sli: #include "dict.h" diff --git a/models/spike_generator.cpp b/models/spike_generator.cpp index c6153ede4c..40032bded7 100644 --- a/models/spike_generator.cpp +++ b/models/spike_generator.cpp @@ -23,13 +23,12 @@ #include "spike_generator.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include "nest_impl.h" // Includes from libnestutil: #include "dict_util.h" +#include "event_delivery_manager.h" // Includes from sli: #include "arraydatum.h" diff --git a/models/spike_recorder.cpp b/models/spike_recorder.cpp index 5769fa60e7..760002abb7 100644 --- a/models/spike_recorder.cpp +++ b/models/spike_recorder.cpp @@ -27,10 +27,9 @@ #include "compose.hpp" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" +#include "nest.h" +#include "node_manager.h" // Includes from sli: #include "dict.h" diff --git a/models/spike_train_injector.cpp b/models/spike_train_injector.cpp index 9d596a8e5a..fc9b320d6e 100644 --- a/models/spike_train_injector.cpp +++ b/models/spike_train_injector.cpp @@ -23,14 +23,12 @@ #include "spike_train_injector.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "exceptions.h" #include "kernel_manager.h" -#include 
"model_manager_impl.h" -#include "nest_impl.h" // Includes from libnestutil: #include "dict_util.h" +#include "event_delivery_manager.h" // Includes from sli: #include "arraydatum.h" diff --git a/models/spin_detector.cpp b/models/spin_detector.cpp index 190ece15cb..d080eed688 100644 --- a/models/spin_detector.cpp +++ b/models/spin_detector.cpp @@ -22,19 +22,10 @@ #include "spin_detector.h" - -// Includes from libnestutil: -#include "compose.hpp" - // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" +#include -// Includes from sli: -#include "dict.h" -#include "dictutils.h" void nest::register_spin_detector( const std::string& name ) diff --git a/models/static_synapse.cpp b/models/static_synapse.cpp index 5149f4032f..181adc02f5 100644 --- a/models/static_synapse.cpp +++ b/models/static_synapse.cpp @@ -22,9 +22,6 @@ #include "static_synapse.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_static_synapse( const std::string& name ) { diff --git a/models/static_synapse.h b/models/static_synapse.h index 25ebf2ce61..aa184fca73 100644 --- a/models/static_synapse.h +++ b/models/static_synapse.h @@ -25,6 +25,8 @@ // Includes from nestkernel: #include "connection.h" +#include "event_delivery_manager.h" +#include "target_identifier.h" namespace nest { diff --git a/models/static_synapse_hom_w.cpp b/models/static_synapse_hom_w.cpp index 5b5694fa67..e39614598b 100644 --- a/models/static_synapse_hom_w.cpp +++ b/models/static_synapse_hom_w.cpp @@ -22,9 +22,6 @@ #include "static_synapse_hom_w.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_static_synapse_hom_w( const std::string& name ) { diff --git a/models/stdp_dopamine_synapse.cpp b/models/stdp_dopamine_synapse.cpp index 1a7ba75eca..29cf8171f9 100644 --- a/models/stdp_dopamine_synapse.cpp +++ b/models/stdp_dopamine_synapse.cpp @@ -24,10 +24,7 @@ // Includes from 
nestkernel: #include "common_synapse_properties.h" -#include "connector_model.h" -#include "event.h" #include "kernel_manager.h" -#include "nest_impl.h" // Includes from sli: #include "dictdatum.h" diff --git a/models/stdp_facetshw_synapse_hom.cpp b/models/stdp_facetshw_synapse_hom.cpp index 09c328f4b1..2b9dcd03b1 100644 --- a/models/stdp_facetshw_synapse_hom.cpp +++ b/models/stdp_facetshw_synapse_hom.cpp @@ -21,10 +21,6 @@ */ #include "stdp_facetshw_synapse_hom.h" -#include "stdp_facetshw_synapse_hom_impl.h" - -// Includes from nestkernel: -#include "nest_impl.h" void nest::register_stdp_facetshw_synapse_hom( const std::string& name ) diff --git a/models/stdp_facetshw_synapse_hom.h b/models/stdp_facetshw_synapse_hom.h index 2b103eed48..10287f5188 100644 --- a/models/stdp_facetshw_synapse_hom.h +++ b/models/stdp_facetshw_synapse_hom.h @@ -536,6 +536,302 @@ stdp_facetshw_synapse_hom< targetidentifierT >::send( Event& e, return true; } + +template < typename targetidentifierT > +STDPFACETSHWHomCommonProperties< targetidentifierT >::STDPFACETSHWHomCommonProperties() + : CommonSynapseProperties() + , tau_plus_( 20.0 ) + , tau_minus_( 20.0 ) + , Wmax_( 100.0 ) + , no_synapses_( 0 ) + , synapses_per_driver_( 50 ) // hardware efficiency of 50/256=20%, + // which is comparable to Fieres et al. 
(2008) + , driver_readout_time_( 15.0 ) // in ms; measured on hardware +{ + lookuptable_0_.resize( 16 ); + lookuptable_1_.resize( 16 ); + lookuptable_2_.resize( 16 ); + + // intermediate Guetig (mu=0.4) + // with r=4 bits and n=36 SSPs, see [3]_ + lookuptable_0_.at( 0 ) = 2; + lookuptable_0_.at( 1 ) = 3; + lookuptable_0_.at( 2 ) = 4; + lookuptable_0_.at( 3 ) = 4; + lookuptable_0_.at( 4 ) = 5; + lookuptable_0_.at( 5 ) = 6; + lookuptable_0_.at( 6 ) = 7; + lookuptable_0_.at( 7 ) = 8; + lookuptable_0_.at( 8 ) = 9; + lookuptable_0_.at( 9 ) = 10; + lookuptable_0_.at( 10 ) = 11; + lookuptable_0_.at( 11 ) = 12; + lookuptable_0_.at( 12 ) = 13; + lookuptable_0_.at( 13 ) = 14; + lookuptable_0_.at( 14 ) = 14; + lookuptable_0_.at( 15 ) = 15; + + lookuptable_1_.at( 0 ) = 0; + lookuptable_1_.at( 1 ) = 0; + lookuptable_1_.at( 2 ) = 1; + lookuptable_1_.at( 3 ) = 2; + lookuptable_1_.at( 4 ) = 3; + lookuptable_1_.at( 5 ) = 4; + lookuptable_1_.at( 6 ) = 5; + lookuptable_1_.at( 7 ) = 6; + lookuptable_1_.at( 8 ) = 7; + lookuptable_1_.at( 9 ) = 8; + lookuptable_1_.at( 10 ) = 9; + lookuptable_1_.at( 11 ) = 10; + lookuptable_1_.at( 12 ) = 10; + lookuptable_1_.at( 13 ) = 11; + lookuptable_1_.at( 14 ) = 12; + lookuptable_1_.at( 15 ) = 13; + + for ( size_t i = 0; i < lookuptable_0_.size(); ++i ) + { + lookuptable_2_.at( i ) = i; + } + + configbit_0_.resize( 4 ); + configbit_1_.resize( 4 ); + + // see [4]_ + configbit_0_.at( 0 ) = 0; + configbit_0_.at( 1 ) = 0; + configbit_0_.at( 2 ) = 1; + configbit_0_.at( 3 ) = 0; + configbit_1_.at( 0 ) = 0; + configbit_1_.at( 1 ) = 1; + configbit_1_.at( 2 ) = 0; + configbit_1_.at( 3 ) = 0; + + reset_pattern_.resize( 6 ); + for ( size_t i = 0; i < reset_pattern_.size(); ++i ) + { + reset_pattern_.at( i ) = true; + } + + weight_per_lut_entry_ = Wmax_ / ( lookuptable_0_.size() - 1 ); + calc_readout_cycle_duration_(); +} + +template < typename targetidentifierT > +void +STDPFACETSHWHomCommonProperties< targetidentifierT >::calc_readout_cycle_duration_() +{ + 
readout_cycle_duration_ = int( ( no_synapses_ - 1.0 ) / synapses_per_driver_ + 1.0 ) * driver_readout_time_; +} + +template < typename targetidentifierT > +void +STDPFACETSHWHomCommonProperties< targetidentifierT >::get_status( DictionaryDatum& d ) const +{ + CommonSynapseProperties::get_status( d ); + + def< double >( d, names::tau_plus, tau_plus_ ); + def< double >( d, names::tau_minus_stdp, tau_minus_ ); + def< double >( d, names::Wmax, Wmax_ ); + def< double >( d, names::weight_per_lut_entry, weight_per_lut_entry_ ); + + def< long >( d, names::no_synapses, no_synapses_ ); + def< long >( d, names::synapses_per_driver, synapses_per_driver_ ); + def< double >( d, names::driver_readout_time, driver_readout_time_ ); + def< double >( d, names::readout_cycle_duration, readout_cycle_duration_ ); + + ( *d )[ names::lookuptable_0 ] = IntVectorDatum( new std::vector< long >( lookuptable_0_ ) ); + ( *d )[ names::lookuptable_1 ] = IntVectorDatum( new std::vector< long >( lookuptable_1_ ) ); + ( *d )[ names::lookuptable_2 ] = IntVectorDatum( new std::vector< long >( lookuptable_2_ ) ); + ( *d )[ names::configbit_0 ] = IntVectorDatum( new std::vector< long >( configbit_0_ ) ); + ( *d )[ names::configbit_1 ] = IntVectorDatum( new std::vector< long >( configbit_1_ ) ); + ( *d )[ names::reset_pattern ] = IntVectorDatum( new std::vector< long >( reset_pattern_ ) ); +} + +template < typename targetidentifierT > +void +STDPFACETSHWHomCommonProperties< targetidentifierT >::set_status( const DictionaryDatum& d, ConnectorModel& cm ) +{ + CommonSynapseProperties::set_status( d, cm ); + + updateValue< double >( d, names::tau_plus, tau_plus_ ); + updateValue< double >( d, names::tau_minus_stdp, tau_minus_ ); + if ( updateValue< double >( d, names::Wmax, Wmax_ ) ) + { + weight_per_lut_entry_ = Wmax_ / ( lookuptable_0_.size() - 1 ); + } + + // TP: they should not be allowed to be changed! But needed for CopyModel ... 
+ updateValue< double >( d, names::weight_per_lut_entry, weight_per_lut_entry_ ); + updateValue< double >( d, names::readout_cycle_duration, readout_cycle_duration_ ); + if ( updateValue< long >( d, names::no_synapses, no_synapses_ ) ) + { + calc_readout_cycle_duration_(); + } + + if ( updateValue< long >( d, names::synapses_per_driver, synapses_per_driver_ ) ) + { + calc_readout_cycle_duration_(); + } + if ( updateValue< double >( d, names::driver_readout_time, driver_readout_time_ ) ) + { + calc_readout_cycle_duration_(); + } + + if ( d->known( names::lookuptable_0 ) ) + { + updateValue< std::vector< long > >( d, names::lookuptable_0, lookuptable_0_ ); + + // right size? + if ( lookuptable_0_.size() != lookuptable_1_.size() ) + { + throw BadProperty( "Look-up table has not 2^4 entries!" ); + } + + // are look-up table entries out of bounds? + for ( size_t i = 0; i < size_t( lookuptable_0_.size() ); ++i ) + { + if ( lookuptable_0_[ i ] < 0 or lookuptable_0_[ i ] > 15 ) + { + throw BadProperty( "Look-up table entries must be integers in [0,15]" ); + } + } + } + if ( d->known( names::lookuptable_1 ) ) + { + updateValue< std::vector< long > >( d, names::lookuptable_1, lookuptable_1_ ); + + // right size? + if ( lookuptable_1_.size() != lookuptable_0_.size() ) + { + throw BadProperty( "Look-up table has not 2^4 entries!" ); + } + + // are look-up table entries out of bounds? + for ( size_t i = 0; i < size_t( lookuptable_1_.size() ); ++i ) + { + if ( lookuptable_1_[ i ] < 0 or lookuptable_1_[ i ] > 15 ) + { + throw BadProperty( "Look-up table entries must be integers in [0,15]" ); + } + } + } + if ( d->known( names::lookuptable_2 ) ) + { + updateValue< std::vector< long > >( d, names::lookuptable_2, lookuptable_2_ ); + + // right size? + if ( lookuptable_2_.size() != lookuptable_0_.size() ) + { + throw BadProperty( "Look-up table has not 2^4 entries!" ); + } + + // are look-up table entries out of bounds? 
+ for ( size_t i = 0; i < size_t( lookuptable_2_.size() ); ++i ) + { + if ( lookuptable_2_[ i ] < 0 or lookuptable_2_[ i ] > 15 ) + { + throw BadProperty( "Look-up table entries must be integers in [0,15]" ); + } + } + } + + if ( d->known( names::configbit_0 ) ) + { + updateValue< std::vector< long > >( d, names::configbit_0, configbit_0_ ); + + // right size? + if ( configbit_0_.size() != 4 ) + { + throw BadProperty( "Wrong number of configuration bits (!=4)." ); + } + } + if ( d->known( names::configbit_1 ) ) + { + updateValue< std::vector< long > >( d, names::configbit_1, configbit_1_ ); + + // right size? + if ( configbit_1_.size() != 4 ) + { + throw BadProperty( "Wrong number of configuration bits (!=4)." ); + } + } + if ( d->known( names::reset_pattern ) ) + { + updateValue< std::vector< long > >( d, names::reset_pattern, reset_pattern_ ); + + // right size? + if ( reset_pattern_.size() != 6 ) + { + throw BadProperty( "Wrong number of reset bits (!=6)." ); + } + } +} + + +// +// Implementation of class stdp_facetshw_synapse_hom. 
+// +template < typename targetidentifierT > +stdp_facetshw_synapse_hom< targetidentifierT >::stdp_facetshw_synapse_hom() + : weight_( 1.0 ) + , a_causal_( 0.0 ) + , a_acausal_( 0.0 ) + , a_thresh_th_( 21.835 ) + , a_thresh_tl_( 21.835 ) // exp(-10ms/20ms) * 36SSPs + , init_flag_( false ) + , synapse_id_( 0 ) + , next_readout_time_( 0.0 ) + , discrete_weight_( 0 ) + , t_lastspike_( 0.0 ) +{ +} + +template < typename targetidentifierT > +void +stdp_facetshw_synapse_hom< targetidentifierT >::get_status( DictionaryDatum& d ) const +{ + // base class properties, different for individual synapse + ConnectionBase::get_status( d ); + def< double >( d, names::weight, weight_ ); + + // own properties, different for individual synapse + def< double >( d, names::a_causal, a_causal_ ); + def< double >( d, names::a_acausal, a_acausal_ ); + def< double >( d, names::a_thresh_th, a_thresh_th_ ); + def< double >( d, names::a_thresh_tl, a_thresh_tl_ ); + + def< bool >( d, names::init_flag, init_flag_ ); + def< long >( d, names::synapse_id, synapse_id_ ); + def< double >( d, names::next_readout_time, next_readout_time_ ); + // useful to get conversion before activity, but weight_per_lut_entry_ not + // known here + // def(d, "discrete_weight", + // entry_to_weight_(weight_to_entry_(weight_, + // weight_per_lut_entry_), weight_per_lut_entry_)); +} + +template < typename targetidentifierT > +void +stdp_facetshw_synapse_hom< targetidentifierT >::set_status( const DictionaryDatum& d, ConnectorModel& cm ) +{ + // base class properties + ConnectionBase::set_status( d, cm ); + updateValue< double >( d, names::weight, weight_ ); + + updateValue< double >( d, names::a_causal, a_causal_ ); + updateValue< double >( d, names::a_acausal, a_acausal_ ); + updateValue< double >( d, names::a_thresh_th, a_thresh_th_ ); + updateValue< double >( d, names::a_thresh_tl, a_thresh_tl_ ); + + updateValue< long >( d, names::synapse_id, synapse_id_ ); + + // TP: they should not be allowed to be changed! 
But needed for CopyModel ... + updateValue< bool >( d, names::init_flag, init_flag_ ); + updateValue< double >( d, names::next_readout_time, next_readout_time_ ); + + // setting discrete_weight_ does not make sense, is temporary variable +} + } // of namespace nest #endif // of #ifndef STDP_SYNAPSE_FACETSHW_HOM_H diff --git a/models/stdp_facetshw_synapse_hom_impl.h b/models/stdp_facetshw_synapse_hom_impl.h deleted file mode 100644 index 765b814715..0000000000 --- a/models/stdp_facetshw_synapse_hom_impl.h +++ /dev/null @@ -1,339 +0,0 @@ -/* - * stdp_facetshw_synapse_hom_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#ifndef STDP_SYNAPSE_FACETSHW_HOM_IMPL_H -#define STDP_SYNAPSE_FACETSHW_HOM_IMPL_H - -#include "stdp_facetshw_synapse_hom.h" - -// Includes from nestkernel: -#include "common_synapse_properties.h" -#include "connector_model.h" -#include "event.h" - -// Includes from sli: -#include "dictdatum.h" - -namespace nest -{ -// -// Implementation of class STDPFACETSHWHomCommonProperties. 
-// - -template < typename targetidentifierT > -STDPFACETSHWHomCommonProperties< targetidentifierT >::STDPFACETSHWHomCommonProperties() - : CommonSynapseProperties() - , tau_plus_( 20.0 ) - , tau_minus_( 20.0 ) - , Wmax_( 100.0 ) - , no_synapses_( 0 ) - , synapses_per_driver_( 50 ) // hardware efficiency of 50/256=20%, - // which is comparable to Fieres et al. (2008) - , driver_readout_time_( 15.0 ) // in ms; measured on hardware -{ - lookuptable_0_.resize( 16 ); - lookuptable_1_.resize( 16 ); - lookuptable_2_.resize( 16 ); - - // intermediate Guetig (mu=0.4) - // with r=4 bits and n=36 SSPs, see [3]_ - lookuptable_0_.at( 0 ) = 2; - lookuptable_0_.at( 1 ) = 3; - lookuptable_0_.at( 2 ) = 4; - lookuptable_0_.at( 3 ) = 4; - lookuptable_0_.at( 4 ) = 5; - lookuptable_0_.at( 5 ) = 6; - lookuptable_0_.at( 6 ) = 7; - lookuptable_0_.at( 7 ) = 8; - lookuptable_0_.at( 8 ) = 9; - lookuptable_0_.at( 9 ) = 10; - lookuptable_0_.at( 10 ) = 11; - lookuptable_0_.at( 11 ) = 12; - lookuptable_0_.at( 12 ) = 13; - lookuptable_0_.at( 13 ) = 14; - lookuptable_0_.at( 14 ) = 14; - lookuptable_0_.at( 15 ) = 15; - - lookuptable_1_.at( 0 ) = 0; - lookuptable_1_.at( 1 ) = 0; - lookuptable_1_.at( 2 ) = 1; - lookuptable_1_.at( 3 ) = 2; - lookuptable_1_.at( 4 ) = 3; - lookuptable_1_.at( 5 ) = 4; - lookuptable_1_.at( 6 ) = 5; - lookuptable_1_.at( 7 ) = 6; - lookuptable_1_.at( 8 ) = 7; - lookuptable_1_.at( 9 ) = 8; - lookuptable_1_.at( 10 ) = 9; - lookuptable_1_.at( 11 ) = 10; - lookuptable_1_.at( 12 ) = 10; - lookuptable_1_.at( 13 ) = 11; - lookuptable_1_.at( 14 ) = 12; - lookuptable_1_.at( 15 ) = 13; - - for ( size_t i = 0; i < lookuptable_0_.size(); ++i ) - { - lookuptable_2_.at( i ) = i; - } - - configbit_0_.resize( 4 ); - configbit_1_.resize( 4 ); - - // see [4]_ - configbit_0_.at( 0 ) = 0; - configbit_0_.at( 1 ) = 0; - configbit_0_.at( 2 ) = 1; - configbit_0_.at( 3 ) = 0; - configbit_1_.at( 0 ) = 0; - configbit_1_.at( 1 ) = 1; - configbit_1_.at( 2 ) = 0; - configbit_1_.at( 3 ) = 0; - - 
reset_pattern_.resize( 6 ); - for ( size_t i = 0; i < reset_pattern_.size(); ++i ) - { - reset_pattern_.at( i ) = true; - } - - weight_per_lut_entry_ = Wmax_ / ( lookuptable_0_.size() - 1 ); - calc_readout_cycle_duration_(); -} - -template < typename targetidentifierT > -void -STDPFACETSHWHomCommonProperties< targetidentifierT >::calc_readout_cycle_duration_() -{ - readout_cycle_duration_ = int( ( no_synapses_ - 1.0 ) / synapses_per_driver_ + 1.0 ) * driver_readout_time_; -} - -template < typename targetidentifierT > -void -STDPFACETSHWHomCommonProperties< targetidentifierT >::get_status( DictionaryDatum& d ) const -{ - CommonSynapseProperties::get_status( d ); - - def< double >( d, names::tau_plus, tau_plus_ ); - def< double >( d, names::tau_minus_stdp, tau_minus_ ); - def< double >( d, names::Wmax, Wmax_ ); - def< double >( d, names::weight_per_lut_entry, weight_per_lut_entry_ ); - - def< long >( d, names::no_synapses, no_synapses_ ); - def< long >( d, names::synapses_per_driver, synapses_per_driver_ ); - def< double >( d, names::driver_readout_time, driver_readout_time_ ); - def< double >( d, names::readout_cycle_duration, readout_cycle_duration_ ); - - ( *d )[ names::lookuptable_0 ] = IntVectorDatum( new std::vector< long >( lookuptable_0_ ) ); - ( *d )[ names::lookuptable_1 ] = IntVectorDatum( new std::vector< long >( lookuptable_1_ ) ); - ( *d )[ names::lookuptable_2 ] = IntVectorDatum( new std::vector< long >( lookuptable_2_ ) ); - ( *d )[ names::configbit_0 ] = IntVectorDatum( new std::vector< long >( configbit_0_ ) ); - ( *d )[ names::configbit_1 ] = IntVectorDatum( new std::vector< long >( configbit_1_ ) ); - ( *d )[ names::reset_pattern ] = IntVectorDatum( new std::vector< long >( reset_pattern_ ) ); -} - -template < typename targetidentifierT > -void -STDPFACETSHWHomCommonProperties< targetidentifierT >::set_status( const DictionaryDatum& d, ConnectorModel& cm ) -{ - CommonSynapseProperties::set_status( d, cm ); - - updateValue< double >( d, 
names::tau_plus, tau_plus_ ); - updateValue< double >( d, names::tau_minus_stdp, tau_minus_ ); - if ( updateValue< double >( d, names::Wmax, Wmax_ ) ) - { - weight_per_lut_entry_ = Wmax_ / ( lookuptable_0_.size() - 1 ); - } - - // TP: they should not be allowed to be changed! But needed for CopyModel ... - updateValue< double >( d, names::weight_per_lut_entry, weight_per_lut_entry_ ); - updateValue< double >( d, names::readout_cycle_duration, readout_cycle_duration_ ); - if ( updateValue< long >( d, names::no_synapses, no_synapses_ ) ) - { - calc_readout_cycle_duration_(); - } - - if ( updateValue< long >( d, names::synapses_per_driver, synapses_per_driver_ ) ) - { - calc_readout_cycle_duration_(); - } - if ( updateValue< double >( d, names::driver_readout_time, driver_readout_time_ ) ) - { - calc_readout_cycle_duration_(); - } - - if ( d->known( names::lookuptable_0 ) ) - { - updateValue< std::vector< long > >( d, names::lookuptable_0, lookuptable_0_ ); - - // right size? - if ( lookuptable_0_.size() != lookuptable_1_.size() ) - { - throw BadProperty( "Look-up table has not 2^4 entries!" ); - } - - // are look-up table entries out of bounds? - for ( size_t i = 0; i < size_t( lookuptable_0_.size() ); ++i ) - { - if ( lookuptable_0_[ i ] < 0 or lookuptable_0_[ i ] > 15 ) - { - throw BadProperty( "Look-up table entries must be integers in [0,15]" ); - } - } - } - if ( d->known( names::lookuptable_1 ) ) - { - updateValue< std::vector< long > >( d, names::lookuptable_1, lookuptable_1_ ); - - // right size? - if ( lookuptable_1_.size() != lookuptable_0_.size() ) - { - throw BadProperty( "Look-up table has not 2^4 entries!" ); - } - - // are look-up table entries out of bounds? 
- for ( size_t i = 0; i < size_t( lookuptable_1_.size() ); ++i ) - { - if ( lookuptable_1_[ i ] < 0 or lookuptable_1_[ i ] > 15 ) - { - throw BadProperty( "Look-up table entries must be integers in [0,15]" ); - } - } - } - if ( d->known( names::lookuptable_2 ) ) - { - updateValue< std::vector< long > >( d, names::lookuptable_2, lookuptable_2_ ); - - // right size? - if ( lookuptable_2_.size() != lookuptable_0_.size() ) - { - throw BadProperty( "Look-up table has not 2^4 entries!" ); - } - - // are look-up table entries out of bounds? - for ( size_t i = 0; i < size_t( lookuptable_2_.size() ); ++i ) - { - if ( lookuptable_2_[ i ] < 0 or lookuptable_2_[ i ] > 15 ) - { - throw BadProperty( "Look-up table entries must be integers in [0,15]" ); - } - } - } - - if ( d->known( names::configbit_0 ) ) - { - updateValue< std::vector< long > >( d, names::configbit_0, configbit_0_ ); - - // right size? - if ( configbit_0_.size() != 4 ) - { - throw BadProperty( "Wrong number of configuration bits (!=4)." ); - } - } - if ( d->known( names::configbit_1 ) ) - { - updateValue< std::vector< long > >( d, names::configbit_1, configbit_1_ ); - - // right size? - if ( configbit_1_.size() != 4 ) - { - throw BadProperty( "Wrong number of configuration bits (!=4)." ); - } - } - if ( d->known( names::reset_pattern ) ) - { - updateValue< std::vector< long > >( d, names::reset_pattern, reset_pattern_ ); - - // right size? - if ( reset_pattern_.size() != 6 ) - { - throw BadProperty( "Wrong number of reset bits (!=6)." ); - } - } -} - - -// -// Implementation of class stdp_facetshw_synapse_hom. 
-// -template < typename targetidentifierT > -stdp_facetshw_synapse_hom< targetidentifierT >::stdp_facetshw_synapse_hom() - : weight_( 1.0 ) - , a_causal_( 0.0 ) - , a_acausal_( 0.0 ) - , a_thresh_th_( 21.835 ) - , a_thresh_tl_( 21.835 ) // exp(-10ms/20ms) * 36SSPs - , init_flag_( false ) - , synapse_id_( 0 ) - , next_readout_time_( 0.0 ) - , discrete_weight_( 0 ) - , t_lastspike_( 0.0 ) -{ -} - -template < typename targetidentifierT > -void -stdp_facetshw_synapse_hom< targetidentifierT >::get_status( DictionaryDatum& d ) const -{ - // base class properties, different for individual synapse - ConnectionBase::get_status( d ); - def< double >( d, names::weight, weight_ ); - - // own properties, different for individual synapse - def< double >( d, names::a_causal, a_causal_ ); - def< double >( d, names::a_acausal, a_acausal_ ); - def< double >( d, names::a_thresh_th, a_thresh_th_ ); - def< double >( d, names::a_thresh_tl, a_thresh_tl_ ); - - def< bool >( d, names::init_flag, init_flag_ ); - def< long >( d, names::synapse_id, synapse_id_ ); - def< double >( d, names::next_readout_time, next_readout_time_ ); - // useful to get conversion before activity, but weight_per_lut_entry_ not - // known here - // def(d, "discrete_weight", - // entry_to_weight_(weight_to_entry_(weight_, - // weight_per_lut_entry_), weight_per_lut_entry_)); -} - -template < typename targetidentifierT > -void -stdp_facetshw_synapse_hom< targetidentifierT >::set_status( const DictionaryDatum& d, ConnectorModel& cm ) -{ - // base class properties - ConnectionBase::set_status( d, cm ); - updateValue< double >( d, names::weight, weight_ ); - - updateValue< double >( d, names::a_causal, a_causal_ ); - updateValue< double >( d, names::a_acausal, a_acausal_ ); - updateValue< double >( d, names::a_thresh_th, a_thresh_th_ ); - updateValue< double >( d, names::a_thresh_tl, a_thresh_tl_ ); - - updateValue< long >( d, names::synapse_id, synapse_id_ ); - - // TP: they should not be allowed to be changed! 
But needed for CopyModel ... - updateValue< bool >( d, names::init_flag, init_flag_ ); - updateValue< double >( d, names::next_readout_time, next_readout_time_ ); - - // setting discrete_weight_ does not make sense, is temporary variable -} - -} // of namespace nest - -#endif // #ifndef STDP_SYNAPSE_FACETSHW_HOM_IMPL_H diff --git a/models/stdp_nn_pre_centered_synapse.cpp b/models/stdp_nn_pre_centered_synapse.cpp index baa03ddfa5..99041c8e28 100644 --- a/models/stdp_nn_pre_centered_synapse.cpp +++ b/models/stdp_nn_pre_centered_synapse.cpp @@ -22,9 +22,6 @@ #include "stdp_nn_pre_centered_synapse.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_stdp_nn_pre_centered_synapse( const std::string& name ) { diff --git a/models/stdp_nn_restr_synapse.cpp b/models/stdp_nn_restr_synapse.cpp index c531ac5c38..1ba4d3f729 100644 --- a/models/stdp_nn_restr_synapse.cpp +++ b/models/stdp_nn_restr_synapse.cpp @@ -22,9 +22,6 @@ #include "stdp_nn_restr_synapse.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_stdp_nn_restr_synapse( const std::string& name ) { diff --git a/models/stdp_nn_symm_synapse.cpp b/models/stdp_nn_symm_synapse.cpp index b1b69b3b26..85ca3c5b4c 100644 --- a/models/stdp_nn_symm_synapse.cpp +++ b/models/stdp_nn_symm_synapse.cpp @@ -22,9 +22,6 @@ #include "stdp_nn_symm_synapse.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_stdp_nn_symm_synapse( const std::string& name ) { diff --git a/models/stdp_pl_synapse_hom.cpp b/models/stdp_pl_synapse_hom.cpp index 3bc6a0c247..f9eb393d9a 100644 --- a/models/stdp_pl_synapse_hom.cpp +++ b/models/stdp_pl_synapse_hom.cpp @@ -24,9 +24,7 @@ // Includes from nestkernel: #include "common_synapse_properties.h" -#include "connector_model.h" #include "event.h" -#include "nest_impl.h" // Includes from sli: #include "dictdatum.h" diff --git a/models/stdp_synapse.cpp b/models/stdp_synapse.cpp index cbd48650ad..63fe836e86 100644 --- 
a/models/stdp_synapse.cpp +++ b/models/stdp_synapse.cpp @@ -22,9 +22,6 @@ #include "stdp_synapse.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_stdp_synapse( const std::string& name ) { diff --git a/models/stdp_synapse_hom.cpp b/models/stdp_synapse_hom.cpp index fccdf1b148..fb23eefe80 100644 --- a/models/stdp_synapse_hom.cpp +++ b/models/stdp_synapse_hom.cpp @@ -24,8 +24,6 @@ // Includes from nestkernel: #include "common_synapse_properties.h" -#include "connector_model.h" -#include "nest_impl.h" // Includes from sli: #include "dictdatum.h" diff --git a/models/stdp_triplet_synapse.cpp b/models/stdp_triplet_synapse.cpp index 597c1ee9c6..28bd3fadd4 100644 --- a/models/stdp_triplet_synapse.cpp +++ b/models/stdp_triplet_synapse.cpp @@ -22,9 +22,6 @@ #include "stdp_triplet_synapse.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_stdp_triplet_synapse( const std::string& name ) { diff --git a/models/step_current_generator.cpp b/models/step_current_generator.cpp index d006dc30af..ee7c852b4e 100644 --- a/models/step_current_generator.cpp +++ b/models/step_current_generator.cpp @@ -23,10 +23,7 @@ #include "step_current_generator.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "booldatum.h" diff --git a/models/step_rate_generator.cpp b/models/step_rate_generator.cpp index dfe104987d..f6ba8c432e 100644 --- a/models/step_rate_generator.cpp +++ b/models/step_rate_generator.cpp @@ -23,10 +23,7 @@ #include "step_rate_generator.h" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "kernel_manager.h" -#include "nest_impl.h" -#include "universal_data_logger_impl.h" // Includes from sli: #include "booldatum.h" diff --git a/models/tanh_rate.cpp b/models/tanh_rate.cpp index c1a41cb679..fccc105446 100644 --- a/models/tanh_rate.cpp +++ 
b/models/tanh_rate.cpp @@ -22,10 +22,7 @@ #include "tanh_rate.h" -// Includes from nestkernel -#include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" +#include namespace nest { diff --git a/models/tanh_rate.h b/models/tanh_rate.h index ce04f718b2..b8b4e599df 100644 --- a/models/tanh_rate.h +++ b/models/tanh_rate.h @@ -25,11 +25,8 @@ // Includes from models: #include "rate_neuron_ipn.h" -#include "rate_neuron_ipn_impl.h" #include "rate_neuron_opn.h" -#include "rate_neuron_opn_impl.h" #include "rate_transformer_node.h" -#include "rate_transformer_node_impl.h" namespace nest { diff --git a/models/threshold_lin_rate.cpp b/models/threshold_lin_rate.cpp index dbb877601e..cb054d7a52 100644 --- a/models/threshold_lin_rate.cpp +++ b/models/threshold_lin_rate.cpp @@ -22,10 +22,7 @@ #include "threshold_lin_rate.h" -// Includes from nestkernel -#include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" +#include namespace nest { diff --git a/models/threshold_lin_rate.h b/models/threshold_lin_rate.h index 2329f4064a..4c1dbeeea2 100644 --- a/models/threshold_lin_rate.h +++ b/models/threshold_lin_rate.h @@ -28,11 +28,8 @@ // Includes from models: #include "rate_neuron_ipn.h" -#include "rate_neuron_ipn_impl.h" #include "rate_neuron_opn.h" -#include "rate_neuron_opn_impl.h" #include "rate_transformer_node.h" -#include "rate_transformer_node_impl.h" namespace nest { diff --git a/models/tsodyks2_synapse.cpp b/models/tsodyks2_synapse.cpp index 0019c0b11f..95c5abbc32 100644 --- a/models/tsodyks2_synapse.cpp +++ b/models/tsodyks2_synapse.cpp @@ -22,9 +22,6 @@ #include "tsodyks2_synapse.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_tsodyks2_synapse( const std::string& name ) { diff --git a/models/tsodyks_synapse.cpp b/models/tsodyks_synapse.cpp index 19e2222ae9..8a0bd64960 100644 --- a/models/tsodyks_synapse.cpp +++ b/models/tsodyks_synapse.cpp @@ -22,9 +22,6 @@ #include "tsodyks_synapse.h" -// 
Includes from nestkernel: -#include "nest_impl.h" - void nest::register_tsodyks_synapse( const std::string& name ) { diff --git a/models/tsodyks_synapse_hom.cpp b/models/tsodyks_synapse_hom.cpp index 65e450ba2b..caec1959f1 100644 --- a/models/tsodyks_synapse_hom.cpp +++ b/models/tsodyks_synapse_hom.cpp @@ -22,10 +22,6 @@ #include "tsodyks_synapse_hom.h" -// Includes from nestkernel: -#include "connector_model.h" -#include "nest_impl.h" - void nest::register_tsodyks_synapse_hom( const std::string& name ) { diff --git a/models/urbanczik_synapse.cpp b/models/urbanczik_synapse.cpp index 8fd26b8550..e698e72f0f 100644 --- a/models/urbanczik_synapse.cpp +++ b/models/urbanczik_synapse.cpp @@ -22,9 +22,6 @@ #include "urbanczik_synapse.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_urbanczik_synapse( const std::string& name ) { diff --git a/models/vogels_sprekeler_synapse.cpp b/models/vogels_sprekeler_synapse.cpp index e1279d0888..989aec0e75 100644 --- a/models/vogels_sprekeler_synapse.cpp +++ b/models/vogels_sprekeler_synapse.cpp @@ -22,9 +22,6 @@ #include "vogels_sprekeler_synapse.h" -// Includes from nestkernel: -#include "nest_impl.h" - void nest::register_vogels_sprekeler_synapse( const std::string& name ) { diff --git a/models/volume_transmitter.cpp b/models/volume_transmitter.cpp index 659838b288..69f048a03e 100644 --- a/models/volume_transmitter.cpp +++ b/models/volume_transmitter.cpp @@ -24,11 +24,7 @@ // Includes from nestkernel: -#include "connector_base.h" -#include "exceptions.h" #include "kernel_manager.h" -#include "model_manager_impl.h" -#include "nest_impl.h" #include "spikecounter.h" // Includes from libnestutil: diff --git a/models/weight_recorder.cpp b/models/weight_recorder.cpp index 33f1fef67d..cd1f9f434d 100644 --- a/models/weight_recorder.cpp +++ b/models/weight_recorder.cpp @@ -27,12 +27,11 @@ #include "compose.hpp" // Includes from nestkernel: -#include "event_delivery_manager_impl.h" #include "kernel_manager.h" 
-#include "model_manager_impl.h" +#include "nest.h" #include "nest_datums.h" -#include "nest_impl.h" #include "node_collection.h" +#include "node_manager.h" // Includes from sli: #include "arraydatum.h" diff --git a/models/weight_recorder.h b/models/weight_recorder.h index ef23ad8f6d..fbff6d2a64 100644 --- a/models/weight_recorder.h +++ b/models/weight_recorder.h @@ -23,14 +23,10 @@ #ifndef WEIGHT_RECORDER_H #define WEIGHT_RECORDER_H -// C++ includes: -#include - // Includes from nestkernel: -#include "device_node.h" #include "event.h" #include "exceptions.h" -#include "kernel_manager.h" +#include "nest_datums.h" #include "nest_types.h" #include "recording_device.h" diff --git a/nestkernel/CMakeLists.txt b/nestkernel/CMakeLists.txt index ab3257a89f..f54a0aaccd 100644 --- a/nestkernel/CMakeLists.txt +++ b/nestkernel/CMakeLists.txt @@ -18,21 +18,21 @@ # along with NEST. If not, see . set ( nestkernel_sources - universal_data_logger_impl.h universal_data_logger.h + universal_data_logger.h recordables_map.h archiving_node.h archiving_node.cpp clopath_archiving_node.h clopath_archiving_node.cpp - urbanczik_archiving_node.h urbanczik_archiving_node_impl.h - eprop_archiving_node.h eprop_archiving_node_impl.h + urbanczik_archiving_node.h + eprop_archiving_node.h eprop_archiving_node_readout.h - eprop_archiving_node_recurrent.h eprop_archiving_node_recurrent_impl.h + eprop_archiving_node_recurrent.h common_synapse_properties.h common_synapse_properties.cpp connection.h connection_label.h common_properties_hom_w.h syn_id_delay.h - connector_base.h connector_base_impl.h - connector_model.h connector_model_impl.h connector_model.cpp + connector_base.h connector_base.cpp + connector_model.h connector_model.cpp connector_model_impl.h connection_id.h connection_id.cpp deprecation_warning.h deprecation_warning.cpp device.h device.cpp @@ -40,12 +40,12 @@ set ( nestkernel_sources module_manager.h module_manager.cpp event.h event.cpp exceptions.h exceptions.cpp - genericmodel.h 
genericmodel_impl.h + genericmodel.h node_collection.h node_collection.cpp generic_factory.h histentry.h histentry.cpp model.h model.cpp - model_manager.h model_manager_impl.h model_manager.cpp + model_manager.h model_manager.cpp nest_datums.h nest_datums.cpp nest_names.cpp nest_names.h nestmodule.h nestmodule.cpp @@ -60,35 +60,35 @@ set ( nestkernel_sources random_generators.h recording_device.h recording_device.cpp pseudo_recording_device.h - ring_buffer.h ring_buffer_impl.h ring_buffer.cpp - secondary_event.h secondary_event_impl.h + ring_buffer.h ring_buffer.cpp + secondary_event.h slice_ring_buffer.cpp slice_ring_buffer.h spikecounter.h spikecounter.cpp stimulation_device.h stimulation_device.cpp target_identifier.h sparse_node_array.h sparse_node_array.cpp conn_parameter.h conn_parameter.cpp - conn_builder.h conn_builder_impl.h conn_builder.cpp + conn_builder.h conn_builder.cpp conn_builder_factory.h conn_builder_conngen.h conn_builder_conngen.cpp sonata_connector.h sonata_connector.cpp music_event_handler.h music_event_handler.cpp music_rate_in_handler.h music_rate_in_handler.cpp music_manager.cpp music_manager.h - nest.h nest_impl.h nest.cpp + nest.h nest.cpp synaptic_element.h synaptic_element.cpp growth_curve.h growth_curve.cpp growth_curve_factory.h kernel_manager.h kernel_manager.cpp - vp_manager.h vp_manager_impl.h vp_manager.cpp - io_manager.h io_manager_impl.h io_manager.cpp - mpi_manager.h mpi_manager_impl.h mpi_manager.cpp + vp_manager.h vp_manager.cpp + io_manager.h io_manager.cpp + mpi_manager.h mpi_manager.cpp simulation_manager.h simulation_manager.cpp - connection_manager.h connection_manager_impl.h connection_manager.cpp - sp_manager.h sp_manager_impl.h sp_manager.cpp + connection_manager.h connection_manager.cpp + sp_manager.h sp_manager.cpp delay_checker.h delay_checker.cpp random_manager.h random_manager.cpp - event_delivery_manager.h event_delivery_manager_impl.h + event_delivery_manager.h event_delivery_manager.cpp node_manager.h 
node_manager.cpp logging_manager.h logging_manager.cpp @@ -98,7 +98,7 @@ set ( nestkernel_sources recording_backend_screen.h recording_backend_screen.cpp manager_interface.h target_table.h target_table.cpp - target_table_devices.h target_table_devices.cpp target_table_devices_impl.h + target_table_devices.h target_table_devices.cpp target.h target_data.h static_assert.h send_buffer_position.h send_buffer_position.cpp source.h @@ -106,19 +106,19 @@ set ( nestkernel_sources source_table_position.h spike_data.h structural_plasticity_node.h structural_plasticity_node.cpp - connection_creator.h connection_creator.cpp connection_creator_impl.h + connection_creator.h connection_creator.cpp free_layer.h grid_layer.h grid_mask.h layer.h layer.cpp layer_impl.h - mask.h mask.cpp mask_impl.h - ntree.h ntree_impl.h + mask.h mask.cpp + ntree.h position.h spatial.h spatial.cpp stimulation_backend.h buffer_resize_log.h buffer_resize_log.cpp nest_extension_interface.h - stopwatch.h stopwatch_impl.h + stopwatch.h ) diff --git a/nestkernel/archiving_node.cpp b/nestkernel/archiving_node.cpp index 7ad511eb3c..ba35130aa1 100644 --- a/nestkernel/archiving_node.cpp +++ b/nestkernel/archiving_node.cpp @@ -23,6 +23,7 @@ #include "archiving_node.h" // Includes from nestkernel: +#include "connection_manager.h" #include "kernel_manager.h" // Includes from sli: diff --git a/nestkernel/buffer_resize_log.cpp b/nestkernel/buffer_resize_log.cpp index a37e383c7a..948766245b 100644 --- a/nestkernel/buffer_resize_log.cpp +++ b/nestkernel/buffer_resize_log.cpp @@ -23,8 +23,10 @@ #include "buffer_resize_log.h" // Includes from nestkernel: +#include "dictutils.h" #include "kernel_manager.h" #include "nest_names.h" +#include "simulation_manager.h" namespace nest { diff --git a/nestkernel/clopath_archiving_node.cpp b/nestkernel/clopath_archiving_node.cpp index 42bb957ea2..07f3e30038 100644 --- a/nestkernel/clopath_archiving_node.cpp +++ b/nestkernel/clopath_archiving_node.cpp @@ -23,6 +23,7 @@ #include 
"clopath_archiving_node.h" // Includes from nestkernel: +#include "connection_manager.h" #include "kernel_manager.h" // Includes from sli: diff --git a/nestkernel/common_properties_hom_w.h b/nestkernel/common_properties_hom_w.h index b5b8d16879..527568cb25 100644 --- a/nestkernel/common_properties_hom_w.h +++ b/nestkernel/common_properties_hom_w.h @@ -25,6 +25,8 @@ // Includes from nestkernel: #include "common_synapse_properties.h" +#include "dictutils.h" +#include "nest_names.h" namespace nest { diff --git a/nestkernel/common_synapse_properties.cpp b/nestkernel/common_synapse_properties.cpp index 2a5053c0c3..6bb03a4f6b 100644 --- a/nestkernel/common_synapse_properties.cpp +++ b/nestkernel/common_synapse_properties.cpp @@ -24,9 +24,8 @@ // Includes from nestkernel: #include "connector_model.h" -#include "nest_timeconverter.h" -#include "nest_types.h" #include "node.h" +#include "node_manager.h" // Includes from models: #include "weight_recorder.h" diff --git a/nestkernel/common_synapse_properties.h b/nestkernel/common_synapse_properties.h index fab02945c6..256cf92c16 100644 --- a/nestkernel/common_synapse_properties.h +++ b/nestkernel/common_synapse_properties.h @@ -23,12 +23,6 @@ #ifndef COMMON_SYNAPSE_PROPERTIES_H #define COMMON_SYNAPSE_PROPERTIES_H -// Includes from nestkernel: -#include "connector_model.h" -#include "nest_datums.h" -#include "nest_types.h" -#include "node.h" - // Includes from sli: #include "dictdatum.h" @@ -38,8 +32,8 @@ namespace nest // forward declarations class weight_recorder; -class ConnectorModel; class TimeConverter; +class ConnectorModel; /** * Class containing the common properties for all connections of a certain type. 
diff --git a/nestkernel/conn_builder.cpp b/nestkernel/conn_builder.cpp index f70f00f0b4..1169651b38 100644 --- a/nestkernel/conn_builder.cpp +++ b/nestkernel/conn_builder.cpp @@ -26,14 +26,17 @@ #include "logging.h" // Includes from nestkernel: -#include "conn_builder_impl.h" #include "conn_parameter.h" #include "connection_manager.h" #include "exceptions.h" #include "kernel_manager.h" +#include "logging_manager.h" +#include "model_manager.h" +#include "nest.h" #include "nest_names.h" #include "node.h" -#include "vp_manager_impl.h" +#include "node_manager.h" +#include "random_manager.h" // Includes from sli: #include "dict.h" diff --git a/nestkernel/conn_builder.h b/nestkernel/conn_builder.h index 801d4bb355..4e24c7a60b 100644 --- a/nestkernel/conn_builder.h +++ b/nestkernel/conn_builder.h @@ -40,9 +40,10 @@ // Includes from nestkernel: #include "conn_parameter.h" -#include "nest_time.h" +#include "kernel_manager.h" #include "node_collection.h" #include "parameter.h" +#include "sp_manager.h" // Includes from sli: #include "dictdatum.h" @@ -860,6 +861,20 @@ BipartiteConnBuilder::skip_conn_parameter_( size_t target_thread, size_t n_skip } } +inline void +BipartiteConnBuilder::single_disconnect_( size_t snode_id, Node& target, size_t target_thread ) +{ + // index tnode_id = target.get_node_id(); + // This is the most simple case in which only the synapse_model_ has been + // defined. TODO: Add functionality to delete synapses with a given weight + // or a given delay + if ( synapse_model_id_.size() > 1 ) + { + throw KernelException( "Can only disconnect when single element syn_spec has been used." 
); + } + kernel().sp_manager.disconnect( snode_id, &target, target_thread, synapse_model_id_[ 0 ] ); +} + } // namespace nest #endif diff --git a/nestkernel/conn_builder_impl.h b/nestkernel/conn_builder_impl.h deleted file mode 100644 index 385bb12ec8..0000000000 --- a/nestkernel/conn_builder_impl.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * conn_builder_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#ifndef CONN_BUILDER_IMPL_H -#define CONN_BUILDER_IMPL_H - -#include "conn_builder.h" - -// Includes from nestkernel: -#include "kernel_manager.h" -#include "nest_names.h" - -namespace nest -{ - -inline void -BipartiteConnBuilder::single_disconnect_( size_t snode_id, Node& target, size_t target_thread ) -{ - // index tnode_id = target.get_node_id(); - // This is the most simple case in which only the synapse_model_ has been - // defined. TODO: Add functionality to delete synapses with a given weight - // or a given delay - if ( synapse_model_id_.size() > 1 ) - { - throw KernelException( "Can only disconnect when single element syn_spec has been used." 
); - } - kernel().sp_manager.disconnect( snode_id, &target, target_thread, synapse_model_id_[ 0 ] ); -} - -} // namespace nest - -#endif /* CONN_BUILDER_IMPL_H */ diff --git a/nestkernel/connection.h b/nestkernel/connection.h index 4a90eb2f0f..ddec70f6bb 100644 --- a/nestkernel/connection.h +++ b/nestkernel/connection.h @@ -26,7 +26,6 @@ // Includes from nestkernel: #include "common_synapse_properties.h" #include "connection_label.h" -#include "connector_base_impl.h" #include "delay_checker.h" #include "event.h" #include "kernel_manager.h" diff --git a/nestkernel/connection_creator.h b/nestkernel/connection_creator.h index 62d2671344..087e5f43a8 100644 --- a/nestkernel/connection_creator.h +++ b/nestkernel/connection_creator.h @@ -27,9 +27,12 @@ #include // Includes from nestkernel: +#include "connection_manager.h" #include "kernel_manager.h" +#include "nest.h" #include "nest_names.h" #include "nestmodule.h" +#include "node_manager.h" // Includes from spatial: #include "mask.h" @@ -188,6 +191,870 @@ class ConnectionCreator std::vector< std::shared_ptr< Parameter > > delay_; }; +template < int D > +void +ConnectionCreator::connect( Layer< D >& source, + NodeCollectionPTR source_nc, + Layer< D >& target, + NodeCollectionPTR target_nc ) +{ + switch ( type_ ) + { + case Pairwise_bernoulli_on_source: + + pairwise_bernoulli_on_source_( source, source_nc, target, target_nc ); + break; + + case Fixed_indegree: + + fixed_indegree_( source, source_nc, target, target_nc ); + break; + + case Fixed_outdegree: + + fixed_outdegree_( source, source_nc, target, target_nc ); + break; + + case Pairwise_bernoulli_on_target: + + pairwise_bernoulli_on_target_( source, source_nc, target, target_nc ); + break; + + case Pairwise_poisson: + + pairwise_poisson_( source, source_nc, target, target_nc ); + break; + + default: + throw BadProperty( "Unknown connection type." 
); + } +} + +template < typename Iterator, int D > +void +ConnectionCreator::connect_to_target_( Iterator from, + Iterator to, + Node* tgt_ptr, + const Position< D >& tgt_pos, + size_t tgt_thread, + const Layer< D >& source ) +{ + RngPtr rng = get_vp_specific_rng( tgt_thread ); + + // We create a source pos vector here that can be updated with the + // source position. This is done to avoid creating and destroying + // unnecessarily many vectors. + std::vector< double > source_pos( D ); + const std::vector< double > target_pos = tgt_pos.get_vector(); + + for ( Iterator iter = from; iter != to; ++iter ) + { + if ( not allow_autapses_ and ( iter->second == tgt_ptr->get_node_id() ) ) + { + continue; + } + iter->first.get_vector( source_pos ); + + if ( not kernel_ or rng->drand() < kernel_->value( rng, source_pos, target_pos, source, tgt_ptr ) ) + { + for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) + { + kernel().connection_manager.connect( iter->second, + tgt_ptr, + tgt_thread, + synapse_model_[ indx ], + param_dicts_[ indx ][ tgt_thread ], + delay_[ indx ]->value( rng, source_pos, target_pos, source, tgt_ptr ), + weight_[ indx ]->value( rng, source_pos, target_pos, source, tgt_ptr ) ); + } + } + } +} + +template < typename Iterator, int D > +void +ConnectionCreator::connect_to_target_poisson_( Iterator from, + Iterator to, + Node* tgt_ptr, + const Position< D >& tgt_pos, + size_t tgt_thread, + const Layer< D >& source ) +{ + RngPtr rng = get_vp_specific_rng( tgt_thread ); + + // We create a source pos vector here that can be updated with the + // source position. This is done to avoid creating and destroying + // unnecessarily many vectors. 
+ std::vector< double > source_pos( D ); + const std::vector< double > target_pos = tgt_pos.get_vector(); + poisson_distribution poi_dist; + + for ( Iterator iter = from; iter != to; ++iter ) + { + if ( not allow_autapses_ and ( iter->second == tgt_ptr->get_node_id() ) ) + { + continue; + } + iter->first.get_vector( source_pos ); + + // Sample number of connections that are to be established + poisson_distribution::param_type param( kernel_->value( rng, source_pos, target_pos, source, tgt_ptr ) ); + const unsigned long num_conns = poi_dist( rng, param ); + for ( unsigned long conn_counter = 0; conn_counter < num_conns; ++conn_counter ) + { + for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) + { + kernel().connection_manager.connect( iter->second, + tgt_ptr, + tgt_thread, + synapse_model_[ indx ], + param_dicts_[ indx ][ tgt_thread ], + delay_[ indx ]->value( rng, source_pos, target_pos, source, tgt_ptr ), + weight_[ indx ]->value( rng, source_pos, target_pos, source, tgt_ptr ) ); + } + } + } +} + +template < int D > +ConnectionCreator::PoolWrapper_< D >::PoolWrapper_() + : masked_layer_( 0 ) + , positions_( 0 ) +{ +} + +template < int D > +ConnectionCreator::PoolWrapper_< D >::~PoolWrapper_< D >() +{ + if ( masked_layer_ ) + { + delete masked_layer_; + } +} + +template < int D > +void +ConnectionCreator::PoolWrapper_< D >::define( MaskedLayer< D >* ml ) +{ + assert( masked_layer_ == 0 ); + assert( positions_ == 0 ); + assert( ml != 0 ); + masked_layer_ = ml; +} + +template < int D > +void +ConnectionCreator::PoolWrapper_< D >::define( std::vector< std::pair< Position< D >, size_t > >* pos ) +{ + assert( masked_layer_ == 0 ); + assert( positions_ == 0 ); + assert( pos != 0 ); + positions_ = pos; +} + +template < int D > +typename Ntree< D, size_t >::masked_iterator +ConnectionCreator::PoolWrapper_< D >::masked_begin( const Position< D >& pos ) const +{ + return masked_layer_->begin( pos ); +} + +template < int D > +typename Ntree< D, size_t 
>::masked_iterator +ConnectionCreator::PoolWrapper_< D >::masked_end() const +{ + return masked_layer_->end(); +} + +template < int D > +typename std::vector< std::pair< Position< D >, size_t > >::iterator +ConnectionCreator::PoolWrapper_< D >::begin() const +{ + return positions_->begin(); +} + +template < int D > +typename std::vector< std::pair< Position< D >, size_t > >::iterator +ConnectionCreator::PoolWrapper_< D >::end() const +{ + return positions_->end(); +} + + +template < int D > +void +ConnectionCreator::pairwise_bernoulli_on_source_( Layer< D >& source, + NodeCollectionPTR source_nc, + Layer< D >& target, + NodeCollectionPTR target_nc ) +{ + // Connect using pairwise Bernoulli drawing source nodes (target driven) + // For each local target node: + // 1. Apply Mask to source layer + // 2. For each source node: Compute probability, draw random number, make + // connection conditionally + + // retrieve global positions, either for masked or unmasked pool + PoolWrapper_< D > pool; + if ( mask_.get() ) // MaskedLayer will be freed by PoolWrapper d'tor + { + pool.define( new MaskedLayer< D >( source, mask_, allow_oversized_, source_nc ) ); + } + else + { + pool.define( source.get_global_positions_vector( source_nc ) ); + } + + std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( kernel().vp_manager.get_num_threads() ); + +#pragma omp parallel + { + const int thread_id = kernel().vp_manager.get_thread_id(); + try + { + NodeCollection::const_iterator target_begin = target_nc->begin(); + NodeCollection::const_iterator target_end = target_nc->end(); + + for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) + { + Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); + + if ( not tgt->is_proxy() ) + { + const Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); + + if ( mask_.get() ) + { + connect_to_target_( + pool.masked_begin( target_pos ), 
pool.masked_end(), tgt, target_pos, thread_id, source ); + } + else + { + connect_to_target_( pool.begin(), pool.end(), tgt, target_pos, thread_id, source ); + } + } + } // for target_begin + } + catch ( std::exception& err ) + { + // We must create a new exception here, err's lifetime ends at + // the end of the catch block. + exceptions_raised_.at( thread_id ) = + std::shared_ptr< WrappedThreadException >( new WrappedThreadException( err ) ); + } + } // omp parallel + // check if any exceptions have been raised + for ( size_t thr = 0; thr < kernel().vp_manager.get_num_threads(); ++thr ) + { + if ( exceptions_raised_.at( thr ).get() ) + { + throw WrappedThreadException( *( exceptions_raised_.at( thr ) ) ); + } + } +} + + +template < int D > +void +ConnectionCreator::pairwise_bernoulli_on_target_( Layer< D >& source, + NodeCollectionPTR source_nc, + Layer< D >& target, + NodeCollectionPTR target_nc ) +{ + // Connecting using pairwise Bernoulli drawing target nodes (source driven) + // It is actually implemented as pairwise Bernoulli on source nodes, + // but with displacements computed in the target layer. The Mask has been + // reversed so that it can be applied to the source instead of the target. + // For each local target node: + // 1. Apply (Converse)Mask to source layer + // 2. 
For each source node: Compute probability, draw random number, make + // connection conditionally + + PoolWrapper_< D > pool; + if ( mask_.get() ) // MaskedLayer will be freed by PoolWrapper d'tor + { + // By supplying the target layer to the MaskedLayer constructor, the + // mask is mirrored so it may be applied to the source layer instead + pool.define( new MaskedLayer< D >( source, mask_, allow_oversized_, target, source_nc ) ); + } + else + { + pool.define( source.get_global_positions_vector( source_nc ) ); + } + + std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( kernel().vp_manager.get_num_threads() ); + + // We only need to check the first in the NodeCollection + Node* const first_in_tgt = kernel().node_manager.get_node_or_proxy( target_nc->operator[]( 0 ) ); + if ( not first_in_tgt->has_proxies() ) + { + throw IllegalConnection( "Spatial Connect with pairwise_bernoulli to devices is not possible." ); + } + +#pragma omp parallel + { + const int thread_id = kernel().vp_manager.get_thread_id(); + try + { + NodeCollection::const_iterator target_begin = target_nc->thread_local_begin(); + NodeCollection::const_iterator target_end = target_nc->end(); + + for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) + { + Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); + + assert( not tgt->is_proxy() ); + + const Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); + + if ( mask_.get() ) + { + // We do the same as in the target driven case, except that we calculate displacements in the target layer. + // We therefore send in target as last parameter. + connect_to_target_( pool.masked_begin( target_pos ), pool.masked_end(), tgt, target_pos, thread_id, target ); + } + else + { + // We do the same as in the target driven case, except that we calculate displacements in the target layer. + // We therefore send in target as last parameter. 
+ connect_to_target_( pool.begin(), pool.end(), tgt, target_pos, thread_id, target ); + } + + } // end for + } + catch ( std::exception& err ) + { + // We must create a new exception here, err's lifetime ends at the end of the catch block. + exceptions_raised_.at( thread_id ) = + std::shared_ptr< WrappedThreadException >( new WrappedThreadException( err ) ); + } + } // omp parallel + // check if any exceptions have been raised + for ( size_t thr = 0; thr < kernel().vp_manager.get_num_threads(); ++thr ) + { + if ( exceptions_raised_.at( thr ).get() ) + { + throw WrappedThreadException( *( exceptions_raised_.at( thr ) ) ); + } + } +} + + +template < int D > +void +ConnectionCreator::pairwise_poisson_( Layer< D >& source, + NodeCollectionPTR source_nc, + Layer< D >& target, + NodeCollectionPTR target_nc ) +{ + // Connect using pairwise Poisson drawing source nodes (target driven) + // For each local target node: + // 1. Apply Mask to source layer + // 2. For each source node: Compute probability, draw random number, make + // connection conditionally + + // retrieve global positions, either for masked or unmasked pool + PoolWrapper_< D > pool; + if ( mask_.get() ) // MaskedLayer will be freed by PoolWrapper d'tor + { + pool.define( new MaskedLayer< D >( source, mask_, allow_oversized_, source_nc ) ); + } + else + { + pool.define( source.get_global_positions_vector( source_nc ) ); + } + + std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( kernel().vp_manager.get_num_threads() ); + +#pragma omp parallel + { + const int thread_id = kernel().vp_manager.get_thread_id(); + try + { + NodeCollection::const_iterator target_begin = target_nc->begin(); + NodeCollection::const_iterator target_end = target_nc->end(); + + for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) + { + Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); + + if ( not tgt->is_proxy() ) + { + const 
Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); + + if ( mask_.get() ) + { + connect_to_target_poisson_( + pool.masked_begin( target_pos ), pool.masked_end(), tgt, target_pos, thread_id, source ); + } + else + { + connect_to_target_poisson_( pool.begin(), pool.end(), tgt, target_pos, thread_id, source ); + } + } + } // for target_begin + } + catch ( std::exception& err ) + { + // We must create a new exception here, err's lifetime ends at + // the end of the catch block. + exceptions_raised_.at( thread_id ) = + std::shared_ptr< WrappedThreadException >( new WrappedThreadException( err ) ); + } + } // omp parallel + // check if any exceptions have been raised + for ( size_t thr = 0; thr < kernel().vp_manager.get_num_threads(); ++thr ) + { + if ( exceptions_raised_.at( thr ).get() ) + { + throw WrappedThreadException( *( exceptions_raised_.at( thr ) ) ); + } + } +} + + +template < int D > +void +ConnectionCreator::fixed_indegree_( Layer< D >& source, + NodeCollectionPTR source_nc, + Layer< D >& target, + NodeCollectionPTR target_nc ) +{ + // fixed_indegree connections (fixed fan in) + // + // For each local target node: + // 1. Apply Mask to source layer + // 2. Compute connection probability for each source position + // 3. Draw source nodes and make connections + + // We only need to check the first in the NodeCollection + Node* const first_in_tgt = kernel().node_manager.get_node_or_proxy( target_nc->operator[]( 0 ) ); + if ( not first_in_tgt->has_proxies() ) + { + throw IllegalConnection( "Spatial Connect with fixed_indegree to devices is not possible." 
); + } + + NodeCollection::const_iterator target_begin = target_nc->rank_local_begin(); + NodeCollection::const_iterator target_end = target_nc->end(); + + // protect against connecting to devices without proxies + // we need to do this before creating the first connection to leave + // the network untouched if any target does not have proxies + for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) + { + assert( not kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id )->is_proxy() ); + } + + if ( mask_.get() ) + { + MaskedLayer< D > masked_source( source, mask_, allow_oversized_, source_nc ); + const auto masked_source_end = masked_source.end(); + + std::vector< std::pair< Position< D >, size_t > > positions; + + for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) + { + size_t target_id = ( *tgt_it ).node_id; + Node* const tgt = kernel().node_manager.get_node_or_proxy( target_id ); + + size_t target_thread = tgt->get_thread(); + RngPtr rng = get_vp_specific_rng( target_thread ); + Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); + + // We create a source pos vector here that can be updated with the + // source position. This is done to avoid creating and destroying + // unnecessarily many vectors. + std::vector< double > source_pos_vector( D ); + const std::vector< double > target_pos_vector = target_pos.get_vector(); + + unsigned long target_number_connections = + std::round( number_of_connections_->value( rng, source_pos_vector, target_pos_vector, source, tgt ) ); + + // Get (position,node ID) pairs for sources inside mask + positions.resize( std::distance( masked_source.begin( target_pos ), masked_source_end ) ); + std::copy( masked_source.begin( target_pos ), masked_source_end, positions.begin() ); + + // We will select `number_of_connections_` sources within the mask. 
+ // If there is no kernel, we can just draw uniform random numbers, + // but with a kernel we have to set up a probability distribution + // function using a discrete_distribution. + if ( kernel_ ) + { + + std::vector< double > probabilities; + probabilities.reserve( positions.size() ); + + // Collect probabilities for the sources + for ( typename std::vector< std::pair< Position< D >, size_t > >::iterator iter = positions.begin(); + iter != positions.end(); + ++iter ) + { + iter->first.get_vector( source_pos_vector ); + probabilities.push_back( kernel_->value( rng, source_pos_vector, target_pos_vector, source, tgt ) ); + } + + if ( positions.empty() + or ( not allow_autapses_ and ( positions.size() == 1 ) and positions[ 0 ].second == target_id ) + or ( not allow_multapses_ and ( positions.size() < target_number_connections ) ) ) + { + std::string msg = String::compose( "Global target ID %1: Not enough sources found inside mask", target_id ); + throw KernelException( msg.c_str() ); + } + + // A discrete_distribution draws random integers with a non-uniform + // distribution. + discrete_distribution lottery; + const discrete_distribution::param_type param( probabilities.begin(), probabilities.end() ); + lottery.param( param ); + + // If multapses are not allowed, we must keep track of which + // sources have been selected already. 
+ std::vector< bool > is_selected( positions.size() ); + + // Draw `target_number_connections` sources + while ( target_number_connections > 0 ) + { + size_t random_id = lottery( rng ); + if ( not allow_multapses_ and is_selected[ random_id ] ) + { + continue; + } + + size_t source_id = positions[ random_id ].second; + if ( not allow_autapses_ and source_id == target_id ) + { + continue; + } + positions[ random_id ].first.get_vector( source_pos_vector ); + for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) + { + const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); + const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); + kernel().connection_manager.connect( + source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); + } + + is_selected[ random_id ] = true; + --target_number_connections; + } + } + else + { + + // no kernel + + if ( positions.empty() + or ( not allow_autapses_ and ( positions.size() == 1 ) and positions[ 0 ].second == target_id ) + or ( not allow_multapses_ and ( positions.size() < target_number_connections ) ) ) + { + std::string msg = String::compose( "Global target ID %1: Not enough sources found inside mask", target_id ); + throw KernelException( msg.c_str() ); + } + + // If multapses are not allowed, we must keep track of which + // sources have been selected already. 
+ std::vector< bool > is_selected( positions.size() ); + + // Draw `target_number_connections` sources + while ( target_number_connections > 0 ) + { + const size_t random_id = rng->ulrand( positions.size() ); + if ( not allow_multapses_ and is_selected[ random_id ] ) + { + continue; + } + positions[ random_id ].first.get_vector( source_pos_vector ); + const size_t source_id = positions[ random_id ].second; + for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) + { + const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); + const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); + kernel().connection_manager.connect( + source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); + } + + is_selected[ random_id ] = true; + --target_number_connections; + } + } + } + } + else + { + // no mask + + // Get (position,node ID) pairs for all nodes in source layer + std::vector< std::pair< Position< D >, size_t > >* positions = source.get_global_positions_vector( source_nc ); + + for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) + { + size_t target_id = ( *tgt_it ).node_id; + Node* const tgt = kernel().node_manager.get_node_or_proxy( target_id ); + size_t target_thread = tgt->get_thread(); + RngPtr rng = get_vp_specific_rng( target_thread ); + Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); + + unsigned long target_number_connections = std::round( number_of_connections_->value( rng, tgt ) ); + + std::vector< double > source_pos_vector( D ); + const std::vector< double > target_pos_vector = target_pos.get_vector(); + + if ( ( positions->size() == 0 ) + or ( not allow_autapses_ and ( positions->size() == 1 ) and ( ( *positions )[ 0 ].second == target_id ) ) + or ( not allow_multapses_ and ( positions->size() < target_number_connections ) ) ) + { + std::string msg = String::compose( 
"Global target ID %1: Not enough sources found", target_id ); + throw KernelException( msg.c_str() ); + } + + // We will select `target_number_connections` sources within the mask. + // If there is no kernel, we can just draw uniform random numbers, + // but with a kernel we have to set up a probability distribution + // function using a discrete_distribution. + if ( kernel_ ) + { + + std::vector< double > probabilities; + probabilities.reserve( positions->size() ); + + // Collect probabilities for the sources + for ( typename std::vector< std::pair< Position< D >, size_t > >::iterator iter = positions->begin(); + iter != positions->end(); + ++iter ) + { + iter->first.get_vector( source_pos_vector ); + probabilities.push_back( kernel_->value( rng, source_pos_vector, target_pos_vector, source, tgt ) ); + } + + // A discrete_distribution draws random integers with a non-uniform + // distribution. + discrete_distribution lottery; + const discrete_distribution::param_type param( probabilities.begin(), probabilities.end() ); + lottery.param( param ); + + // If multapses are not allowed, we must keep track of which + // sources have been selected already. 
+ std::vector< bool > is_selected( positions->size() ); + + // Draw `target_number_connections` sources + while ( target_number_connections > 0 ) + { + const size_t random_id = lottery( rng ); + if ( not allow_multapses_ and is_selected[ random_id ] ) + { + continue; + } + + const size_t source_id = ( *positions )[ random_id ].second; + if ( not allow_autapses_ and source_id == target_id ) + { + continue; + } + + ( *positions )[ random_id ].first.get_vector( source_pos_vector ); + for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) + { + const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); + const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); + kernel().connection_manager.connect( + source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); + } + + is_selected[ random_id ] = true; + --target_number_connections; + } + } + else + { + + // no kernel + + // If multapses are not allowed, we must keep track of which + // sources have been selected already. 
+ std::vector< bool > is_selected( positions->size() ); + + // Draw `target_number_connections` sources + while ( target_number_connections > 0 ) + { + const size_t random_id = rng->ulrand( positions->size() ); + if ( not allow_multapses_ and is_selected[ random_id ] ) + { + continue; + } + + const size_t source_id = ( *positions )[ random_id ].second; + if ( not allow_autapses_ and source_id == target_id ) + { + continue; + } + + ( *positions )[ random_id ].first.get_vector( source_pos_vector ); + for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) + { + const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); + const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); + kernel().connection_manager.connect( + source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); + } + + is_selected[ random_id ] = true; + --target_number_connections; + } + } + } + } +} + + +template < int D > +void +ConnectionCreator::fixed_outdegree_( Layer< D >& source, + NodeCollectionPTR source_nc, + Layer< D >& target, + NodeCollectionPTR target_nc ) +{ + // protect against connecting to devices without proxies + // we need to do this before creating the first connection to leave + // the network untouched if any target does not have proxies + + // We only need to check the first in the NodeCollection + Node* const first_in_tgt = kernel().node_manager.get_node_or_proxy( target_nc->operator[]( 0 ) ); + if ( not first_in_tgt->has_proxies() ) + { + throw IllegalConnection( "Spatial Connect with fixed_outdegree to devices is not possible." 
); + } + + NodeCollection::const_iterator target_begin = target_nc->rank_local_begin(); + NodeCollection::const_iterator target_end = target_nc->end(); + + for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) + { + assert( not kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id )->is_proxy() ); + } + + // Fixed_outdegree connections (fixed fan out) + // + // For each (global) source: (All connections made on all mpi procs) + // 1. Apply mask to global targets + // 2. If using kernel: Compute connection probability for each global target + // 3. Draw connections to make using global rng + + MaskedLayer< D > masked_target( target, mask_, allow_oversized_, target_nc ); + const auto masked_target_end = masked_target.end(); + + // We create a target positions vector here that can be updated with the + // position and node ID pairs. This is done to avoid creating and destroying + // unnecessarily many vectors. + std::vector< std::pair< Position< D >, size_t > > target_pos_node_id_pairs; + std::vector< std::pair< Position< D >, size_t > > source_pos_node_id_pairs = + *source.get_global_positions_vector( source_nc ); + + for ( const auto& source_pos_node_id_pair : source_pos_node_id_pairs ) + { + const Position< D > source_pos = source_pos_node_id_pair.first; + const size_t source_id = source_pos_node_id_pair.second; + const auto src = kernel().node_manager.get_node_or_proxy( source_id ); + const std::vector< double > source_pos_vector = source_pos.get_vector(); + + // We create a target pos vector here that can be updated with the + // target position. This is done to avoid creating and destroying + // unnecessarily many vectors. 
+ std::vector< double > target_pos_vector( D ); + std::vector< double > probabilities; + + // Find potential targets and probabilities + RngPtr grng = get_rank_synced_rng(); + target_pos_node_id_pairs.resize( std::distance( masked_target.begin( source_pos ), masked_target_end ) ); + std::copy( masked_target.begin( source_pos ), masked_target_end, target_pos_node_id_pairs.begin() ); + + probabilities.reserve( target_pos_node_id_pairs.size() ); + if ( kernel_ ) + { + for ( const auto& target_pos_node_id_pair : target_pos_node_id_pairs ) + { + // TODO: Why is probability calculated in source layer, but weight and delay in target layer? + target_pos_node_id_pair.first.get_vector( target_pos_vector ); + const auto tgt = kernel().node_manager.get_node_or_proxy( target_pos_node_id_pair.second ); + probabilities.push_back( kernel_->value( grng, source_pos_vector, target_pos_vector, source, tgt ) ); + } + } + else + { + probabilities.resize( target_pos_node_id_pairs.size(), 1.0 ); + } + + unsigned long number_of_connections = std::round( number_of_connections_->value( grng, src ) ); + + if ( target_pos_node_id_pairs.empty() + or ( not allow_multapses_ and target_pos_node_id_pairs.size() < number_of_connections ) ) + { + std::string msg = String::compose( "Global source ID %1: Not enough targets found", source_id ); + throw KernelException( msg.c_str() ); + } + + // Draw targets. A discrete_distribution draws random integers with a + // non-uniform distribution. + discrete_distribution lottery; + const discrete_distribution::param_type param( probabilities.begin(), probabilities.end() ); + lottery.param( param ); + + // If multapses are not allowed, we must keep track of which + // targets have been selected already. 
+ std::vector< bool > is_selected( target_pos_node_id_pairs.size() ); + + // Draw `number_of_connections` targets + while ( number_of_connections > 0 ) + { + const size_t random_id = lottery( get_rank_synced_rng() ); + if ( not allow_multapses_ and is_selected[ random_id ] ) + { + continue; + } + const size_t target_id = target_pos_node_id_pairs[ random_id ].second; + if ( not allow_autapses_ and source_id == target_id ) + { + continue; + } + + is_selected[ random_id ] = true; + + target_pos_node_id_pairs[ random_id ].first.get_vector( target_pos_vector ); + + std::vector< double > rng_weight_vec; + std::vector< double > rng_delay_vec; + for ( size_t indx = 0; indx < weight_.size(); ++indx ) + { + const auto tgt = kernel().node_manager.get_node_or_proxy( target_pos_node_id_pairs[ indx ].second ); + rng_weight_vec.push_back( weight_[ indx ]->value( grng, source_pos_vector, target_pos_vector, target, tgt ) ); + rng_delay_vec.push_back( delay_[ indx ]->value( grng, source_pos_vector, target_pos_vector, target, tgt ) ); + } + + // Each VP has now decided to create this connection and drawn any random parameter values + // required for it. Each VP thus counts the connection as created, but only the VP hosting the + // target neuron actually creates the connection. 
+ --number_of_connections; + if ( not kernel().node_manager.is_local_node_id( target_id ) ) + { + continue; + } + + Node* target_ptr = kernel().node_manager.get_node_or_proxy( target_id ); + const size_t target_thread = target_ptr->get_thread(); + + for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) + { + kernel().connection_manager.connect( source_id, + target_ptr, + target_thread, + synapse_model_[ indx ], + param_dicts_[ indx ][ target_thread ], + rng_delay_vec[ indx ], + rng_weight_vec[ indx ] ); + } + } + } +} + } // namespace nest #endif diff --git a/nestkernel/connection_creator_impl.h b/nestkernel/connection_creator_impl.h deleted file mode 100644 index e094ab59d5..0000000000 --- a/nestkernel/connection_creator_impl.h +++ /dev/null @@ -1,907 +0,0 @@ -/* - * connection_creator_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . 
- * - */ - -#ifndef CONNECTION_CREATOR_IMPL_H -#define CONNECTION_CREATOR_IMPL_H - -#include "connection_creator.h" - -// C++ includes: -#include - -// Includes from nestkernel: -#include "kernel_manager.h" -#include "nest.h" - -namespace nest -{ -template < int D > -void -ConnectionCreator::connect( Layer< D >& source, - NodeCollectionPTR source_nc, - Layer< D >& target, - NodeCollectionPTR target_nc ) -{ - switch ( type_ ) - { - case Pairwise_bernoulli_on_source: - - pairwise_bernoulli_on_source_( source, source_nc, target, target_nc ); - break; - - case Fixed_indegree: - - fixed_indegree_( source, source_nc, target, target_nc ); - break; - - case Fixed_outdegree: - - fixed_outdegree_( source, source_nc, target, target_nc ); - break; - - case Pairwise_bernoulli_on_target: - - pairwise_bernoulli_on_target_( source, source_nc, target, target_nc ); - break; - - case Pairwise_poisson: - - pairwise_poisson_( source, source_nc, target, target_nc ); - break; - - default: - throw BadProperty( "Unknown connection type." ); - } -} - -template < typename Iterator, int D > -void -ConnectionCreator::connect_to_target_( Iterator from, - Iterator to, - Node* tgt_ptr, - const Position< D >& tgt_pos, - size_t tgt_thread, - const Layer< D >& source ) -{ - RngPtr rng = get_vp_specific_rng( tgt_thread ); - - // We create a source pos vector here that can be updated with the - // source position. This is done to avoid creating and destroying - // unnecessarily many vectors. 
- std::vector< double > source_pos( D ); - const std::vector< double > target_pos = tgt_pos.get_vector(); - - for ( Iterator iter = from; iter != to; ++iter ) - { - if ( not allow_autapses_ and ( iter->second == tgt_ptr->get_node_id() ) ) - { - continue; - } - iter->first.get_vector( source_pos ); - - if ( not kernel_ or rng->drand() < kernel_->value( rng, source_pos, target_pos, source, tgt_ptr ) ) - { - for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) - { - kernel().connection_manager.connect( iter->second, - tgt_ptr, - tgt_thread, - synapse_model_[ indx ], - param_dicts_[ indx ][ tgt_thread ], - delay_[ indx ]->value( rng, source_pos, target_pos, source, tgt_ptr ), - weight_[ indx ]->value( rng, source_pos, target_pos, source, tgt_ptr ) ); - } - } - } -} - -template < typename Iterator, int D > -void -ConnectionCreator::connect_to_target_poisson_( Iterator from, - Iterator to, - Node* tgt_ptr, - const Position< D >& tgt_pos, - size_t tgt_thread, - const Layer< D >& source ) -{ - RngPtr rng = get_vp_specific_rng( tgt_thread ); - - // We create a source pos vector here that can be updated with the - // source position. This is done to avoid creating and destroying - // unnecessarily many vectors. 
- std::vector< double > source_pos( D ); - const std::vector< double > target_pos = tgt_pos.get_vector(); - poisson_distribution poi_dist; - - for ( Iterator iter = from; iter != to; ++iter ) - { - if ( not allow_autapses_ and ( iter->second == tgt_ptr->get_node_id() ) ) - { - continue; - } - iter->first.get_vector( source_pos ); - - // Sample number of connections that are to be established - poisson_distribution::param_type param( kernel_->value( rng, source_pos, target_pos, source, tgt_ptr ) ); - const unsigned long num_conns = poi_dist( rng, param ); - for ( unsigned long conn_counter = 0; conn_counter < num_conns; ++conn_counter ) - { - for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) - { - kernel().connection_manager.connect( iter->second, - tgt_ptr, - tgt_thread, - synapse_model_[ indx ], - param_dicts_[ indx ][ tgt_thread ], - delay_[ indx ]->value( rng, source_pos, target_pos, source, tgt_ptr ), - weight_[ indx ]->value( rng, source_pos, target_pos, source, tgt_ptr ) ); - } - } - } -} - -template < int D > -ConnectionCreator::PoolWrapper_< D >::PoolWrapper_() - : masked_layer_( 0 ) - , positions_( 0 ) -{ -} - -template < int D > -ConnectionCreator::PoolWrapper_< D >::~PoolWrapper_< D >() -{ - if ( masked_layer_ ) - { - delete masked_layer_; - } -} - -template < int D > -void -ConnectionCreator::PoolWrapper_< D >::define( MaskedLayer< D >* ml ) -{ - assert( masked_layer_ == 0 ); - assert( positions_ == 0 ); - assert( ml != 0 ); - masked_layer_ = ml; -} - -template < int D > -void -ConnectionCreator::PoolWrapper_< D >::define( std::vector< std::pair< Position< D >, size_t > >* pos ) -{ - assert( masked_layer_ == 0 ); - assert( positions_ == 0 ); - assert( pos != 0 ); - positions_ = pos; -} - -template < int D > -typename Ntree< D, size_t >::masked_iterator -ConnectionCreator::PoolWrapper_< D >::masked_begin( const Position< D >& pos ) const -{ - return masked_layer_->begin( pos ); -} - -template < int D > -typename Ntree< D, size_t 
>::masked_iterator -ConnectionCreator::PoolWrapper_< D >::masked_end() const -{ - return masked_layer_->end(); -} - -template < int D > -typename std::vector< std::pair< Position< D >, size_t > >::iterator -ConnectionCreator::PoolWrapper_< D >::begin() const -{ - return positions_->begin(); -} - -template < int D > -typename std::vector< std::pair< Position< D >, size_t > >::iterator -ConnectionCreator::PoolWrapper_< D >::end() const -{ - return positions_->end(); -} - - -template < int D > -void -ConnectionCreator::pairwise_bernoulli_on_source_( Layer< D >& source, - NodeCollectionPTR source_nc, - Layer< D >& target, - NodeCollectionPTR target_nc ) -{ - // Connect using pairwise Bernoulli drawing source nodes (target driven) - // For each local target node: - // 1. Apply Mask to source layer - // 2. For each source node: Compute probability, draw random number, make - // connection conditionally - - // retrieve global positions, either for masked or unmasked pool - PoolWrapper_< D > pool; - if ( mask_.get() ) // MaskedLayer will be freed by PoolWrapper d'tor - { - pool.define( new MaskedLayer< D >( source, mask_, allow_oversized_, source_nc ) ); - } - else - { - pool.define( source.get_global_positions_vector( source_nc ) ); - } - - std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( kernel().vp_manager.get_num_threads() ); - -#pragma omp parallel - { - const int thread_id = kernel().vp_manager.get_thread_id(); - try - { - NodeCollection::const_iterator target_begin = target_nc->begin(); - NodeCollection::const_iterator target_end = target_nc->end(); - - for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) - { - Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); - - if ( not tgt->is_proxy() ) - { - const Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); - - if ( mask_.get() ) - { - connect_to_target_( - pool.masked_begin( target_pos ), 
pool.masked_end(), tgt, target_pos, thread_id, source ); - } - else - { - connect_to_target_( pool.begin(), pool.end(), tgt, target_pos, thread_id, source ); - } - } - } // for target_begin - } - catch ( std::exception& err ) - { - // We must create a new exception here, err's lifetime ends at - // the end of the catch block. - exceptions_raised_.at( thread_id ) = - std::shared_ptr< WrappedThreadException >( new WrappedThreadException( err ) ); - } - } // omp parallel - // check if any exceptions have been raised - for ( size_t thr = 0; thr < kernel().vp_manager.get_num_threads(); ++thr ) - { - if ( exceptions_raised_.at( thr ).get() ) - { - throw WrappedThreadException( *( exceptions_raised_.at( thr ) ) ); - } - } -} - - -template < int D > -void -ConnectionCreator::pairwise_bernoulli_on_target_( Layer< D >& source, - NodeCollectionPTR source_nc, - Layer< D >& target, - NodeCollectionPTR target_nc ) -{ - // Connecting using pairwise Bernoulli drawing target nodes (source driven) - // It is actually implemented as pairwise Bernoulli on source nodes, - // but with displacements computed in the target layer. The Mask has been - // reversed so that it can be applied to the source instead of the target. - // For each local target node: - // 1. Apply (Converse)Mask to source layer - // 2. 
For each source node: Compute probability, draw random number, make - // connection conditionally - - PoolWrapper_< D > pool; - if ( mask_.get() ) // MaskedLayer will be freed by PoolWrapper d'tor - { - // By supplying the target layer to the MaskedLayer constructor, the - // mask is mirrored so it may be applied to the source layer instead - pool.define( new MaskedLayer< D >( source, mask_, allow_oversized_, target, source_nc ) ); - } - else - { - pool.define( source.get_global_positions_vector( source_nc ) ); - } - - std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( kernel().vp_manager.get_num_threads() ); - - // We only need to check the first in the NodeCollection - Node* const first_in_tgt = kernel().node_manager.get_node_or_proxy( target_nc->operator[]( 0 ) ); - if ( not first_in_tgt->has_proxies() ) - { - throw IllegalConnection( "Spatial Connect with pairwise_bernoulli to devices is not possible." ); - } - -#pragma omp parallel - { - const int thread_id = kernel().vp_manager.get_thread_id(); - try - { - NodeCollection::const_iterator target_begin = target_nc->thread_local_begin(); - NodeCollection::const_iterator target_end = target_nc->end(); - - for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) - { - Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); - - assert( not tgt->is_proxy() ); - - const Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); - - if ( mask_.get() ) - { - // We do the same as in the target driven case, except that we calculate displacements in the target layer. - // We therefore send in target as last parameter. - connect_to_target_( pool.masked_begin( target_pos ), pool.masked_end(), tgt, target_pos, thread_id, target ); - } - else - { - // We do the same as in the target driven case, except that we calculate displacements in the target layer. - // We therefore send in target as last parameter. 
- connect_to_target_( pool.begin(), pool.end(), tgt, target_pos, thread_id, target ); - } - - } // end for - } - catch ( std::exception& err ) - { - // We must create a new exception here, err's lifetime ends at the end of the catch block. - exceptions_raised_.at( thread_id ) = - std::shared_ptr< WrappedThreadException >( new WrappedThreadException( err ) ); - } - } // omp parallel - // check if any exceptions have been raised - for ( size_t thr = 0; thr < kernel().vp_manager.get_num_threads(); ++thr ) - { - if ( exceptions_raised_.at( thr ).get() ) - { - throw WrappedThreadException( *( exceptions_raised_.at( thr ) ) ); - } - } -} - - -template < int D > -void -ConnectionCreator::pairwise_poisson_( Layer< D >& source, - NodeCollectionPTR source_nc, - Layer< D >& target, - NodeCollectionPTR target_nc ) -{ - // Connect using pairwise Poisson drawing source nodes (target driven) - // For each local target node: - // 1. Apply Mask to source layer - // 2. For each source node: Compute probability, draw random number, make - // connection conditionally - - // retrieve global positions, either for masked or unmasked pool - PoolWrapper_< D > pool; - if ( mask_.get() ) // MaskedLayer will be freed by PoolWrapper d'tor - { - pool.define( new MaskedLayer< D >( source, mask_, allow_oversized_, source_nc ) ); - } - else - { - pool.define( source.get_global_positions_vector( source_nc ) ); - } - - std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( kernel().vp_manager.get_num_threads() ); - -#pragma omp parallel - { - const int thread_id = kernel().vp_manager.get_thread_id(); - try - { - NodeCollection::const_iterator target_begin = target_nc->begin(); - NodeCollection::const_iterator target_end = target_nc->end(); - - for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) - { - Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); - - if ( not tgt->is_proxy() ) - { - const 
Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); - - if ( mask_.get() ) - { - connect_to_target_poisson_( - pool.masked_begin( target_pos ), pool.masked_end(), tgt, target_pos, thread_id, source ); - } - else - { - connect_to_target_poisson_( pool.begin(), pool.end(), tgt, target_pos, thread_id, source ); - } - } - } // for target_begin - } - catch ( std::exception& err ) - { - // We must create a new exception here, err's lifetime ends at - // the end of the catch block. - exceptions_raised_.at( thread_id ) = - std::shared_ptr< WrappedThreadException >( new WrappedThreadException( err ) ); - } - } // omp parallel - // check if any exceptions have been raised - for ( size_t thr = 0; thr < kernel().vp_manager.get_num_threads(); ++thr ) - { - if ( exceptions_raised_.at( thr ).get() ) - { - throw WrappedThreadException( *( exceptions_raised_.at( thr ) ) ); - } - } -} - - -template < int D > -void -ConnectionCreator::fixed_indegree_( Layer< D >& source, - NodeCollectionPTR source_nc, - Layer< D >& target, - NodeCollectionPTR target_nc ) -{ - // fixed_indegree connections (fixed fan in) - // - // For each local target node: - // 1. Apply Mask to source layer - // 2. Compute connection probability for each source position - // 3. Draw source nodes and make connections - - // We only need to check the first in the NodeCollection - Node* const first_in_tgt = kernel().node_manager.get_node_or_proxy( target_nc->operator[]( 0 ) ); - if ( not first_in_tgt->has_proxies() ) - { - throw IllegalConnection( "Spatial Connect with fixed_indegree to devices is not possible." 
); - } - - NodeCollection::const_iterator target_begin = target_nc->rank_local_begin(); - NodeCollection::const_iterator target_end = target_nc->end(); - - // protect against connecting to devices without proxies - // we need to do this before creating the first connection to leave - // the network untouched if any target does not have proxies - for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) - { - Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id ); - - assert( not tgt->is_proxy() ); - } - - if ( mask_.get() ) - { - MaskedLayer< D > masked_source( source, mask_, allow_oversized_, source_nc ); - const auto masked_source_end = masked_source.end(); - - std::vector< std::pair< Position< D >, size_t > > positions; - - for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) - { - size_t target_id = ( *tgt_it ).node_id; - Node* const tgt = kernel().node_manager.get_node_or_proxy( target_id ); - - size_t target_thread = tgt->get_thread(); - RngPtr rng = get_vp_specific_rng( target_thread ); - Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); - - // We create a source pos vector here that can be updated with the - // source position. This is done to avoid creating and destroying - // unnecessarily many vectors. - std::vector< double > source_pos_vector( D ); - const std::vector< double > target_pos_vector = target_pos.get_vector(); - - unsigned long target_number_connections = - std::round( number_of_connections_->value( rng, source_pos_vector, target_pos_vector, source, tgt ) ); - - // Get (position,node ID) pairs for sources inside mask - positions.resize( std::distance( masked_source.begin( target_pos ), masked_source_end ) ); - std::copy( masked_source.begin( target_pos ), masked_source_end, positions.begin() ); - - // We will select `number_of_connections_` sources within the mask. 
- // If there is no kernel, we can just draw uniform random numbers, - // but with a kernel we have to set up a probability distribution - // function using a discrete_distribution. - if ( kernel_ ) - { - - std::vector< double > probabilities; - probabilities.reserve( positions.size() ); - - // Collect probabilities for the sources - for ( typename std::vector< std::pair< Position< D >, size_t > >::iterator iter = positions.begin(); - iter != positions.end(); - ++iter ) - { - iter->first.get_vector( source_pos_vector ); - probabilities.push_back( kernel_->value( rng, source_pos_vector, target_pos_vector, source, tgt ) ); - } - - if ( positions.empty() - or ( not allow_autapses_ and ( positions.size() == 1 ) and positions[ 0 ].second == target_id ) - or ( not allow_multapses_ and ( positions.size() < target_number_connections ) ) ) - { - std::string msg = String::compose( "Global target ID %1: Not enough sources found inside mask", target_id ); - throw KernelException( msg.c_str() ); - } - - // A discrete_distribution draws random integers with a non-uniform - // distribution. - discrete_distribution lottery; - const discrete_distribution::param_type param( probabilities.begin(), probabilities.end() ); - lottery.param( param ); - - // If multapses are not allowed, we must keep track of which - // sources have been selected already. 
- std::vector< bool > is_selected( positions.size() ); - - // Draw `target_number_connections` sources - while ( target_number_connections > 0 ) - { - size_t random_id = lottery( rng ); - if ( not allow_multapses_ and is_selected[ random_id ] ) - { - continue; - } - - size_t source_id = positions[ random_id ].second; - if ( not allow_autapses_ and source_id == target_id ) - { - continue; - } - positions[ random_id ].first.get_vector( source_pos_vector ); - for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) - { - const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - kernel().connection_manager.connect( - source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); - } - - is_selected[ random_id ] = true; - --target_number_connections; - } - } - else - { - - // no kernel - - if ( positions.empty() - or ( not allow_autapses_ and ( positions.size() == 1 ) and positions[ 0 ].second == target_id ) - or ( not allow_multapses_ and ( positions.size() < target_number_connections ) ) ) - { - std::string msg = String::compose( "Global target ID %1: Not enough sources found inside mask", target_id ); - throw KernelException( msg.c_str() ); - } - - // If multapses are not allowed, we must keep track of which - // sources have been selected already. 
- std::vector< bool > is_selected( positions.size() ); - - // Draw `target_number_connections` sources - while ( target_number_connections > 0 ) - { - const size_t random_id = rng->ulrand( positions.size() ); - if ( not allow_multapses_ and is_selected[ random_id ] ) - { - continue; - } - positions[ random_id ].first.get_vector( source_pos_vector ); - const size_t source_id = positions[ random_id ].second; - for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) - { - const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - kernel().connection_manager.connect( - source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); - } - - is_selected[ random_id ] = true; - --target_number_connections; - } - } - } - } - else - { - // no mask - - // Get (position,node ID) pairs for all nodes in source layer - std::vector< std::pair< Position< D >, size_t > >* positions = source.get_global_positions_vector( source_nc ); - - for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) - { - size_t target_id = ( *tgt_it ).node_id; - Node* const tgt = kernel().node_manager.get_node_or_proxy( target_id ); - size_t target_thread = tgt->get_thread(); - RngPtr rng = get_vp_specific_rng( target_thread ); - Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); - - unsigned long target_number_connections = std::round( number_of_connections_->value( rng, tgt ) ); - - std::vector< double > source_pos_vector( D ); - const std::vector< double > target_pos_vector = target_pos.get_vector(); - - if ( ( positions->size() == 0 ) - or ( not allow_autapses_ and ( positions->size() == 1 ) and ( ( *positions )[ 0 ].second == target_id ) ) - or ( not allow_multapses_ and ( positions->size() < target_number_connections ) ) ) - { - std::string msg = String::compose( 
"Global target ID %1: Not enough sources found", target_id ); - throw KernelException( msg.c_str() ); - } - - // We will select `target_number_connections` sources within the mask. - // If there is no kernel, we can just draw uniform random numbers, - // but with a kernel we have to set up a probability distribution - // function using a discrete_distribution. - if ( kernel_ ) - { - - std::vector< double > probabilities; - probabilities.reserve( positions->size() ); - - // Collect probabilities for the sources - for ( typename std::vector< std::pair< Position< D >, size_t > >::iterator iter = positions->begin(); - iter != positions->end(); - ++iter ) - { - iter->first.get_vector( source_pos_vector ); - probabilities.push_back( kernel_->value( rng, source_pos_vector, target_pos_vector, source, tgt ) ); - } - - // A discrete_distribution draws random integers with a non-uniform - // distribution. - discrete_distribution lottery; - const discrete_distribution::param_type param( probabilities.begin(), probabilities.end() ); - lottery.param( param ); - - // If multapses are not allowed, we must keep track of which - // sources have been selected already. 
- std::vector< bool > is_selected( positions->size() ); - - // Draw `target_number_connections` sources - while ( target_number_connections > 0 ) - { - const size_t random_id = lottery( rng ); - if ( not allow_multapses_ and is_selected[ random_id ] ) - { - continue; - } - - const size_t source_id = ( *positions )[ random_id ].second; - if ( not allow_autapses_ and source_id == target_id ) - { - continue; - } - - ( *positions )[ random_id ].first.get_vector( source_pos_vector ); - for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) - { - const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - kernel().connection_manager.connect( - source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); - } - - is_selected[ random_id ] = true; - --target_number_connections; - } - } - else - { - - // no kernel - - // If multapses are not allowed, we must keep track of which - // sources have been selected already. 
- std::vector< bool > is_selected( positions->size() ); - - // Draw `target_number_connections` sources - while ( target_number_connections > 0 ) - { - const size_t random_id = rng->ulrand( positions->size() ); - if ( not allow_multapses_ and is_selected[ random_id ] ) - { - continue; - } - - const size_t source_id = ( *positions )[ random_id ].second; - if ( not allow_autapses_ and source_id == target_id ) - { - continue; - } - - ( *positions )[ random_id ].first.get_vector( source_pos_vector ); - for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) - { - const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - kernel().connection_manager.connect( - source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); - } - - is_selected[ random_id ] = true; - --target_number_connections; - } - } - } - } -} - - -template < int D > -void -ConnectionCreator::fixed_outdegree_( Layer< D >& source, - NodeCollectionPTR source_nc, - Layer< D >& target, - NodeCollectionPTR target_nc ) -{ - // protect against connecting to devices without proxies - // we need to do this before creating the first connection to leave - // the network untouched if any target does not have proxies - - // We only need to check the first in the NodeCollection - Node* const first_in_tgt = kernel().node_manager.get_node_or_proxy( target_nc->operator[]( 0 ) ); - if ( not first_in_tgt->has_proxies() ) - { - throw IllegalConnection( "Spatial Connect with fixed_outdegree to devices is not possible." 
); - } - - NodeCollection::const_iterator target_begin = target_nc->rank_local_begin(); - NodeCollection::const_iterator target_end = target_nc->end(); - - for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) - { - Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id ); - - assert( not tgt->is_proxy() ); - } - - // Fixed_outdegree connections (fixed fan out) - // - // For each (global) source: (All connections made on all mpi procs) - // 1. Apply mask to global targets - // 2. If using kernel: Compute connection probability for each global target - // 3. Draw connections to make using global rng - - MaskedLayer< D > masked_target( target, mask_, allow_oversized_, target_nc ); - const auto masked_target_end = masked_target.end(); - - // We create a target positions vector here that can be updated with the - // position and node ID pairs. This is done to avoid creating and destroying - // unnecessarily many vectors. - std::vector< std::pair< Position< D >, size_t > > target_pos_node_id_pairs; - std::vector< std::pair< Position< D >, size_t > > source_pos_node_id_pairs = - *source.get_global_positions_vector( source_nc ); - - for ( const auto& source_pos_node_id_pair : source_pos_node_id_pairs ) - { - const Position< D > source_pos = source_pos_node_id_pair.first; - const size_t source_id = source_pos_node_id_pair.second; - const auto src = kernel().node_manager.get_node_or_proxy( source_id ); - const std::vector< double > source_pos_vector = source_pos.get_vector(); - - // We create a target pos vector here that can be updated with the - // target position. This is done to avoid creating and destroying - // unnecessarily many vectors. 
- std::vector< double > target_pos_vector( D ); - std::vector< double > probabilities; - - // Find potential targets and probabilities - RngPtr grng = get_rank_synced_rng(); - target_pos_node_id_pairs.resize( std::distance( masked_target.begin( source_pos ), masked_target_end ) ); - std::copy( masked_target.begin( source_pos ), masked_target_end, target_pos_node_id_pairs.begin() ); - - probabilities.reserve( target_pos_node_id_pairs.size() ); - if ( kernel_ ) - { - for ( const auto& target_pos_node_id_pair : target_pos_node_id_pairs ) - { - // TODO: Why is probability calculated in source layer, but weight and delay in target layer? - target_pos_node_id_pair.first.get_vector( target_pos_vector ); - const auto tgt = kernel().node_manager.get_node_or_proxy( target_pos_node_id_pair.second ); - probabilities.push_back( kernel_->value( grng, source_pos_vector, target_pos_vector, source, tgt ) ); - } - } - else - { - probabilities.resize( target_pos_node_id_pairs.size(), 1.0 ); - } - - unsigned long number_of_connections = std::round( number_of_connections_->value( grng, src ) ); - - if ( target_pos_node_id_pairs.empty() - or ( not allow_multapses_ and target_pos_node_id_pairs.size() < number_of_connections ) ) - { - std::string msg = String::compose( "Global source ID %1: Not enough targets found", source_id ); - throw KernelException( msg.c_str() ); - } - - // Draw targets. A discrete_distribution draws random integers with a - // non-uniform distribution. - discrete_distribution lottery; - const discrete_distribution::param_type param( probabilities.begin(), probabilities.end() ); - lottery.param( param ); - - // If multapses are not allowed, we must keep track of which - // targets have been selected already. 
- std::vector< bool > is_selected( target_pos_node_id_pairs.size() ); - - // Draw `number_of_connections` targets - while ( number_of_connections > 0 ) - { - const size_t random_id = lottery( get_rank_synced_rng() ); - if ( not allow_multapses_ and is_selected[ random_id ] ) - { - continue; - } - const size_t target_id = target_pos_node_id_pairs[ random_id ].second; - if ( not allow_autapses_ and source_id == target_id ) - { - continue; - } - - is_selected[ random_id ] = true; - - target_pos_node_id_pairs[ random_id ].first.get_vector( target_pos_vector ); - - std::vector< double > rng_weight_vec; - std::vector< double > rng_delay_vec; - for ( size_t indx = 0; indx < weight_.size(); ++indx ) - { - const auto tgt = kernel().node_manager.get_node_or_proxy( target_pos_node_id_pairs[ indx ].second ); - rng_weight_vec.push_back( weight_[ indx ]->value( grng, source_pos_vector, target_pos_vector, target, tgt ) ); - rng_delay_vec.push_back( delay_[ indx ]->value( grng, source_pos_vector, target_pos_vector, target, tgt ) ); - } - - // Each VP has now decided to create this connection and drawn any random parameter values - // required for it. Each VP thus counts the connection as created, but only the VP hosting the - // target neuron actually creates the connection. 
- --number_of_connections; - if ( not kernel().node_manager.is_local_node_id( target_id ) ) - { - continue; - } - - Node* target_ptr = kernel().node_manager.get_node_or_proxy( target_id ); - const size_t target_thread = target_ptr->get_thread(); - - for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) - { - kernel().connection_manager.connect( source_id, - target_ptr, - target_thread, - synapse_model_[ indx ], - param_dicts_[ indx ][ target_thread ], - rng_delay_vec[ indx ], - rng_weight_vec[ indx ] ); - } - } - } -} - -} // namespace nest - -#endif diff --git a/nestkernel/connection_label.h b/nestkernel/connection_label.h index a20b55f850..860f6994e2 100644 --- a/nestkernel/connection_label.h +++ b/nestkernel/connection_label.h @@ -25,7 +25,7 @@ #include "dictdatum.h" #include "dictutils.h" -#include "nest.h" +#include "exceptions.h" #include "nest_names.h" namespace nest diff --git a/nestkernel/connection_manager.cpp b/nestkernel/connection_manager.cpp index 0924fb73e2..c6e4cf954e 100644 --- a/nestkernel/connection_manager.cpp +++ b/nestkernel/connection_manager.cpp @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -44,7 +45,6 @@ #include "conn_builder_conngen.h" #include "conn_builder_factory.h" #include "connection_label.h" -#include "connection_manager_impl.h" #include "connector_base.h" #include "connector_model.h" #include "delay_checker.h" @@ -53,13 +53,14 @@ #include "eprop_archiving_node_recurrent.h" #include "exceptions.h" #include "kernel_manager.h" -#include "mpi_manager_impl.h" +#include "logging_manager.h" +#include "model_manager.h" #include "nest_names.h" #include "node.h" +#include "node_manager.h" +#include "simulation_manager.h" #include "sonata_connector.h" -#include "stopwatch_impl.h" -#include "target_table_devices_impl.h" -#include "vp_manager_impl.h" +#include "sp_manager.h" // Includes from sli: #include "dictutils.h" @@ -67,8 +68,10 @@ #include "token.h" #include "tokenutils.h" +namespace nest 
+{ -nest::ConnectionManager::ConnectionManager() +ConnectionManager::ConnectionManager() : connruledict_( new Dictionary() ) , connbuilder_factories_() , thirdconnruledict_( new Dictionary() ) @@ -87,7 +90,7 @@ nest::ConnectionManager::ConnectionManager() { } -nest::ConnectionManager::~ConnectionManager() +ConnectionManager::~ConnectionManager() { // Memory leak on purpose! // The ConnectionManager is deleted, when the network is deleted, and @@ -98,7 +101,7 @@ nest::ConnectionManager::~ConnectionManager() } void -nest::ConnectionManager::initialize( const bool adjust_number_of_threads_or_rng_only ) +ConnectionManager::initialize( const bool adjust_number_of_threads_or_rng_only ) { if ( not adjust_number_of_threads_or_rng_only ) { @@ -158,7 +161,7 @@ nest::ConnectionManager::initialize( const bool adjust_number_of_threads_or_rng_ } void -nest::ConnectionManager::finalize( const bool adjust_number_of_threads_or_rng_only ) +ConnectionManager::finalize( const bool adjust_number_of_threads_or_rng_only ) { source_table_.finalize(); target_table_.finalize(); @@ -187,7 +190,7 @@ nest::ConnectionManager::finalize( const bool adjust_number_of_threads_or_rng_on } void -nest::ConnectionManager::set_status( const DictionaryDatum& d ) +ConnectionManager::set_status( const DictionaryDatum& d ) { for ( size_t i = 0; i < delay_checkers_.size(); ++i ) { @@ -211,14 +214,14 @@ nest::ConnectionManager::set_status( const DictionaryDatum& d ) } } -nest::DelayChecker& -nest::ConnectionManager::get_delay_checker() +DelayChecker& +ConnectionManager::get_delay_checker() { return delay_checkers_[ kernel().vp_manager.get_thread_id() ]; } void -nest::ConnectionManager::get_status( DictionaryDatum& dict ) +ConnectionManager::get_status( DictionaryDatum& dict ) { update_delay_extrema_(); def< double >( dict, names::min_delay, Time( Time::step( min_delay_ ) ).get_ms() ); @@ -240,7 +243,7 @@ nest::ConnectionManager::get_status( DictionaryDatum& dict ) } DictionaryDatum 
-nest::ConnectionManager::get_synapse_status( const size_t source_node_id, +ConnectionManager::get_synapse_status( const size_t source_node_id, const size_t target_node_id, const size_t tid, const synindex syn_id, @@ -285,7 +288,7 @@ nest::ConnectionManager::get_synapse_status( const size_t source_node_id, } void -nest::ConnectionManager::set_synapse_status( const size_t source_node_id, +ConnectionManager::set_synapse_status( const size_t source_node_id, const size_t target_node_id, const size_t tid, const synindex syn_id, @@ -335,7 +338,7 @@ nest::ConnectionManager::set_synapse_status( const size_t source_node_id, } void -nest::ConnectionManager::delete_connections_() +ConnectionManager::delete_connections_() { for ( size_t tid = 0; tid < connections_.size(); ++tid ) { @@ -346,8 +349,8 @@ nest::ConnectionManager::delete_connections_() } } -const nest::Time -nest::ConnectionManager::get_min_delay_time_() const +const Time +ConnectionManager::get_min_delay_time_() const { Time min_delay = Time::pos_inf(); @@ -360,8 +363,8 @@ nest::ConnectionManager::get_min_delay_time_() const return min_delay; } -const nest::Time -nest::ConnectionManager::get_max_delay_time_() const +const Time +ConnectionManager::get_max_delay_time_() const { Time max_delay = Time::get_resolution(); @@ -375,7 +378,7 @@ nest::ConnectionManager::get_max_delay_time_() const } bool -nest::ConnectionManager::get_user_set_delay_extrema() const +ConnectionManager::get_user_set_delay_extrema() const { bool user_set_delay_extrema = false; @@ -388,8 +391,8 @@ nest::ConnectionManager::get_user_set_delay_extrema() const return user_set_delay_extrema; } -nest::BipartiteConnBuilder* -nest::ConnectionManager::get_conn_builder( const std::string& name, +BipartiteConnBuilder* +ConnectionManager::get_conn_builder( const std::string& name, NodeCollectionPTR sources, NodeCollectionPTR targets, ThirdOutBuilder* third_out, @@ -408,8 +411,8 @@ nest::ConnectionManager::get_conn_builder( const std::string& name, return 
cb; } -nest::ThirdOutBuilder* -nest::ConnectionManager::get_third_conn_builder( const std::string& name, +ThirdOutBuilder* +ConnectionManager::get_third_conn_builder( const std::string& name, NodeCollectionPTR sources, NodeCollectionPTR targets, ThirdInBuilder* third_in, @@ -429,7 +432,7 @@ nest::ConnectionManager::get_third_conn_builder( const std::string& name, } void -nest::ConnectionManager::calibrate( const TimeConverter& tc ) +ConnectionManager::calibrate( const TimeConverter& tc ) { for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) { @@ -438,7 +441,7 @@ nest::ConnectionManager::calibrate( const TimeConverter& tc ) } void -nest::ConnectionManager::connect( NodeCollectionPTR sources, +ConnectionManager::connect( NodeCollectionPTR sources, NodeCollectionPTR targets, const DictionaryDatum& conn_spec, const std::vector< DictionaryDatum >& syn_specs ) @@ -487,7 +490,7 @@ nest::ConnectionManager::connect( NodeCollectionPTR sources, void -nest::ConnectionManager::connect( TokenArray sources, TokenArray targets, const DictionaryDatum& syn_spec ) +ConnectionManager::connect( TokenArray sources, TokenArray targets, const DictionaryDatum& syn_spec ) { // Get synapse id size_t syn_id = 0; @@ -513,7 +516,7 @@ nest::ConnectionManager::connect( TokenArray sources, TokenArray targets, const void -nest::ConnectionManager::update_delay_extrema_() +ConnectionManager::update_delay_extrema_() { if ( kernel().simulation_manager.has_been_simulated() ) { @@ -560,7 +563,7 @@ nest::ConnectionManager::update_delay_extrema_() // node ID node thread syn_id dict delay weight void -nest::ConnectionManager::connect( const size_t snode_id, +ConnectionManager::connect( const size_t snode_id, Node* target, size_t target_thread, const synindex syn_id, @@ -592,7 +595,7 @@ nest::ConnectionManager::connect( const size_t snode_id, // node_id node_id dict syn_id bool -nest::ConnectionManager::connect( const size_t snode_id, +ConnectionManager::connect( const size_t snode_id, 
const size_t tnode_id, const DictionaryDatum& params, const synindex syn_id ) @@ -633,7 +636,7 @@ nest::ConnectionManager::connect( const size_t snode_id, } void -nest::ConnectionManager::connect_arrays( long* sources, +ConnectionManager::connect_arrays( long* sources, long* targets, double* weights, double* delays, @@ -808,7 +811,7 @@ nest::ConnectionManager::connect_arrays( long* sources, } void -nest::ConnectionManager::connect_sonata( const DictionaryDatum& graph_specs, const long hyberslab_size ) +ConnectionManager::connect_sonata( const DictionaryDatum& graph_specs, const long hyberslab_size ) { #ifdef HAVE_HDF5 SonataConnector sonata_connector( graph_specs, hyberslab_size ); @@ -823,7 +826,7 @@ nest::ConnectionManager::connect_sonata( const DictionaryDatum& graph_specs, con } void -nest::ConnectionManager::connect_tripartite( NodeCollectionPTR sources, +ConnectionManager::connect_tripartite( NodeCollectionPTR sources, NodeCollectionPTR targets, NodeCollectionPTR third, const DictionaryDatum& conn_spec, @@ -884,7 +887,7 @@ nest::ConnectionManager::connect_tripartite( NodeCollectionPTR sources, void -nest::ConnectionManager::connect_( Node& source, +ConnectionManager::connect_( Node& source, Node& target, const size_t s_node_id, const size_t tid, @@ -940,7 +943,7 @@ nest::ConnectionManager::connect_( Node& source, } void -nest::ConnectionManager::connect_to_device_( Node& source, +ConnectionManager::connect_to_device_( Node& source, Node& target, const size_t s_node_id, const size_t tid, @@ -956,7 +959,7 @@ nest::ConnectionManager::connect_to_device_( Node& source, } void -nest::ConnectionManager::connect_from_device_( Node& source, +ConnectionManager::connect_from_device_( Node& source, Node& target, const size_t tid, const synindex syn_id, @@ -971,7 +974,7 @@ nest::ConnectionManager::connect_from_device_( Node& source, } void -nest::ConnectionManager::increase_connection_count( const size_t tid, const synindex syn_id ) 
+ConnectionManager::increase_connection_count( const size_t tid, const synindex syn_id ) { if ( num_connections_[ tid ].size() <= syn_id ) { @@ -989,7 +992,7 @@ nest::ConnectionManager::increase_connection_count( const size_t tid, const syni } size_t -nest::ConnectionManager::find_connection( const size_t tid, +ConnectionManager::find_connection( const size_t tid, const synindex syn_id, const size_t snode_id, const size_t tnode_id ) @@ -1014,10 +1017,7 @@ nest::ConnectionManager::find_connection( const size_t tid, } void -nest::ConnectionManager::disconnect( const size_t tid, - const synindex syn_id, - const size_t snode_id, - const size_t tnode_id ) +ConnectionManager::disconnect( const size_t tid, const synindex syn_id, const size_t snode_id, const size_t tnode_id ) { assert( syn_id != invalid_synindex ); @@ -1036,7 +1036,7 @@ nest::ConnectionManager::disconnect( const size_t tid, } void -nest::ConnectionManager::trigger_update_weight( const long vt_id, +ConnectionManager::trigger_update_weight( const long vt_id, const std::vector< spikecounter >& dopa_spikes, const double t_trig ) { @@ -1054,7 +1054,7 @@ nest::ConnectionManager::trigger_update_weight( const long vt_id, } size_t -nest::ConnectionManager::get_num_target_data( const size_t tid ) const +ConnectionManager::get_num_target_data( const size_t tid ) const { size_t num_connections = 0; for ( synindex syn_id = 0; syn_id < connections_[ tid ].size(); ++syn_id ) @@ -1068,7 +1068,7 @@ nest::ConnectionManager::get_num_target_data( const size_t tid ) const } size_t -nest::ConnectionManager::get_num_connections() const +ConnectionManager::get_num_connections() const { size_t num_connections = 0; for ( size_t t = 0; t < num_connections_.size(); ++t ) @@ -1083,7 +1083,7 @@ nest::ConnectionManager::get_num_connections() const } size_t -nest::ConnectionManager::get_num_connections( const synindex syn_id ) const +ConnectionManager::get_num_connections( const synindex syn_id ) const { size_t num_connections = 0; for ( 
size_t t = 0; t < num_connections_.size(); ++t ) @@ -1098,7 +1098,7 @@ nest::ConnectionManager::get_num_connections( const synindex syn_id ) const } ArrayDatum -nest::ConnectionManager::get_connections( const DictionaryDatum& params ) +ConnectionManager::get_connections( const DictionaryDatum& params ) { std::deque< ConnectionID > connectome; const Token& source_t = params->lookup( names::source ); @@ -1180,8 +1180,8 @@ nest::ConnectionManager::get_connections( const DictionaryDatum& params ) // Helper method which removes ConnectionIDs from input deque and // appends them to output deque. -static inline std::deque< nest::ConnectionID >& -extend_connectome( std::deque< nest::ConnectionID >& out, std::deque< nest::ConnectionID >& in ) +static inline std::deque< ConnectionID >& +extend_connectome( std::deque< ConnectionID >& out, std::deque< ConnectionID >& in ) { while ( not in.empty() ) { @@ -1193,7 +1193,7 @@ extend_connectome( std::deque< nest::ConnectionID >& out, std::deque< nest::Conn } void -nest::ConnectionManager::split_to_neuron_device_vectors_( const size_t tid, +ConnectionManager::split_to_neuron_device_vectors_( const size_t tid, NodeCollectionPTR nodecollection, std::vector< size_t >& neuron_node_ids, std::vector< size_t >& device_node_ids ) const @@ -1218,7 +1218,7 @@ nest::ConnectionManager::split_to_neuron_device_vectors_( const size_t tid, } void -nest::ConnectionManager::get_connections_( const size_t tid, +ConnectionManager::get_connections_( const size_t tid, std::deque< ConnectionID >& conns_in_thread, NodeCollectionPTR, NodeCollectionPTR, @@ -1397,7 +1397,7 @@ nest::ConnectionManager::get_connections( std::deque< ConnectionID >& connectome } void -nest::ConnectionManager::get_source_node_ids_( const size_t tid, +ConnectionManager::get_source_node_ids_( const size_t tid, const synindex syn_id, const size_t tnode_id, std::vector< size_t >& sources ) @@ -1411,7 +1411,7 @@ nest::ConnectionManager::get_source_node_ids_( const size_t tid, } void 
-nest::ConnectionManager::get_sources( const std::vector< size_t >& targets, +ConnectionManager::get_sources( const std::vector< size_t >& targets, const size_t syn_id, std::vector< std::vector< size_t > >& sources ) { @@ -1431,7 +1431,7 @@ nest::ConnectionManager::get_sources( const std::vector< size_t >& targets, } void -nest::ConnectionManager::get_targets( const std::vector< size_t >& sources, +ConnectionManager::get_targets( const std::vector< size_t >& sources, const size_t syn_id, const std::string& post_synaptic_element, std::vector< std::vector< size_t > >& targets ) @@ -1473,7 +1473,7 @@ nest::ConnectionManager::sort_connections( const size_t tid ) } void -nest::ConnectionManager::compute_target_data_buffer_size() +ConnectionManager::compute_target_data_buffer_size() { // Determine number of target data on this rank. Since each thread // has its own data structures, we need to count connections on every @@ -1499,7 +1499,7 @@ nest::ConnectionManager::compute_target_data_buffer_size() } void -nest::ConnectionManager::compute_compressed_secondary_recv_buffer_positions( const size_t tid ) +ConnectionManager::compute_compressed_secondary_recv_buffer_positions( const size_t tid ) { #pragma omp single { @@ -1531,7 +1531,7 @@ nest::ConnectionManager::compute_compressed_secondary_recv_buffer_positions( con { const size_t source_node_id = source_table_.get_node_id( tid, syn_id, lcid ); const size_t sg_s_id = source_table_.pack_source_node_id_and_syn_id( source_node_id, syn_id ); - const size_t source_rank = kernel().mpi_manager.get_process_id_of_node_id( source_node_id ); + const size_t source_rank = kernel().vp_manager.get_process_id_of_node_id( source_node_id ); positions[ lcid ] = buffer_pos_of_source_node_id_syn_id_[ sg_s_id ] + kernel().mpi_manager.get_recv_displacement_secondary_events_in_int( source_rank ); @@ -1541,8 +1541,8 @@ nest::ConnectionManager::compute_compressed_secondary_recv_buffer_positions( con } } -nest::ConnectionManager::ConnectionType 
-nest::ConnectionManager::connection_required( Node*& source, Node*& target, size_t tid ) +ConnectionManager::ConnectionType +ConnectionManager::connection_required( Node*& source, Node*& target, size_t tid ) { // The caller has to check and guarantee that the target is not a // proxy and that it is on thread tid. @@ -1634,7 +1634,7 @@ nest::ConnectionManager::connection_required( Node*& source, Node*& target, size } void -nest::ConnectionManager::set_stdp_eps( const double stdp_eps ) +ConnectionManager::set_stdp_eps( const double stdp_eps ) { if ( not( stdp_eps < Time::get_resolution().get_ms() ) ) { @@ -1663,7 +1663,7 @@ nest::ConnectionManager::set_stdp_eps( const double stdp_eps ) // recv_buffer can not be a const reference as iterators used in // secondary events must not be const bool -nest::ConnectionManager::deliver_secondary_events( const size_t tid, +ConnectionManager::deliver_secondary_events( const size_t tid, const bool called_from_wfr_update, std::vector< unsigned int >& recv_buffer ) { @@ -1711,13 +1711,13 @@ nest::ConnectionManager::deliver_secondary_events( const size_t tid, } void -nest::ConnectionManager::compress_secondary_send_buffer_pos( const size_t tid ) +ConnectionManager::compress_secondary_send_buffer_pos( const size_t tid ) { target_table_.compress_secondary_send_buffer_pos( tid ); } void -nest::ConnectionManager::remove_disabled_connections( const size_t tid ) +ConnectionManager::remove_disabled_connections( const size_t tid ) { std::vector< ConnectorBase* >& connectors = connections_[ tid ]; @@ -1741,7 +1741,7 @@ nest::ConnectionManager::remove_disabled_connections( const size_t tid ) } void -nest::ConnectionManager::resize_connections() +ConnectionManager::resize_connections() { kernel().vp_manager.assert_thread_parallel(); @@ -1752,19 +1752,19 @@ nest::ConnectionManager::resize_connections() } void -nest::ConnectionManager::sync_has_primary_connections() +ConnectionManager::sync_has_primary_connections() { has_primary_connections_ = 
kernel().mpi_manager.any_true( has_primary_connections_ ); } void -nest::ConnectionManager::check_secondary_connections_exist() +ConnectionManager::check_secondary_connections_exist() { secondary_connections_exist_ = kernel().mpi_manager.any_true( secondary_connections_exist_ ); } void -nest::ConnectionManager::set_connections_have_changed() +ConnectionManager::set_connections_have_changed() { assert( kernel().vp_manager.get_thread_id() == 0 ); @@ -1781,14 +1781,14 @@ nest::ConnectionManager::set_connections_have_changed() } void -nest::ConnectionManager::unset_connections_have_changed() +ConnectionManager::unset_connections_have_changed() { connections_have_changed_ = false; } void -nest::ConnectionManager::collect_compressed_spike_data( const size_t tid ) +ConnectionManager::collect_compressed_spike_data( const size_t tid ) { if ( use_compressed_spikes_ ) { @@ -1799,9 +1799,9 @@ nest::ConnectionManager::collect_compressed_spike_data( const size_t tid ) } // of omp single; implicit barrier source_table_.collect_compressible_sources( tid ); - kernel().get_omp_synchronization_construction_stopwatch().start(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); #pragma omp single { source_table_.fill_compressed_spike_data( compressed_spike_data_ ); @@ -1810,7 +1810,7 @@ nest::ConnectionManager::collect_compressed_spike_data( const size_t tid ) } bool -nest::ConnectionManager::fill_target_buffer( const size_t tid, +ConnectionManager::fill_target_buffer( const size_t tid, const size_t rank_start, const size_t rank_end, std::vector< TargetData >& send_buffer_target_data, @@ -1836,7 +1836,7 @@ nest::ConnectionManager::fill_target_buffer( const size_t tid, while ( source_2_idx != csd_maps.at( syn_id ).end() ) { const auto source_gid = source_2_idx->first; - const 
auto source_rank = kernel().mpi_manager.get_process_id_of_node_id( source_gid ); + const auto source_rank = kernel().vp_manager.get_process_id_of_node_id( source_gid ); if ( not( rank_start <= source_rank and source_rank < rank_end ) ) { // We are not responsible for this source. @@ -1925,7 +1925,7 @@ nest::ConnectionManager::fill_target_buffer( const size_t tid, } void -nest::ConnectionManager::initialize_iteration_state() +ConnectionManager::initialize_iteration_state() { const size_t num_threads = kernel().vp_manager.get_num_threads(); iteration_state_.clear(); @@ -1940,3 +1940,23 @@ nest::ConnectionManager::initialize_iteration_state() iteration_state_.push_back( std::pair< size_t, std::map< size_t, CSDMapEntry >::const_iterator >( 0, begin ) ); } } + +void +ConnectionManager::send_to_devices( const size_t tid, const size_t source_node_id, Event& e ) +{ + target_table_devices_.send_to_device( tid, source_node_id, e, kernel().model_manager.get_connection_models( tid ) ); +} + +void +ConnectionManager::send_to_devices( const size_t tid, const size_t source_node_id, SecondaryEvent& e ) +{ + target_table_devices_.send_to_device( tid, source_node_id, e, kernel().model_manager.get_connection_models( tid ) ); +} + +void +ConnectionManager::send_from_device( const size_t tid, const size_t ldid, Event& e ) +{ + target_table_devices_.send_from_device( tid, ldid, e, kernel().model_manager.get_connection_models( tid ) ); +} + +} diff --git a/nestkernel/connection_manager.h b/nestkernel/connection_manager.h index ca0c07d11d..d9c032f56c 100644 --- a/nestkernel/connection_manager.h +++ b/nestkernel/connection_manager.h @@ -31,11 +31,10 @@ #include "stopwatch.h" // Includes from nestkernel: -#include "conn_builder.h" +#include "conn_builder_factory.h" #include "connection_id.h" #include "connector_base.h" #include "nest_time.h" -#include "nest_timeconverter.h" #include "nest_types.h" #include "node_collection.h" #include "per_thread_bool_indicator.h" @@ -54,13 +53,17 @@ 
namespace nest { class GenericBipartiteConnBuilderFactory; class GenericThirdConnBuilderFactory; -class spikecounter; class Node; class Event; class SecondaryEvent; class DelayChecker; class GrowthCurve; class SpikeData; +class BipartiteConnBuilder; +class ThirdOutBuilder; +class ThirdInBuilder; +class Time; +class TimeConverter; class ConnectionManager : public ManagerInterface { @@ -75,18 +78,25 @@ class ConnectionManager : public ManagerInterface }; ConnectionManager(); + ~ConnectionManager() override; void initialize( const bool ) override; + void finalize( const bool ) override; + void set_status( const DictionaryDatum& ) override; + void get_status( DictionaryDatum& ) override; bool valid_connection_rule( std::string ); void compute_target_data_buffer_size(); + void compute_compressed_secondary_recv_buffer_positions( const size_t tid ); + void collect_compressed_spike_data( const size_t tid ); + void clear_compressed_spike_data_map(); /** @@ -278,6 +288,7 @@ class ConnectionManager : public ManagerInterface size_t get_target_node_id( const size_t tid, const synindex syn_id, const size_t lcid ) const; bool get_device_connected( size_t tid, size_t lcid ) const; + /** * Triggered by volume transmitter in update. 
* @@ -311,6 +322,7 @@ class ConnectionManager : public ManagerInterface * Send event e to all device targets of source source_node_id */ void send_to_devices( const size_t tid, const size_t source_node_id, Event& e ); + void send_to_devices( const size_t tid, const size_t source_node_id, SecondaryEvent& e ); /** @@ -480,12 +492,14 @@ class ConnectionManager : public ManagerInterface NodeCollectionPTR target, synindex syn_id, long synapse_label ) const; + void get_connections_to_targets_( const size_t tid, std::deque< ConnectionID >& connectome, NodeCollectionPTR source, NodeCollectionPTR target, synindex syn_id, long synapse_label ) const; + void get_connections_from_sources_( const size_t tid, std::deque< ConnectionID >& connectome, NodeCollectionPTR source, @@ -946,6 +960,30 @@ ConnectionManager::clear_compressed_spike_data_map() source_table_.clear_compressed_spike_data_map(); } +template < typename ConnBuilder > +void +ConnectionManager::register_conn_builder( const std::string& name ) +{ + assert( not connruledict_->known( name ) ); + GenericBipartiteConnBuilderFactory* cb = new BipartiteConnBuilderFactory< ConnBuilder >(); + assert( cb ); + const int id = connbuilder_factories_.size(); + connbuilder_factories_.push_back( cb ); + connruledict_->insert( name, id ); +} + +template < typename ThirdConnBuilder > +void +ConnectionManager::register_third_conn_builder( const std::string& name ) +{ + assert( not thirdconnruledict_->known( name ) ); + GenericThirdConnBuilderFactory* cb = new ThirdConnBuilderFactory< ThirdConnBuilder >(); + assert( cb ); + const int id = thirdconnbuilder_factories_.size(); + thirdconnbuilder_factories_.push_back( cb ); + thirdconnruledict_->insert( name, id ); +} + } // namespace nest #endif /* CONNECTION_MANAGER_H */ diff --git a/nestkernel/connection_manager_impl.h b/nestkernel/connection_manager_impl.h deleted file mode 100644 index 3c8d74034c..0000000000 --- a/nestkernel/connection_manager_impl.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - 
* connection_manager_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#ifndef CONNECTION_MANAGER_IMPL_H -#define CONNECTION_MANAGER_IMPL_H - -#include "connection_manager.h" - -// C++ includes: -#include - -// Includes from nestkernel: -#include "conn_builder.h" -#include "conn_builder_factory.h" -#include "connector_base.h" -#include "kernel_manager.h" -#include "target_table_devices_impl.h" - -namespace nest -{ - -template < typename ConnBuilder > -void -ConnectionManager::register_conn_builder( const std::string& name ) -{ - assert( not connruledict_->known( name ) ); - GenericBipartiteConnBuilderFactory* cb = new BipartiteConnBuilderFactory< ConnBuilder >(); - assert( cb ); - const int id = connbuilder_factories_.size(); - connbuilder_factories_.push_back( cb ); - connruledict_->insert( name, id ); -} - -template < typename ThirdConnBuilder > -void -ConnectionManager::register_third_conn_builder( const std::string& name ) -{ - assert( not thirdconnruledict_->known( name ) ); - GenericThirdConnBuilderFactory* cb = new ThirdConnBuilderFactory< ThirdConnBuilder >(); - assert( cb ); - const int id = thirdconnbuilder_factories_.size(); - thirdconnbuilder_factories_.push_back( cb ); - thirdconnruledict_->insert( name, id ); -} - -inline void -ConnectionManager::send_to_devices( const size_t tid, const size_t 
source_node_id, Event& e ) -{ - target_table_devices_.send_to_device( tid, source_node_id, e, kernel().model_manager.get_connection_models( tid ) ); -} - -inline void -ConnectionManager::send_to_devices( const size_t tid, const size_t source_node_id, SecondaryEvent& e ) -{ - target_table_devices_.send_to_device( tid, source_node_id, e, kernel().model_manager.get_connection_models( tid ) ); -} - -inline void -ConnectionManager::send_from_device( const size_t tid, const size_t ldid, Event& e ) -{ - target_table_devices_.send_from_device( tid, ldid, e, kernel().model_manager.get_connection_models( tid ) ); -} - -} // namespace nest - -#endif /* CONNECTION_MANAGER_IMPL_H */ diff --git a/nestkernel/connector_base.cpp b/nestkernel/connector_base.cpp new file mode 100644 index 0000000000..951e3c0334 --- /dev/null +++ b/nestkernel/connector_base.cpp @@ -0,0 +1,50 @@ +/* + * connector_base.cpp + * + * This file is part of NEST. + * + * Copyright (C) 2004 The NEST Initiative + * + * NEST is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * NEST is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with NEST. If not, see . 
+ * + */ + +#include "connector_base.h" + +#include "connection_manager.h" + +namespace nest +{ + +void +ConnectorBase::prepare_weight_recorder_event( WeightRecorderEvent& wr_e, + const size_t tid, + const synindex syn_id, + const unsigned int lcid, + const Event& e, + const CommonSynapseProperties& cp ) +{ + wr_e.set_port( e.get_port() ); + wr_e.set_rport( e.get_rport() ); + wr_e.set_stamp( e.get_stamp() ); + // Sender is not available for SecondaryEvents, and not needed, so we do not set it to avoid undefined behavior. + wr_e.set_sender_node_id( kernel().connection_manager.get_source_node_id( tid, syn_id, lcid ) ); + wr_e.set_weight( e.get_weight() ); + wr_e.set_delay_steps( e.get_delay_steps() ); + wr_e.set_receiver( *static_cast< Node* >( cp.get_weight_recorder() ) ); + // Set the node_id of the postsynaptic node as receiver node ID + wr_e.set_receiver_node_id( e.get_receiver_node_id() ); +} + +} // namespace nest diff --git a/nestkernel/connector_base.h b/nestkernel/connector_base.h index 7cdd91b1e8..55bf7398f3 100644 --- a/nestkernel/connector_base.h +++ b/nestkernel/connector_base.h @@ -28,12 +28,18 @@ // C++ includes: #include +#include #include +// Includes from models: +#include "weight_recorder.h" + +#ifdef HAVE_SIONLIB +#include +#endif + // Includes from libnestutil: -#include "compose.hpp" #include "sort.h" -#include "vector_util.h" // Includes from nestkernel: #include "common_synapse_properties.h" @@ -47,12 +53,15 @@ #include "spikecounter.h" // Includes from sli: -#include "arraydatum.h" #include "dictutils.h" namespace nest { +class ConnectorModel; +template < typename ConnectionT > +class GenericConnectorModel; + /** * Base class to allow storing Connectors for different synapse types * in vectors. We define the interface here to avoid casting. 
@@ -63,7 +72,6 @@ namespace nest */ class ConnectorBase { - public: // Destructor needs to be declared virtual to avoid undefined // behavior, avoid possible memory leak and needs to be defined to @@ -211,6 +219,14 @@ class ConnectorBase * Remove disabled connections from the connector. */ virtual void remove_disabled_connections( const size_t first_disabled_index ) = 0; + +protected: + void prepare_weight_recorder_event( WeightRecorderEvent& wr_e, + const size_t tid, + const synindex syn_id, + const unsigned int lcid, + const Event& e, + const CommonSynapseProperties& cp ); }; /** @@ -219,7 +235,6 @@ class ConnectorBase template < typename ConnectionT > class Connector : public ConnectorBase { -private: BlockVector< ConnectionT > C_; const synindex syn_id_; @@ -425,7 +440,6 @@ class Connector : public ConnectorBase return 1 + lcid_offset; // event was delivered to at least one target } - // Implemented in connector_base_impl.h void send_weight_event( const size_t tid, const unsigned int lcid, Event& e, const CommonSynapseProperties& cp ) override; @@ -514,6 +528,24 @@ class Connector : public ConnectorBase } }; +template < typename ConnectionT > +void +Connector< ConnectionT >::send_weight_event( const size_t tid, + const unsigned int lcid, + Event& e, + const CommonSynapseProperties& cp ) +{ + // If the pointer to the receiver node in the event is invalid, + // the event was not sent, and a WeightRecorderEvent is therefore not created. + if ( cp.get_weight_recorder() and e.receiver_is_valid() ) + { + // Create new event to record the weight and copy relevant content. 
+ WeightRecorderEvent wr_e; + prepare_weight_recorder_event( wr_e, tid, syn_id_, lcid, e, cp ); + wr_e(); + } +} + } // of namespace nest #endif diff --git a/nestkernel/connector_base_impl.h b/nestkernel/connector_base_impl.h index a322b2e336..e69de29bb2 100644 --- a/nestkernel/connector_base_impl.h +++ b/nestkernel/connector_base_impl.h @@ -1,67 +0,0 @@ -/* - * connector_base_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#include "connector_base.h" - -// Includes from nestkernel: -#include "kernel_manager.h" - -// Includes from models: -#include "weight_recorder.h" - -#ifndef CONNECTOR_BASE_IMPL_H -#define CONNECTOR_BASE_IMPL_H - -namespace nest -{ - -template < typename ConnectionT > -void -Connector< ConnectionT >::send_weight_event( const size_t tid, - const unsigned int lcid, - Event& e, - const CommonSynapseProperties& cp ) -{ - // If the pointer to the receiver node in the event is invalid, - // the event was not sent, and a WeightRecorderEvent is therefore not created. - if ( cp.get_weight_recorder() and e.receiver_is_valid() ) - { - // Create new event to record the weight and copy relevant content. 
- WeightRecorderEvent wr_e; - wr_e.set_port( e.get_port() ); - wr_e.set_rport( e.get_rport() ); - wr_e.set_stamp( e.get_stamp() ); - // Sender is not available for SecondaryEvents, and not needed, so we do not - // set it to avoid undefined behavior. - wr_e.set_sender_node_id( kernel().connection_manager.get_source_node_id( tid, syn_id_, lcid ) ); - wr_e.set_weight( e.get_weight() ); - wr_e.set_delay_steps( e.get_delay_steps() ); - wr_e.set_receiver( *static_cast< Node* >( cp.get_weight_recorder() ) ); - // Set the node_id of the postsynaptic node as receiver node ID - wr_e.set_receiver_node_id( e.get_receiver_node_id() ); - wr_e(); - } -} - -} // of namespace nest - -#endif diff --git a/nestkernel/connector_model.cpp b/nestkernel/connector_model.cpp index 6e3bb9f078..a200aafd8c 100644 --- a/nestkernel/connector_model.cpp +++ b/nestkernel/connector_model.cpp @@ -21,6 +21,7 @@ */ #include "connector_model.h" +#include "model_manager.h" namespace nest { @@ -39,4 +40,10 @@ ConnectorModel::ConnectorModel( const ConnectorModel& cm, const std::string name { } +size_t +ConnectorModel::get_synapse_model_id( const std::string& name ) +{ + return kernel().model_manager.get_synapse_model_id( name ); +} + } // namespace nest diff --git a/nestkernel/connector_model.h b/nestkernel/connector_model.h index 0a7f83ce8e..5cd64a9d16 100644 --- a/nestkernel/connector_model.h +++ b/nestkernel/connector_model.h @@ -28,17 +28,16 @@ #include // Includes from libnestutil: +#include "enum_bitfield.h" #include "numerics.h" // Includes from nestkernel: -#include "enum_bitfield.h" #include "event.h" #include "nest_time.h" #include "nest_types.h" #include "secondary_event.h" +#include "simulation_manager.h" -// Includes from sli: -#include "dictutils.h" namespace nest { @@ -143,6 +142,9 @@ class ConnectorModel } protected: + // helper function to avoid circular dependency + static size_t get_synapse_model_id( const std::string& name ); + std::string name_; //!< name of the ConnectorModel bool 
default_delay_needs_check_; //!< indicates whether the default delay must be checked ConnectionModelProperties properties_; //!< connection properties diff --git a/nestkernel/connector_model_impl.h b/nestkernel/connector_model_impl.h index fc831cb916..4527a8dfac 100644 --- a/nestkernel/connector_model_impl.h +++ b/nestkernel/connector_model_impl.h @@ -20,28 +20,9 @@ * */ -#ifndef CONNECTOR_MODEL_IMPL_H -#define CONNECTOR_MODEL_IMPL_H - -#include "connector_model.h" - -// Generated includes: -#include "config.h" - -// Includes from libnestutil: -#include "compose.hpp" -#include "enum_bitfield.h" - -// Includes from nestkernel: #include "connector_base.h" +#include "connector_model.h" #include "delay_checker.h" -#include "kernel_manager.h" -#include "nest_time.h" -#include "nest_timeconverter.h" -#include "secondary_event_impl.h" - -// Includes from sli: -#include "dictutils.h" namespace nest { @@ -63,7 +44,7 @@ namespace nest // } template < typename ConnectionT > -ConnectorModel* +inline ConnectorModel* GenericConnectorModel< ConnectionT >::clone( std::string name, synindex syn_id ) const { ConnectorModel* new_cm = new GenericConnectorModel( *this, name ); // calls copy construtor @@ -79,7 +60,7 @@ GenericConnectorModel< ConnectionT >::clone( std::string name, synindex syn_id ) } template < typename ConnectionT > -void +inline void GenericConnectorModel< ConnectionT >::calibrate( const TimeConverter& tc ) { // calibrate the delay of the default properties here @@ -93,7 +74,7 @@ GenericConnectorModel< ConnectionT >::calibrate( const TimeConverter& tc ) } template < typename ConnectionT > -void +inline void GenericConnectorModel< ConnectionT >::get_status( DictionaryDatum& d ) const { // first get properties common to all synapses @@ -105,7 +86,7 @@ GenericConnectorModel< ConnectionT >::get_status( DictionaryDatum& d ) const ( *d )[ names::receptor_type ] = receptor_type_; ( *d )[ names::synapse_model ] = LiteralDatum( name_ ); - ( *d )[ names::synapse_modelid ] = 
kernel().model_manager.get_synapse_model_id( name_ ); + ( *d )[ names::synapse_modelid ] = get_synapse_model_id( name_ ); ( *d )[ names::requires_symmetric ] = has_property( ConnectionModelProperties::REQUIRES_SYMMETRIC ); ( *d )[ names::has_delay ] = has_property( ConnectionModelProperties::HAS_DELAY ); } @@ -139,7 +120,7 @@ GenericConnectorModel< ConnectionT >::set_status( const DictionaryDatum& d ) } template < typename ConnectionT > -void +inline void GenericConnectorModel< ConnectionT >::check_synapse_params( const DictionaryDatum& syn_spec ) const { // This is called just once per Connect() call, so we need not worry much about performance. @@ -161,7 +142,7 @@ GenericConnectorModel< ConnectionT >::check_synapse_params( const DictionaryDatu template < typename ConnectionT > -void +inline void GenericConnectorModel< ConnectionT >::used_default_delay() { // if not used before, check now. Solves bug #138, MH 08-01-08 @@ -204,21 +185,21 @@ GenericConnectorModel< ConnectionT >::used_default_delay() } template < typename ConnectionT > -size_t +inline size_t GenericConnectorModel< ConnectionT >::get_syn_id() const { return default_connection_.get_syn_id(); } template < typename ConnectionT > -void +inline void GenericConnectorModel< ConnectionT >::set_syn_id( synindex syn_id ) { default_connection_.set_syn_id( syn_id ); } template < typename ConnectionT > -void +inline void GenericConnectorModel< ConnectionT >::add_connection( Node& src, Node& tgt, std::vector< ConnectorBase* >& thread_local_connectors, @@ -294,7 +275,7 @@ GenericConnectorModel< ConnectionT >::add_connection( Node& src, template < typename ConnectionT > -void +inline void GenericConnectorModel< ConnectionT >::add_connection_( Node& src, Node& tgt, std::vector< ConnectorBase* >& thread_local_connectors, @@ -322,5 +303,3 @@ GenericConnectorModel< ConnectionT >::add_connection_( Node& src, } } // namespace nest - -#endif diff --git a/nestkernel/delay_checker.cpp b/nestkernel/delay_checker.cpp index 
8a1776c98e..7148ebd110 100644 --- a/nestkernel/delay_checker.cpp +++ b/nestkernel/delay_checker.cpp @@ -30,6 +30,13 @@ #include "kernel_manager.h" #include "nest_timeconverter.h" +#include "compose.hpp" +#include "connection_manager.h" +#include "dictutils.h" +#include "logging.h" +#include "logging_manager.h" +#include "nest_names.h" + nest::DelayChecker::DelayChecker() : min_delay_( Time::pos_inf() ) , max_delay_( Time::neg_inf() ) diff --git a/nestkernel/eprop_archiving_node.h b/nestkernel/eprop_archiving_node.h index 04cfc2d3ba..673f14814c 100644 --- a/nestkernel/eprop_archiving_node.h +++ b/nestkernel/eprop_archiving_node.h @@ -152,6 +152,161 @@ class EpropArchivingNode : public Node const long delay_out_rec_ = 1; }; +template < typename HistEntryT > +EpropArchivingNode< HistEntryT >::EpropArchivingNode() + : Node() + , eprop_indegree_( 0 ) +{ +} + +template < typename HistEntryT > +EpropArchivingNode< HistEntryT >::EpropArchivingNode( const EpropArchivingNode& n ) + : Node( n ) + , eprop_indegree_( n.eprop_indegree_ ) +{ +} + +template < typename HistEntryT > +void +EpropArchivingNode< HistEntryT >::register_eprop_connection() +{ + ++eprop_indegree_; + + const long t_first_entry = model_dependent_history_shift_(); + + const auto it_hist = get_update_history( t_first_entry ); + + if ( it_hist == update_history_.end() or it_hist->t_ != t_first_entry ) + { + update_history_.insert( it_hist, HistEntryEpropUpdate( t_first_entry, 1 ) ); + } + else + { + ++it_hist->access_counter_; + } +} + +template < typename HistEntryT > +void +EpropArchivingNode< HistEntryT >::write_update_to_history( const long t_previous_update, + const long t_current_update, + const long eprop_isi_trace_cutoff ) +{ + if ( eprop_indegree_ == 0 ) + { + return; + } + + const long shift = model_dependent_history_shift_(); + + const auto it_hist_curr = get_update_history( t_current_update + shift ); + + if ( it_hist_curr != update_history_.end() and it_hist_curr->t_ == t_current_update + shift ) + 
{ + ++it_hist_curr->access_counter_; + } + else + { + update_history_.insert( it_hist_curr, HistEntryEpropUpdate( t_current_update + shift, 1 ) ); + + if ( not history_shift_required_() ) + { + erase_used_eprop_history( eprop_isi_trace_cutoff ); + } + } + + const auto it_hist_prev = get_update_history( t_previous_update + shift ); + + if ( it_hist_prev != update_history_.end() and it_hist_prev->t_ == t_previous_update + shift ) + { + // If an entry exists for the previous update time, decrement its access counter + --it_hist_prev->access_counter_; + if ( it_hist_prev->access_counter_ == 0 ) + { + update_history_.erase( it_hist_prev ); + } + } +} + +template < typename HistEntryT > +std::vector< HistEntryEpropUpdate >::iterator +EpropArchivingNode< HistEntryT >::get_update_history( const long time_step ) +{ + return std::lower_bound( update_history_.begin(), update_history_.end(), time_step ); +} + +template < typename HistEntryT > +typename std::vector< HistEntryT >::iterator +EpropArchivingNode< HistEntryT >::get_eprop_history( const long time_step ) +{ + return std::lower_bound( eprop_history_.begin(), eprop_history_.end(), time_step ); +} + +template < typename HistEntryT > +void +EpropArchivingNode< HistEntryT >::erase_used_eprop_history() +{ + if ( eprop_history_.empty() // nothing to remove + or update_history_.empty() // no time markers to check + ) + { + return; + } + + const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps(); + + auto it_update_hist = update_history_.begin(); + + for ( long t = update_history_.begin()->t_; + t <= ( update_history_.end() - 1 )->t_ and it_update_hist != update_history_.end(); + t += update_interval ) + { + if ( it_update_hist->t_ == t ) + { + ++it_update_hist; + } + else + { + // erase no longer needed entries for update intervals with no spikes sent to the target neuron + eprop_history_.erase( get_eprop_history( t ), get_eprop_history( t + update_interval ) ); + } + } + // erase no 
longer needed entries before the earliest current update + eprop_history_.erase( get_eprop_history( 0 ), get_eprop_history( update_history_.begin()->t_ ) ); +} + +template < typename HistEntryT > +void +EpropArchivingNode< HistEntryT >::erase_used_eprop_history( const long eprop_isi_trace_cutoff ) +{ + if ( eprop_history_.empty() // nothing to remove + or update_history_.size() < 2 // no time markers to check + ) + { + return; + } + + const long t_prev = ( update_history_.end() - 2 )->t_; + const long t_curr = ( update_history_.end() - 1 )->t_; + + if ( t_prev + eprop_isi_trace_cutoff < t_curr ) + { + // erase no longer needed entries to be ignored by trace cutoff + eprop_history_.erase( get_eprop_history( t_prev + eprop_isi_trace_cutoff ), get_eprop_history( t_curr ) ); + } + + // erase no longer needed entries before the earliest current update + eprop_history_.erase( + get_eprop_history( std::numeric_limits< long >::min() ), get_eprop_history( update_history_.begin()->t_ - 1 ) ); +} + +template < typename HistEntryT > +inline double +EpropArchivingNode< HistEntryT >::get_eprop_history_duration() const +{ + return Time::get_resolution().get_ms() * eprop_history_.size(); +} + } // namespace nest #endif // EPROP_ARCHIVING_NODE_H diff --git a/nestkernel/eprop_archiving_node_impl.h b/nestkernel/eprop_archiving_node_impl.h deleted file mode 100644 index 7c4c60a52e..0000000000 --- a/nestkernel/eprop_archiving_node_impl.h +++ /dev/null @@ -1,194 +0,0 @@ -/* - * eprop_archiving_node_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. 
- * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#ifndef EPROP_ARCHIVING_NODE_IMPL_H -#define EPROP_ARCHIVING_NODE_IMPL_H - -#include "eprop_archiving_node.h" - -// Includes from nestkernel: -#include "kernel_manager.h" - -// Includes from sli: -#include "dictutils.h" - -namespace nest -{ - -template < typename HistEntryT > -EpropArchivingNode< HistEntryT >::EpropArchivingNode() - : Node() - , eprop_indegree_( 0 ) -{ -} - -template < typename HistEntryT > -EpropArchivingNode< HistEntryT >::EpropArchivingNode( const EpropArchivingNode& n ) - : Node( n ) - , eprop_indegree_( n.eprop_indegree_ ) -{ -} - -template < typename HistEntryT > -void -EpropArchivingNode< HistEntryT >::register_eprop_connection() -{ - ++eprop_indegree_; - - const long t_first_entry = model_dependent_history_shift_(); - - const auto it_hist = get_update_history( t_first_entry ); - - if ( it_hist == update_history_.end() or it_hist->t_ != t_first_entry ) - { - update_history_.insert( it_hist, HistEntryEpropUpdate( t_first_entry, 1 ) ); - } - else - { - ++it_hist->access_counter_; - } -} - -template < typename HistEntryT > -void -EpropArchivingNode< HistEntryT >::write_update_to_history( const long t_previous_update, - const long t_current_update, - const long eprop_isi_trace_cutoff ) -{ - if ( eprop_indegree_ == 0 ) - { - return; - } - - const long shift = model_dependent_history_shift_(); - - const auto it_hist_curr = get_update_history( t_current_update + shift ); - - if ( it_hist_curr != update_history_.end() and it_hist_curr->t_ == t_current_update + shift ) - { - ++it_hist_curr->access_counter_; - } - else - { - update_history_.insert( it_hist_curr, HistEntryEpropUpdate( 
t_current_update + shift, 1 ) ); - - if ( not history_shift_required_() ) - { - erase_used_eprop_history( eprop_isi_trace_cutoff ); - } - } - - const auto it_hist_prev = get_update_history( t_previous_update + shift ); - - if ( it_hist_prev != update_history_.end() and it_hist_prev->t_ == t_previous_update + shift ) - { - // If an entry exists for the previous update time, decrement its access counter - --it_hist_prev->access_counter_; - if ( it_hist_prev->access_counter_ == 0 ) - { - update_history_.erase( it_hist_prev ); - } - } -} - -template < typename HistEntryT > -std::vector< HistEntryEpropUpdate >::iterator -EpropArchivingNode< HistEntryT >::get_update_history( const long time_step ) -{ - return std::lower_bound( update_history_.begin(), update_history_.end(), time_step ); -} - -template < typename HistEntryT > -typename std::vector< HistEntryT >::iterator -EpropArchivingNode< HistEntryT >::get_eprop_history( const long time_step ) -{ - return std::lower_bound( eprop_history_.begin(), eprop_history_.end(), time_step ); -} - -template < typename HistEntryT > -void -EpropArchivingNode< HistEntryT >::erase_used_eprop_history() -{ - if ( eprop_history_.empty() // nothing to remove - or update_history_.empty() // no time markers to check - ) - { - return; - } - - const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps(); - - auto it_update_hist = update_history_.begin(); - - for ( long t = update_history_.begin()->t_; - t <= ( update_history_.end() - 1 )->t_ and it_update_hist != update_history_.end(); - t += update_interval ) - { - if ( it_update_hist->t_ == t ) - { - ++it_update_hist; - } - else - { - // erase no longer needed entries for update intervals with no spikes sent to the target neuron - eprop_history_.erase( get_eprop_history( t ), get_eprop_history( t + update_interval ) ); - } - } - // erase no longer needed entries before the earliest current update - eprop_history_.erase( get_eprop_history( 0 ), 
get_eprop_history( update_history_.begin()->t_ ) ); -} - -template < typename HistEntryT > -void -EpropArchivingNode< HistEntryT >::erase_used_eprop_history( const long eprop_isi_trace_cutoff ) -{ - if ( eprop_history_.empty() // nothing to remove - or update_history_.size() < 2 // no time markers to check - ) - { - return; - } - - const long t_prev = ( update_history_.end() - 2 )->t_; - const long t_curr = ( update_history_.end() - 1 )->t_; - - if ( t_prev + eprop_isi_trace_cutoff < t_curr ) - { - // erase no longer needed entries to be ignored by trace cutoff - eprop_history_.erase( get_eprop_history( t_prev + eprop_isi_trace_cutoff ), get_eprop_history( t_curr ) ); - } - - // erase no longer needed entries before the earliest current update - eprop_history_.erase( - get_eprop_history( std::numeric_limits< long >::min() ), get_eprop_history( update_history_.begin()->t_ - 1 ) ); -} - -template < typename HistEntryT > -inline double -EpropArchivingNode< HistEntryT >::get_eprop_history_duration() const -{ - return Time::get_resolution().get_ms() * eprop_history_.size(); -} - -} // namespace nest - -#endif // EPROP_ARCHIVING_NODE_IMPL_H diff --git a/nestkernel/eprop_archiving_node_recurrent.h b/nestkernel/eprop_archiving_node_recurrent.h index a8872e3297..3442796b96 100644 --- a/nestkernel/eprop_archiving_node_recurrent.h +++ b/nestkernel/eprop_archiving_node_recurrent.h @@ -304,6 +304,272 @@ EpropArchivingNodeRecurrent< hist_shift_required >::history_shift_required_() co return hist_shift_required; } +template < bool hist_shift_required > +std::map< std::string, typename EpropArchivingNodeRecurrent< hist_shift_required >::surrogate_gradient_function > + EpropArchivingNodeRecurrent< hist_shift_required >::surrogate_gradient_funcs_ = { + { "piecewise_linear", + &EpropArchivingNodeRecurrent< hist_shift_required >::compute_piecewise_linear_surrogate_gradient }, + { "exponential", &EpropArchivingNodeRecurrent< hist_shift_required >::compute_exponential_surrogate_gradient 
}, + { "fast_sigmoid_derivative", + &EpropArchivingNodeRecurrent< hist_shift_required >::compute_fast_sigmoid_derivative_surrogate_gradient }, + { "arctan", &EpropArchivingNodeRecurrent< hist_shift_required >::compute_arctan_surrogate_gradient } + }; + +template < bool hist_shift_required > +EpropArchivingNodeRecurrent< hist_shift_required >::EpropArchivingNodeRecurrent() + : EpropArchivingNode() + , firing_rate_reg_( 0.0 ) + , f_av_( 0.0 ) + , n_spikes_( 0 ) +{ +} + +template < bool hist_shift_required > +EpropArchivingNodeRecurrent< hist_shift_required >::EpropArchivingNodeRecurrent( const EpropArchivingNodeRecurrent& n ) + : EpropArchivingNode( n ) + , firing_rate_reg_( n.firing_rate_reg_ ) + , f_av_( n.f_av_ ) + , n_spikes_( n.n_spikes_ ) +{ +} + +template < bool hist_shift_required > +typename EpropArchivingNodeRecurrent< hist_shift_required >::surrogate_gradient_function +EpropArchivingNodeRecurrent< hist_shift_required >::find_surrogate_gradient( + const std::string& surrogate_gradient_function_name ) +{ + const auto found_entry_it = surrogate_gradient_funcs_.find( surrogate_gradient_function_name ); + + if ( found_entry_it != surrogate_gradient_funcs_.end() ) + { + return found_entry_it->second; + } + + std::string error_message = "Surrogate gradient / pseudo-derivate function surrogate_gradient_function from ["; + for ( const auto& surrogate_gradient_func : surrogate_gradient_funcs_ ) + { + error_message += " \"" + surrogate_gradient_func.first + "\","; + } + error_message.pop_back(); + error_message += " ] required."; + + throw BadProperty( error_message ); +} + +template < bool hist_shift_required > +double +EpropArchivingNodeRecurrent< hist_shift_required >::compute_piecewise_linear_surrogate_gradient( const double r, + const double v_m, + const double v_th, + const double beta, + const double gamma ) +{ + if ( r > 0 ) + { + return 0.0; + } + + return gamma * std::max( 0.0, 1.0 - beta * std::abs( v_m - v_th ) ); +} + +template < bool hist_shift_required 
> +double +EpropArchivingNodeRecurrent< hist_shift_required >::compute_exponential_surrogate_gradient( const double r, + const double v_m, + const double v_th, + const double beta, + const double gamma ) +{ + if ( r > 0 ) + { + return 0.0; + } + + return gamma * std::exp( -beta * std::abs( v_m - v_th ) ); +} + +template < bool hist_shift_required > +double +EpropArchivingNodeRecurrent< hist_shift_required >::compute_fast_sigmoid_derivative_surrogate_gradient( const double r, + const double v_m, + const double v_th, + const double beta, + const double gamma ) +{ + if ( r > 0 ) + { + return 0.0; + } + + return gamma * std::pow( 1.0 + beta * std::abs( v_m - v_th ), -2 ); +} + +template < bool hist_shift_required > +double +EpropArchivingNodeRecurrent< hist_shift_required >::compute_arctan_surrogate_gradient( const double r, + const double v_m, + const double v_th, + const double beta, + const double gamma ) +{ + if ( r > 0 ) + { + return 0.0; + } + + return gamma / M_PI * ( 1.0 / ( 1.0 + std::pow( beta * M_PI * ( v_m - v_th ), 2 ) ) ); +} + +template < bool hist_shift_required > +void +EpropArchivingNodeRecurrent< hist_shift_required >::append_new_eprop_history_entry( const long time_step ) +{ + if ( eprop_indegree_ == 0 ) + { + return; + } + + eprop_history_.emplace_back( time_step, 0.0, 0.0, 0.0 ); +} + +template < bool hist_shift_required > +void +EpropArchivingNodeRecurrent< hist_shift_required >::write_surrogate_gradient_to_history( const long time_step, + const double surrogate_gradient ) +{ + if ( eprop_indegree_ == 0 ) + { + return; + } + + auto it_hist = get_eprop_history( time_step ); + it_hist->surrogate_gradient_ = surrogate_gradient; +} + +template < bool hist_shift_required > +void +EpropArchivingNodeRecurrent< hist_shift_required >::write_learning_signal_to_history( const long time_step, + const double learning_signal ) +{ + if ( eprop_indegree_ == 0 ) + { + return; + } + + long shift = delay_rec_out_ + delay_out_rec_; + + if constexpr ( 
hist_shift_required ) + { + shift += delay_out_norm_; + } + + + auto it_hist = get_eprop_history( time_step - shift ); + const auto it_hist_end = get_eprop_history( time_step - shift + delay_out_rec_ ); + + for ( ; it_hist != it_hist_end; ++it_hist ) + { + it_hist->learning_signal_ += learning_signal; + } +} + +template < bool hist_shift_required > +void +EpropArchivingNodeRecurrent< hist_shift_required >::write_firing_rate_reg_to_history( const long t_current_update, + const double f_target, + const double c_reg ) +{ + if ( eprop_indegree_ == 0 ) + { + return; + } + + const double update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps(); + const double dt = Time::get_resolution().get_ms(); + const long shift = Time::get_resolution().get_steps(); + + const double f_av = n_spikes_ / update_interval; + const double f_target_ = f_target * dt; // convert from spikes/ms to spikes/step + const double firing_rate_reg = c_reg * ( f_av - f_target_ ) / update_interval; + + firing_rate_reg_history_.emplace_back( t_current_update + shift, firing_rate_reg ); +} + +template < bool hist_shift_required > +void +EpropArchivingNodeRecurrent< hist_shift_required >::write_firing_rate_reg_to_history( const long time_step, + const double z, + const double f_target, + const double kappa_reg, + const double c_reg ) +{ + if ( eprop_indegree_ == 0 ) + { + return; + } + + const double dt = Time::get_resolution().get_ms(); + + const double f_target_ = f_target * dt; // convert from spikes/ms to spikes/step + + f_av_ = kappa_reg * f_av_ + ( 1.0 - kappa_reg ) * z / dt; + + firing_rate_reg_ = c_reg * ( f_av_ - f_target_ ); + + auto it_hist = get_eprop_history( time_step ); + it_hist->firing_rate_reg_ = firing_rate_reg_; +} + +template < bool hist_shift_required > +double +EpropArchivingNodeRecurrent< hist_shift_required >::get_firing_rate_reg_history( const long time_step ) +{ + const auto it_hist = std::lower_bound( firing_rate_reg_history_.begin(), 
firing_rate_reg_history_.end(), time_step ); + assert( it_hist != firing_rate_reg_history_.end() ); + + return it_hist->firing_rate_reg_; +} + +template < bool hist_shift_required > +double +EpropArchivingNodeRecurrent< hist_shift_required >::get_learning_signal_from_history( const long time_step ) +{ + long shift = delay_rec_out_ + delay_out_rec_; + + if ( hist_shift_required ) + { + shift += delay_out_norm_; + } + + const auto it = get_eprop_history( time_step - shift ); + if ( it == eprop_history_.end() ) + { + return 0; + } + + return it->learning_signal_; +} + +template < bool hist_shift_required > +void +EpropArchivingNodeRecurrent< hist_shift_required >::erase_used_firing_rate_reg_history() +{ + auto it_update_hist = update_history_.begin(); + auto it_reg_hist = firing_rate_reg_history_.begin(); + + while ( it_update_hist != update_history_.end() and it_reg_hist != firing_rate_reg_history_.end() ) + { + if ( it_update_hist->access_counter_ == 0 ) + { + it_reg_hist = firing_rate_reg_history_.erase( it_reg_hist ); + } + else + { + ++it_reg_hist; + } + ++it_update_hist; + } +} } diff --git a/nestkernel/eprop_archiving_node_recurrent_impl.h b/nestkernel/eprop_archiving_node_recurrent_impl.h deleted file mode 100644 index 58dde7a87e..0000000000 --- a/nestkernel/eprop_archiving_node_recurrent_impl.h +++ /dev/null @@ -1,303 +0,0 @@ -/* - * eprop_archiving_node_recurrent_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -// nestkernel -#include "eprop_archiving_node.h" -#include "eprop_archiving_node_impl.h" -#include "eprop_archiving_node_recurrent.h" -#include "kernel_manager.h" - -// sli -#include "dictutils.h" - -namespace nest -{ - -template < bool hist_shift_required > -std::map< std::string, typename EpropArchivingNodeRecurrent< hist_shift_required >::surrogate_gradient_function > - EpropArchivingNodeRecurrent< hist_shift_required >::surrogate_gradient_funcs_ = { - { "piecewise_linear", - &EpropArchivingNodeRecurrent< hist_shift_required >::compute_piecewise_linear_surrogate_gradient }, - { "exponential", &EpropArchivingNodeRecurrent< hist_shift_required >::compute_exponential_surrogate_gradient }, - { "fast_sigmoid_derivative", - &EpropArchivingNodeRecurrent< hist_shift_required >::compute_fast_sigmoid_derivative_surrogate_gradient }, - { "arctan", &EpropArchivingNodeRecurrent< hist_shift_required >::compute_arctan_surrogate_gradient } - }; - -template < bool hist_shift_required > -EpropArchivingNodeRecurrent< hist_shift_required >::EpropArchivingNodeRecurrent() - : EpropArchivingNode() - , firing_rate_reg_( 0.0 ) - , f_av_( 0.0 ) - , n_spikes_( 0 ) -{ -} - -template < bool hist_shift_required > -EpropArchivingNodeRecurrent< hist_shift_required >::EpropArchivingNodeRecurrent( const EpropArchivingNodeRecurrent& n ) - : EpropArchivingNode( n ) - , firing_rate_reg_( n.firing_rate_reg_ ) - , f_av_( n.f_av_ ) - , n_spikes_( n.n_spikes_ ) -{ -} - -template < bool hist_shift_required > -typename EpropArchivingNodeRecurrent< hist_shift_required >::surrogate_gradient_function -EpropArchivingNodeRecurrent< hist_shift_required >::find_surrogate_gradient( - const std::string& surrogate_gradient_function_name ) -{ - const auto found_entry_it = surrogate_gradient_funcs_.find( surrogate_gradient_function_name ); - - if ( found_entry_it != surrogate_gradient_funcs_.end() 
) - { - return found_entry_it->second; - } - - std::string error_message = "Surrogate gradient / pseudo-derivate function surrogate_gradient_function from ["; - for ( const auto& surrogate_gradient_func : surrogate_gradient_funcs_ ) - { - error_message += " \"" + surrogate_gradient_func.first + "\","; - } - error_message.pop_back(); - error_message += " ] required."; - - throw BadProperty( error_message ); -} - -template < bool hist_shift_required > -double -EpropArchivingNodeRecurrent< hist_shift_required >::compute_piecewise_linear_surrogate_gradient( const double r, - const double v_m, - const double v_th, - const double beta, - const double gamma ) -{ - if ( r > 0 ) - { - return 0.0; - } - - return gamma * std::max( 0.0, 1.0 - beta * std::abs( v_m - v_th ) ); -} - -template < bool hist_shift_required > -double -EpropArchivingNodeRecurrent< hist_shift_required >::compute_exponential_surrogate_gradient( const double r, - const double v_m, - const double v_th, - const double beta, - const double gamma ) -{ - if ( r > 0 ) - { - return 0.0; - } - - return gamma * std::exp( -beta * std::abs( v_m - v_th ) ); -} - -template < bool hist_shift_required > -double -EpropArchivingNodeRecurrent< hist_shift_required >::compute_fast_sigmoid_derivative_surrogate_gradient( const double r, - const double v_m, - const double v_th, - const double beta, - const double gamma ) -{ - if ( r > 0 ) - { - return 0.0; - } - - return gamma * std::pow( 1.0 + beta * std::abs( v_m - v_th ), -2 ); -} - -template < bool hist_shift_required > -double -EpropArchivingNodeRecurrent< hist_shift_required >::compute_arctan_surrogate_gradient( const double r, - const double v_m, - const double v_th, - const double beta, - const double gamma ) -{ - if ( r > 0 ) - { - return 0.0; - } - - return gamma / M_PI * ( 1.0 / ( 1.0 + std::pow( beta * M_PI * ( v_m - v_th ), 2 ) ) ); -} - -template < bool hist_shift_required > -void -EpropArchivingNodeRecurrent< hist_shift_required >::append_new_eprop_history_entry( 
const long time_step ) -{ - if ( eprop_indegree_ == 0 ) - { - return; - } - - eprop_history_.emplace_back( time_step, 0.0, 0.0, 0.0 ); -} - -template < bool hist_shift_required > -void -EpropArchivingNodeRecurrent< hist_shift_required >::write_surrogate_gradient_to_history( const long time_step, - const double surrogate_gradient ) -{ - if ( eprop_indegree_ == 0 ) - { - return; - } - - auto it_hist = get_eprop_history( time_step ); - it_hist->surrogate_gradient_ = surrogate_gradient; -} - -template < bool hist_shift_required > -void -EpropArchivingNodeRecurrent< hist_shift_required >::write_learning_signal_to_history( const long time_step, - const double learning_signal ) -{ - if ( eprop_indegree_ == 0 ) - { - return; - } - - long shift = delay_rec_out_ + delay_out_rec_; - - if constexpr ( hist_shift_required ) - { - shift += delay_out_norm_; - } - - - auto it_hist = get_eprop_history( time_step - shift ); - const auto it_hist_end = get_eprop_history( time_step - shift + delay_out_rec_ ); - - for ( ; it_hist != it_hist_end; ++it_hist ) - { - it_hist->learning_signal_ += learning_signal; - } -} - -template < bool hist_shift_required > -void -EpropArchivingNodeRecurrent< hist_shift_required >::write_firing_rate_reg_to_history( const long t_current_update, - const double f_target, - const double c_reg ) -{ - if ( eprop_indegree_ == 0 ) - { - return; - } - - const double update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps(); - const double dt = Time::get_resolution().get_ms(); - const long shift = Time::get_resolution().get_steps(); - - const double f_av = n_spikes_ / update_interval; - const double f_target_ = f_target * dt; // convert from spikes/ms to spikes/step - const double firing_rate_reg = c_reg * ( f_av - f_target_ ) / update_interval; - - firing_rate_reg_history_.emplace_back( t_current_update + shift, firing_rate_reg ); -} - -template < bool hist_shift_required > -void -EpropArchivingNodeRecurrent< hist_shift_required 
>::write_firing_rate_reg_to_history( const long time_step, - const double z, - const double f_target, - const double kappa_reg, - const double c_reg ) -{ - if ( eprop_indegree_ == 0 ) - { - return; - } - - const double dt = Time::get_resolution().get_ms(); - - const double f_target_ = f_target * dt; // convert from spikes/ms to spikes/step - - f_av_ = kappa_reg * f_av_ + ( 1.0 - kappa_reg ) * z / dt; - - firing_rate_reg_ = c_reg * ( f_av_ - f_target_ ); - - auto it_hist = get_eprop_history( time_step ); - it_hist->firing_rate_reg_ = firing_rate_reg_; -} - -template < bool hist_shift_required > -double -EpropArchivingNodeRecurrent< hist_shift_required >::get_firing_rate_reg_history( const long time_step ) -{ - const auto it_hist = std::lower_bound( firing_rate_reg_history_.begin(), firing_rate_reg_history_.end(), time_step ); - assert( it_hist != firing_rate_reg_history_.end() ); - - return it_hist->firing_rate_reg_; -} - -template < bool hist_shift_required > -double -EpropArchivingNodeRecurrent< hist_shift_required >::get_learning_signal_from_history( const long time_step ) -{ - long shift = delay_rec_out_ + delay_out_rec_; - - if ( hist_shift_required ) - { - shift += delay_out_norm_; - } - - const auto it = get_eprop_history( time_step - shift ); - if ( it == eprop_history_.end() ) - { - return 0; - } - - return it->learning_signal_; -} - -template < bool hist_shift_required > -void -EpropArchivingNodeRecurrent< hist_shift_required >::erase_used_firing_rate_reg_history() -{ - auto it_update_hist = update_history_.begin(); - auto it_reg_hist = firing_rate_reg_history_.begin(); - - while ( it_update_hist != update_history_.end() and it_reg_hist != firing_rate_reg_history_.end() ) - { - if ( it_update_hist->access_counter_ == 0 ) - { - it_reg_hist = firing_rate_reg_history_.erase( it_reg_hist ); - } - else - { - ++it_reg_hist; - } - ++it_update_hist; - } -} - - -} // namespace nest diff --git a/nestkernel/event.cpp b/nestkernel/event.cpp index 
6d33a5f615..ec67e07238 100644 --- a/nestkernel/event.cpp +++ b/nestkernel/event.cpp @@ -23,9 +23,9 @@ #include "event.h" // Includes from nestkernel: +#include "connection_manager.h" #include "kernel_manager.h" #include "node.h" -#include "secondary_event_impl.h" namespace nest { diff --git a/nestkernel/event_delivery_manager.cpp b/nestkernel/event_delivery_manager.cpp index 8ba790b740..fd87d011c8 100644 --- a/nestkernel/event_delivery_manager.cpp +++ b/nestkernel/event_delivery_manager.cpp @@ -28,15 +28,11 @@ // Includes from nestkernel: #include "connection_manager.h" -#include "connection_manager_impl.h" -#include "event_delivery_manager_impl.h" #include "kernel_manager.h" -#include "mpi_manager_impl.h" +#include "model_manager.h" #include "send_buffer_position.h" -#include "source.h" -#include "stopwatch_impl.h" +#include "simulation_manager.h" #include "vp_manager.h" -#include "vp_manager_impl.h" // Includes from sli: #include "dictutils.h" @@ -423,9 +419,9 @@ EventDeliveryManager::gather_spike_data_( std::vector< SpikeDataT >& send_buffer // We introduce an explicit barrier at this point to measure how long each process idles until all other processes // reached this point as well. This barrier is directly followed by another implicit barrier due to global // communication. - kernel().get_mpi_synchronization_stopwatch().start(); + kernel().simulation_manager.get_mpi_synchronization_stopwatch().start(); kernel().mpi_manager.synchronize(); - kernel().get_mpi_synchronization_stopwatch().stop(); + kernel().simulation_manager.get_mpi_synchronization_stopwatch().stop(); #endif // Given that we templatize by plain vs offgrid, this if should not be necessary, but ... 
@@ -802,9 +798,9 @@ EventDeliveryManager::gather_target_data( const size_t tid ) resize_send_recv_buffers_target_data(); } } // of omp master; (no barrier) - kernel().get_omp_synchronization_construction_stopwatch().start(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); kernel().connection_manager.restore_source_table_entry_point( tid ); @@ -819,9 +815,9 @@ EventDeliveryManager::gather_target_data( const size_t tid ) set_complete_marker_target_data_( assigned_ranks, send_buffer_position ); } kernel().connection_manager.save_source_table_entry_point( tid ); - kernel().get_omp_synchronization_construction_stopwatch().start(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); kernel().connection_manager.clean_source_table( tid ); #pragma omp master @@ -874,9 +870,9 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid ) resize_send_recv_buffers_target_data(); } } // of omp master; no barrier - kernel().get_omp_synchronization_construction_stopwatch().start(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); TargetSendBufferPosition send_buffer_position( assigned_ranks, kernel().mpi_manager.get_send_recv_count_target_data_per_rank() ); @@ -891,9 +887,9 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid ) set_complete_marker_target_data_( assigned_ranks, send_buffer_position ); } - 
kernel().get_omp_synchronization_construction_stopwatch().start(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); #pragma omp master { @@ -916,9 +912,9 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid ) { buffer_size_target_data_has_changed_ = kernel().mpi_manager.increase_buffer_size_target_data(); } // of omp master (no barrier) - kernel().get_omp_synchronization_construction_stopwatch().start(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); } } // of while diff --git a/nestkernel/event_delivery_manager.h b/nestkernel/event_delivery_manager.h index dbdbe1483b..c50326bf38 100644 --- a/nestkernel/event_delivery_manager.h +++ b/nestkernel/event_delivery_manager.h @@ -25,7 +25,6 @@ // C++ includes: #include -#include #include // Includes from libnestutil: @@ -34,13 +33,16 @@ // Includes from nestkernel: #include "buffer_resize_log.h" +#include "connection_manager.h" #include "event.h" +#include "kernel_manager.h" #include "mpi_manager.h" // OffGridSpike #include "nest_time.h" #include "nest_types.h" #include "node.h" #include "per_thread_bool_indicator.h" #include "secondary_event.h" +#include "simulation_manager.h" #include "spike_data.h" #include "target_table.h" #include "vp_manager.h" @@ -531,6 +533,149 @@ EventDeliveryManager::get_slice_modulo( long d ) return slice_moduli_[ d ]; } +template < class EventT > +inline void +EventDeliveryManager::send_local_( Node& source, EventT& e, const long lag ) +{ + assert( not source.has_proxies() ); + e.set_stamp( kernel().simulation_manager.get_slice_origin() + Time::step( 
lag + 1 ) ); + e.set_sender( source ); + const size_t t = source.get_thread(); + const size_t ldid = source.get_local_device_id(); + kernel().connection_manager.send_from_device( t, ldid, e ); +} + +inline void +EventDeliveryManager::send_local_( Node& source, SecondaryEvent& e, const long ) +{ + assert( not source.has_proxies() ); + e.set_stamp( kernel().simulation_manager.get_slice_origin() + Time::step( 1 ) ); + e.set_sender( source ); + const size_t t = source.get_thread(); + const size_t ldid = source.get_local_device_id(); + kernel().connection_manager.send_from_device( t, ldid, e ); +} + +template < class EventT > +inline void +EventDeliveryManager::send( Node& source, EventT& e, const long lag ) +{ + send_local_( source, e, lag ); +} + +template <> +inline void +EventDeliveryManager::send< SpikeEvent >( Node& source, SpikeEvent& e, const long lag ) +{ + const size_t tid = source.get_thread(); + const size_t source_node_id = source.get_node_id(); + e.set_sender_node_id( source_node_id ); + if ( source.has_proxies() ) + { + local_spike_counter_[ tid ] += e.get_multiplicity(); + + e.set_stamp( kernel().simulation_manager.get_slice_origin() + Time::step( lag + 1 ) ); + e.set_sender( source ); + + if ( source.is_off_grid() ) + { + send_off_grid_remote( tid, e, lag ); + } + else + { + send_remote( tid, e, lag ); + } + kernel().connection_manager.send_to_devices( tid, source_node_id, e ); + } + else + { + send_local_( source, e, lag ); + } +} + +template <> +inline void +EventDeliveryManager::send< DSSpikeEvent >( Node& source, DSSpikeEvent& e, const long lag ) +{ + e.set_sender_node_id( source.get_node_id() ); + send_local_( source, e, lag ); +} + +inline void +EventDeliveryManager::send_remote( size_t tid, SpikeEvent& e, const long lag ) +{ + // Put the spike in a buffer for the remote machines + const size_t lid = kernel().vp_manager.node_id_to_lid( e.get_sender().get_node_id() ); + const auto& targets = 
kernel().connection_manager.get_remote_targets_of_local_node( tid, lid ); + + for ( const auto& target : targets ) + { + // Unroll spike multiplicity as plastic synapses only handle individual spikes. + for ( size_t i = 0; i < e.get_multiplicity(); ++i ) + { + ( *emitted_spikes_register_[ tid ] ).emplace_back( target, lag ); + } + } +} + +inline void +EventDeliveryManager::send_off_grid_remote( size_t tid, SpikeEvent& e, const long lag ) +{ + // Put the spike in a buffer for the remote machines + const size_t lid = kernel().vp_manager.node_id_to_lid( e.get_sender().get_node_id() ); + const auto& targets = kernel().connection_manager.get_remote_targets_of_local_node( tid, lid ); + + for ( const auto& target : targets ) + { + // Unroll spike multiplicity as plastic synapses only handle individual spikes. + for ( size_t i = 0; i < e.get_multiplicity(); ++i ) + { + ( *off_grid_emitted_spikes_register_[ tid ] ).emplace_back( target, lag, e.get_offset() ); + } + } +} + +inline void +EventDeliveryManager::send_secondary( Node& source, SecondaryEvent& e ) +{ + const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t source_node_id = source.get_node_id(); + const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); + + if ( source.has_proxies() ) + { + + // We need to consider every synapse type this event supports to + // make sure also labeled and connection created by CopyModel are + // considered. 
+ const std::set< synindex >& supported_syn_ids = e.get_supported_syn_ids(); + for ( const auto& syn_id : supported_syn_ids ) + { + const std::vector< size_t >& positions = + kernel().connection_manager.get_secondary_send_buffer_positions( tid, lid, syn_id ); + + for ( size_t i = 0; i < positions.size(); ++i ) + { + std::vector< unsigned int >::iterator it = send_buffer_secondary_events_.begin() + positions[ i ]; + e >> it; + } + } + kernel().connection_manager.send_to_devices( tid, source_node_id, e ); + } + else + { + send_local_( source, e, 0 ); // need to pass lag (last argument), but not + // used in template specialization, so pass + // zero as dummy value + } +} + +inline size_t +EventDeliveryManager::write_toggle() const +{ + return kernel().simulation_manager.get_slice() % 2; +} + } // namespace nest #endif /* EVENT_DELIVERY_MANAGER_H */ diff --git a/nestkernel/event_delivery_manager_impl.h b/nestkernel/event_delivery_manager_impl.h deleted file mode 100644 index 55310a24bd..0000000000 --- a/nestkernel/event_delivery_manager_impl.h +++ /dev/null @@ -1,181 +0,0 @@ -/* - * event_delivery_manager_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . 
- * - */ - -#ifndef EVENT_DELIVERY_MANAGER_IMPL_H -#define EVENT_DELIVERY_MANAGER_IMPL_H - -#include "event_delivery_manager.h" - -// Includes from nestkernel: -#include "connection_manager_impl.h" -#include "kernel_manager.h" - -namespace nest -{ - -template < class EventT > -inline void -EventDeliveryManager::send_local_( Node& source, EventT& e, const long lag ) -{ - assert( not source.has_proxies() ); - e.set_stamp( kernel().simulation_manager.get_slice_origin() + Time::step( lag + 1 ) ); - e.set_sender( source ); - const size_t t = source.get_thread(); - const size_t ldid = source.get_local_device_id(); - kernel().connection_manager.send_from_device( t, ldid, e ); -} - -inline void -EventDeliveryManager::send_local_( Node& source, SecondaryEvent& e, const long ) -{ - assert( not source.has_proxies() ); - e.set_stamp( kernel().simulation_manager.get_slice_origin() + Time::step( 1 ) ); - e.set_sender( source ); - const size_t t = source.get_thread(); - const size_t ldid = source.get_local_device_id(); - kernel().connection_manager.send_from_device( t, ldid, e ); -} - -template < class EventT > -inline void -EventDeliveryManager::send( Node& source, EventT& e, const long lag ) -{ - send_local_( source, e, lag ); -} - -template <> -inline void -EventDeliveryManager::send< SpikeEvent >( Node& source, SpikeEvent& e, const long lag ) -{ - const size_t tid = source.get_thread(); - const size_t source_node_id = source.get_node_id(); - e.set_sender_node_id( source_node_id ); - if ( source.has_proxies() ) - { - local_spike_counter_[ tid ] += e.get_multiplicity(); - - e.set_stamp( kernel().simulation_manager.get_slice_origin() + Time::step( lag + 1 ) ); - e.set_sender( source ); - - if ( source.is_off_grid() ) - { - send_off_grid_remote( tid, e, lag ); - } - else - { - send_remote( tid, e, lag ); - } - kernel().connection_manager.send_to_devices( tid, source_node_id, e ); - } - else - { - send_local_( source, e, lag ); - } -} - -template <> -inline void 
-EventDeliveryManager::send< DSSpikeEvent >( Node& source, DSSpikeEvent& e, const long lag ) -{ - e.set_sender_node_id( source.get_node_id() ); - send_local_( source, e, lag ); -} - -inline void -EventDeliveryManager::send_remote( size_t tid, SpikeEvent& e, const long lag ) -{ - // Put the spike in a buffer for the remote machines - const size_t lid = kernel().vp_manager.node_id_to_lid( e.get_sender().get_node_id() ); - const auto& targets = kernel().connection_manager.get_remote_targets_of_local_node( tid, lid ); - - for ( const auto& target : targets ) - { - // Unroll spike multiplicity as plastic synapses only handle individual spikes. - for ( size_t i = 0; i < e.get_multiplicity(); ++i ) - { - ( *emitted_spikes_register_[ tid ] ).emplace_back( target, lag ); - } - } -} - -inline void -EventDeliveryManager::send_off_grid_remote( size_t tid, SpikeEvent& e, const long lag ) -{ - // Put the spike in a buffer for the remote machines - const size_t lid = kernel().vp_manager.node_id_to_lid( e.get_sender().get_node_id() ); - const auto& targets = kernel().connection_manager.get_remote_targets_of_local_node( tid, lid ); - - for ( const auto& target : targets ) - { - // Unroll spike multiplicity as plastic synapses only handle individual spikes. - for ( size_t i = 0; i < e.get_multiplicity(); ++i ) - { - ( *off_grid_emitted_spikes_register_[ tid ] ).emplace_back( target, lag, e.get_offset() ); - } - } -} - -inline void -EventDeliveryManager::send_secondary( Node& source, SecondaryEvent& e ) -{ - const size_t tid = kernel().vp_manager.get_thread_id(); - const size_t source_node_id = source.get_node_id(); - const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); - - if ( source.has_proxies() ) - { - - // We need to consider every synapse type this event supports to - // make sure also labeled and connection created by CopyModel are - // considered. 
- const std::set< synindex >& supported_syn_ids = e.get_supported_syn_ids(); - for ( const auto& syn_id : supported_syn_ids ) - { - const std::vector< size_t >& positions = - kernel().connection_manager.get_secondary_send_buffer_positions( tid, lid, syn_id ); - - for ( size_t i = 0; i < positions.size(); ++i ) - { - std::vector< unsigned int >::iterator it = send_buffer_secondary_events_.begin() + positions[ i ]; - e >> it; - } - } - kernel().connection_manager.send_to_devices( tid, source_node_id, e ); - } - else - { - send_local_( source, e, 0 ); // need to pass lag (last argument), but not - // used in template specialization, so pass - // zero as dummy value - } -} - -inline size_t -EventDeliveryManager::write_toggle() const -{ - return kernel().simulation_manager.get_slice() % 2; -} - - -} // of namespace nest - -#endif diff --git a/nestkernel/free_layer.h b/nestkernel/free_layer.h index 40b1c972cc..786c2ba075 100644 --- a/nestkernel/free_layer.h +++ b/nestkernel/free_layer.h @@ -30,13 +30,13 @@ // Includes from nestkernel: #include "nest_names.h" +#include "node_manager.h" // Includes from sli: #include "dictutils.h" // Includes from spatial: #include "layer.h" -#include "ntree_impl.h" namespace nest { diff --git a/nestkernel/genericmodel.h b/nestkernel/genericmodel.h index a6f5ddc9cb..225cb334e6 100644 --- a/nestkernel/genericmodel.h +++ b/nestkernel/genericmodel.h @@ -23,10 +23,8 @@ #ifndef GENERICMODEL_H #define GENERICMODEL_H -// C++ includes: -#include - // Includes from nestkernel: +#include "logging_manager.h" #include "model.h" namespace nest @@ -281,6 +279,24 @@ GenericModel< ElementT >::get_model_id() { return proto_.get_model_id(); } + +template < typename ElementT > +void +GenericModel< ElementT >::deprecation_warning( const std::string& caller ) +{ + if ( deprecation_warning_issued_ or deprecation_info_.empty() ) + { + return; + } + + if ( not deprecation_info_.empty() ) + { + LOG( M_DEPRECATED, caller, "Model " + get_name() + " is deprecated in 
" + deprecation_info_ + "." ); + } + + deprecation_warning_issued_ = true; } +} // namespace nest + #endif diff --git a/nestkernel/genericmodel_impl.h b/nestkernel/genericmodel_impl.h deleted file mode 100644 index db1d0edc3f..0000000000 --- a/nestkernel/genericmodel_impl.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * genericmodel_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#ifndef GENERICMODEL_IMPL_H -#define GENERICMODEL_IMPL_H - -#include "genericmodel.h" - -// Includes from nestkernel: -#include "kernel_manager.h" -#include "logging_manager.h" - -namespace nest -{ - -template < typename ElementT > -void -GenericModel< ElementT >::deprecation_warning( const std::string& caller ) -{ - if ( deprecation_warning_issued_ or deprecation_info_.empty() ) - { - return; - } - - if ( not deprecation_info_.empty() ) - { - LOG( M_DEPRECATED, caller, "Model " + get_name() + " is deprecated in " + deprecation_info_ + "." 
); - } - - deprecation_warning_issued_ = true; -} -} -#endif diff --git a/nestkernel/io_manager.cpp b/nestkernel/io_manager.cpp index 2d6bdf27e5..4633285782 100644 --- a/nestkernel/io_manager.cpp +++ b/nestkernel/io_manager.cpp @@ -38,11 +38,12 @@ #include "logging.h" // Includes from nestkernel: -#include "io_manager_impl.h" #include "kernel_manager.h" +#include "logging_manager.h" #include "recording_backend_ascii.h" #include "recording_backend_memory.h" #include "recording_backend_screen.h" + #ifdef HAVE_MPI #include "recording_backend_mpi.h" #include "stimulation_backend_mpi.h" diff --git a/nestkernel/io_manager.h b/nestkernel/io_manager.h index ea4be311bd..a6dcb20a4b 100644 --- a/nestkernel/io_manager.h +++ b/nestkernel/io_manager.h @@ -24,16 +24,23 @@ #define IO_MANAGER_H // C++ includes: +#include #include +#include // Includes from libnestutil: +#include "exceptions.h" #include "manager_interface.h" -#include "recording_backend.h" -#include "stimulation_backend.h" +// NOTE: Use forward declarations to avoid circular dependencies namespace nest { +class RecordingBackend; +class StimulationBackend; +class RecordingDevice; +class StimulationDevice; +class Event; /** * Manager to handle everything related to input and output. 
@@ -166,24 +173,52 @@ class IOManager : public ManagerInterface std::map< Name, StimulationBackend* > stimulation_backends_; }; -} // namespace nest - inline const std::string& -nest::IOManager::get_data_path() const +IOManager::get_data_path() const { return data_path_; } inline const std::string& -nest::IOManager::get_data_prefix() const +IOManager::get_data_prefix() const { return data_prefix_; } inline bool -nest::IOManager::overwrite_files() const +IOManager::overwrite_files() const { return overwrite_files_; } +template < class RecordingBackendT > +void +IOManager::register_recording_backend( const Name name ) +{ + if ( recording_backends_.find( name ) != recording_backends_.end() ) + { + throw BackendAlreadyRegistered( name.toString() ); + } + + RecordingBackendT* recording_backend = new RecordingBackendT(); + recording_backend->pre_run_hook(); + recording_backends_.insert( std::make_pair( name, recording_backend ) ); +} + +template < class StimulationBackendT > +void +IOManager::register_stimulation_backend( const Name name ) +{ + if ( stimulation_backends_.find( name ) != stimulation_backends_.end() ) + { + throw BackendAlreadyRegistered( name.toString() ); + } + + StimulationBackendT* stimulation_backend = new StimulationBackendT(); + stimulation_backend->pre_run_hook(); + stimulation_backends_.insert( std::make_pair( name, stimulation_backend ) ); +} + +} // namespace nest + #endif /* #ifndef IO_MANAGER_H */ diff --git a/nestkernel/io_manager_impl.h b/nestkernel/io_manager_impl.h deleted file mode 100644 index 9c631b4345..0000000000 --- a/nestkernel/io_manager_impl.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * io_manager_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. 
- * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#ifndef IO_MANAGER_IMPL_H -#define IO_MANAGER_IMPL_H - -#include "io_manager.h" - -namespace nest -{ - -template < class RecordingBackendT > -void -IOManager::register_recording_backend( const Name name ) -{ - if ( recording_backends_.find( name ) != recording_backends_.end() ) - { - throw BackendAlreadyRegistered( name.toString() ); - } - - RecordingBackendT* recording_backend = new RecordingBackendT(); - recording_backend->pre_run_hook(); - recording_backends_.insert( std::make_pair( name, recording_backend ) ); -} - -template < class StimulationBackendT > -void -IOManager::register_stimulation_backend( const Name name ) -{ - if ( stimulation_backends_.find( name ) != stimulation_backends_.end() ) - { - throw BackendAlreadyRegistered( name.toString() ); - } - - StimulationBackendT* stimulation_backend = new StimulationBackendT(); - stimulation_backend->pre_run_hook(); - stimulation_backends_.insert( std::make_pair( name, stimulation_backend ) ); -} - -} // namespace nest - -#endif /* #ifndef IO_MANAGER_IMPL_H */ diff --git a/nestkernel/kernel_manager.cpp b/nestkernel/kernel_manager.cpp index e31cea89b4..b4cb7329ba 100644 --- a/nestkernel/kernel_manager.cpp +++ b/nestkernel/kernel_manager.cpp @@ -21,12 +21,29 @@ */ #include "kernel_manager.h" -#include "stopwatch_impl.h" -nest::KernelManager* nest::KernelManager::kernel_manager_instance_ = nullptr; +// Include concrete manager headers only in the implementation. 
+#include "connection_manager.h" +#include "event_delivery_manager.h" +#include "io_manager.h" +#include "logging_manager.h" +#include "model_manager.h" +#include "modelrange_manager.h" +#include "module_manager.h" +#include "mpi_manager.h" +#include "music_manager.h" +#include "node_manager.h" +#include "random_manager.h" +#include "simulation_manager.h" +#include "sp_manager.h" +#include "vp_manager.h" + +namespace nest +{ +KernelManager* KernelManager::kernel_manager_instance_ = nullptr; void -nest::KernelManager::create_kernel_manager() +KernelManager::create_kernel_manager() { #pragma omp master { @@ -40,28 +57,28 @@ nest::KernelManager::create_kernel_manager() } void -nest::KernelManager::destroy_kernel_manager() +KernelManager::destroy_kernel_manager() { kernel_manager_instance_->logging_manager.set_logging_level( M_QUIET ); delete kernel_manager_instance_; } -nest::KernelManager::KernelManager() +KernelManager::KernelManager() : fingerprint_( 0 ) - , logging_manager() - , mpi_manager() - , vp_manager() - , module_manager() - , random_manager() - , simulation_manager() - , modelrange_manager() - , connection_manager() - , sp_manager() - , event_delivery_manager() - , io_manager() - , model_manager() - , music_manager() - , node_manager() + , logging_manager( *new LoggingManager() ) + , mpi_manager( *new MPIManager() ) + , vp_manager( *new VPManager() ) + , module_manager( *new ModuleManager() ) + , random_manager( *new RandomManager() ) + , simulation_manager( *new SimulationManager() ) + , modelrange_manager( *new ModelRangeManager() ) + , connection_manager( *new ConnectionManager() ) + , sp_manager( *new SPManager() ) + , event_delivery_manager( *new EventDeliveryManager() ) + , io_manager( *new IOManager() ) + , model_manager( *new ModelManager() ) + , music_manager( *new MUSICManager() ) + , node_manager( *new NodeManager() ) , managers( { &logging_manager, &mpi_manager, &vp_manager, @@ -80,22 +97,27 @@ nest::KernelManager::KernelManager() { } 
-nest::KernelManager::~KernelManager() +KernelManager::~KernelManager() { + if ( initialized_ ) + { + finalize(); + } + + for ( auto manager : managers ) + { + delete manager; + } } void -nest::KernelManager::initialize() +KernelManager::initialize() { for ( auto& manager : managers ) { manager->initialize( /* adjust_number_of_threads_or_rng_only */ false ); } - sw_omp_synchronization_construction_.reset(); - sw_omp_synchronization_simulation_.reset(); - sw_mpi_synchronization_.reset(); - ++fingerprint_; initialized_ = true; FULL_LOGGING_ONLY( dump_.open( @@ -103,47 +125,44 @@ nest::KernelManager::initialize() } void -nest::KernelManager::prepare() +KernelManager::prepare() { - for ( auto& manager : managers ) + for ( auto manager : managers ) { manager->prepare(); } - - sw_omp_synchronization_simulation_.reset(); - sw_mpi_synchronization_.reset(); } void -nest::KernelManager::cleanup() +KernelManager::cleanup() { - for ( auto&& m_it = managers.rbegin(); m_it != managers.rend(); ++m_it ) + for ( auto it = managers.rbegin(); it != managers.rend(); ++it ) { - ( *m_it )->cleanup(); + ( *it )->cleanup(); } } void -nest::KernelManager::finalize() +KernelManager::finalize() { FULL_LOGGING_ONLY( dump_.close(); ) - for ( auto&& m_it = managers.rbegin(); m_it != managers.rend(); ++m_it ) + for ( auto it = managers.rbegin(); it != managers.rend(); ++it ) { - ( *m_it )->finalize( /* adjust_number_of_threads_or_rng_only */ false ); + ( *it )->finalize( /* adjust_number_of_threads_or_rng_only */ false ); } initialized_ = false; } void -nest::KernelManager::reset() +KernelManager::reset() { finalize(); initialize(); } void -nest::KernelManager::change_number_of_threads( size_t new_num_threads ) +KernelManager::change_number_of_threads( size_t new_num_threads ) { // Inputs are checked in VPManager::set_status(). // Just double check here that all values are legal. 
@@ -152,10 +171,9 @@ nest::KernelManager::change_number_of_threads( size_t new_num_threads ) assert( not simulation_manager.has_been_simulated() ); assert( not sp_manager.is_structural_plasticity_enabled() or new_num_threads == 1 ); - // Finalize in reverse order of initialization with old thread number set - for ( auto mgr_it = managers.rbegin(); mgr_it != managers.rend(); ++mgr_it ) + for ( auto it = managers.rbegin(); it != managers.rend(); ++it ) { - ( *mgr_it )->finalize( /* adjust_number_of_threads_or_rng_only */ true ); + ( *it )->finalize( /* adjust_number_of_threads_or_rng_only */ true ); } vp_manager.set_num_threads( new_num_threads ); @@ -176,14 +194,10 @@ nest::KernelManager::change_number_of_threads( size_t new_num_threads ) kernel().simulation_manager.reset_timers_for_dynamics(); kernel().event_delivery_manager.reset_timers_for_preparation(); kernel().event_delivery_manager.reset_timers_for_dynamics(); - - sw_omp_synchronization_construction_.reset(); - sw_omp_synchronization_simulation_.reset(); - sw_mpi_synchronization_.reset(); } void -nest::KernelManager::set_status( const DictionaryDatum& dict ) +KernelManager::set_status( const DictionaryDatum& dict ) { assert( is_initialized() ); @@ -194,7 +208,7 @@ nest::KernelManager::set_status( const DictionaryDatum& dict ) } void -nest::KernelManager::get_status( DictionaryDatum& dict ) +KernelManager::get_status( DictionaryDatum& dict ) { assert( is_initialized() ); @@ -202,16 +216,10 @@ nest::KernelManager::get_status( DictionaryDatum& dict ) { manager->get_status( dict ); } - - sw_omp_synchronization_construction_.get_status( - dict, names::time_omp_synchronization_construction, names::time_omp_synchronization_construction_cpu ); - sw_omp_synchronization_simulation_.get_status( - dict, names::time_omp_synchronization_simulation, names::time_omp_synchronization_simulation_cpu ); - sw_mpi_synchronization_.get_status( dict, names::time_mpi_synchronization, names::time_mpi_synchronization_cpu ); } void 
-nest::KernelManager::write_to_dump( const std::string& msg ) +KernelManager::write_to_dump( const std::string& msg ) { #pragma omp critical // In critical section to avoid any garbling of output. @@ -219,3 +227,5 @@ nest::KernelManager::write_to_dump( const std::string& msg ) dump_ << msg << std::endl << std::flush; } } + +} // namespace nest diff --git a/nestkernel/kernel_manager.h b/nestkernel/kernel_manager.h index abd225e836..05411cbb65 100644 --- a/nestkernel/kernel_manager.h +++ b/nestkernel/kernel_manager.h @@ -23,29 +23,9 @@ #ifndef KERNEL_MANAGER_H #define KERNEL_MANAGER_H -// Includes from libnestutil -#include "config.h" - -// Includes from nestkernel: -#include "connection_manager.h" -#include "event_delivery_manager.h" -#include "io_manager.h" -#include "logging_manager.h" -#include "model_manager.h" -#include "modelrange_manager.h" -#include "module_manager.h" -#include "mpi_manager.h" -#include "music_manager.h" -#include "node_manager.h" -#include "random_manager.h" -#include "simulation_manager.h" -#include "sp_manager.h" -#include "vp_manager.h" - // Includes from sli: #include "dictdatum.h" -#include "compose.hpp" #include /** @BeginDocumentation @@ -188,9 +168,26 @@ namespace nest { +// Forward declarations to avoid pulling all manager headers here. +class LoggingManager; +class MPIManager; +class VPManager; +class ModuleManager; +class RandomManager; +class SimulationManager; +class ModelRangeManager; +class ConnectionManager; +class SPManager; +class EventDeliveryManager; +class IOManager; +class ModelManager; +class MUSICManager; +class NodeManager; + +class ManagerInterface; + class KernelManager { -private: KernelManager(); ~KernelManager(); @@ -272,41 +269,21 @@ class KernelManager * NodeManager is last to ensure all model structures are in place before it is initialized. 
* @{ */ - LoggingManager logging_manager; - MPIManager mpi_manager; - VPManager vp_manager; - ModuleManager module_manager; - RandomManager random_manager; - SimulationManager simulation_manager; - ModelRangeManager modelrange_manager; - ConnectionManager connection_manager; - SPManager sp_manager; - EventDeliveryManager event_delivery_manager; - IOManager io_manager; - ModelManager model_manager; - MUSICManager music_manager; - NodeManager node_manager; - /**@}*/ - - //! Get the stopwatch to measure the time each thread is idle during network construction. - Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::Threaded >& - get_omp_synchronization_construction_stopwatch() - { - return sw_omp_synchronization_construction_; - } - - //! Get the stopwatch to measure the time each thread is idle during simulation. - Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::Threaded >& - get_omp_synchronization_simulation_stopwatch() - { - return sw_omp_synchronization_simulation_; - } - - Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::MasterOnly >& - get_mpi_synchronization_stopwatch() - { - return sw_mpi_synchronization_; - } + // Property-like access to managers (public references). + LoggingManager& logging_manager; + MPIManager& mpi_manager; + VPManager& vp_manager; + ModuleManager& module_manager; + RandomManager& random_manager; + SimulationManager& simulation_manager; + ModelRangeManager& modelrange_manager; + ConnectionManager& connection_manager; + SPManager& sp_manager; + EventDeliveryManager& event_delivery_manager; + IOManager& io_manager; + ModelManager& model_manager; + MUSICManager& music_manager; + NodeManager& node_manager; private: //! 
All managers, order determines initialization and finalization order (latter backwards) @@ -314,39 +291,35 @@ class KernelManager bool initialized_; //!< true if the kernel is initialized std::ofstream dump_; //!< for FULL_LOGGING output - - Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::Threaded > sw_omp_synchronization_construction_; - Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::Threaded > sw_omp_synchronization_simulation_; - Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::MasterOnly > sw_mpi_synchronization_; }; KernelManager& kernel(); -} // namespace nest - -inline nest::KernelManager& -nest::KernelManager::get_kernel_manager() +inline KernelManager& +KernelManager::get_kernel_manager() { assert( kernel_manager_instance_ ); return *kernel_manager_instance_; } -inline nest::KernelManager& -nest::kernel() +inline KernelManager& +kernel() { return KernelManager::get_kernel_manager(); } inline bool -nest::KernelManager::is_initialized() const +KernelManager::is_initialized() const { return initialized_; } inline unsigned long -nest::KernelManager::get_fingerprint() const +KernelManager::get_fingerprint() const { return fingerprint_; } +} // namespace nest + #endif /* KERNEL_MANAGER_H */ diff --git a/nestkernel/layer.cpp b/nestkernel/layer.cpp index d8236b26a7..ccd766c5f4 100644 --- a/nestkernel/layer.cpp +++ b/nestkernel/layer.cpp @@ -34,11 +34,9 @@ #include "integerdatum.h" // Includes from spatial: -#include "connection_creator_impl.h" #include "free_layer.h" #include "grid_layer.h" #include "layer_impl.h" -#include "mask_impl.h" #include "spatial.h" namespace nest diff --git a/nestkernel/layer.h b/nestkernel/layer.h index 7adf212118..8e425e896d 100644 --- a/nestkernel/layer.h +++ b/nestkernel/layer.h @@ -34,7 +34,9 @@ #include "nest_types.h" // Includes from sli: +#include "booldatum.h" #include "dictutils.h" +#include "grid_mask.h" // Includes from spatial: #include "connection_creator.h" @@ -44,6 +46,9 
@@ namespace nest { +template < int D > +class GridLayer; + class AbstractLayer; typedef std::shared_ptr< AbstractLayer > AbstractLayerPTR; @@ -522,174 +527,6 @@ class MaskedLayer MaskDatum mask_; }; -inline void -AbstractLayer::set_node_collection( NodeCollectionPTR node_collection ) -{ - node_collection_ = node_collection; -} - - -inline NodeCollectionPTR -AbstractLayer::get_node_collection() -{ - return node_collection_; -} - -template < int D > -inline MaskedLayer< D >::MaskedLayer( Layer< D >& layer, - const MaskDatum& maskd, - bool allow_oversized, - NodeCollectionPTR node_collection ) - : mask_( maskd ) -{ - ntree_ = layer.get_global_positions_ntree( node_collection ); - - check_mask_( layer, allow_oversized ); -} - -template < int D > -inline MaskedLayer< D >::MaskedLayer( Layer< D >& layer, - const MaskDatum& maskd, - bool allow_oversized, - Layer< D >& target, - NodeCollectionPTR node_collection ) - : mask_( maskd ) -{ - ntree_ = layer.get_global_positions_ntree( - target.get_periodic_mask(), target.get_lower_left(), target.get_extent(), node_collection ); - - check_mask_( target, allow_oversized ); - mask_ = new ConverseMask< D >( dynamic_cast< const Mask< D >& >( *mask_ ) ); -} - -template < int D > -inline MaskedLayer< D >::~MaskedLayer() -{ -} - -template < int D > -inline typename Ntree< D, size_t >::masked_iterator -MaskedLayer< D >::begin( const Position< D >& anchor ) -{ - try - { - return ntree_->masked_begin( dynamic_cast< const Mask< D >& >( *mask_ ), anchor ); - } - catch ( std::bad_cast& e ) - { - throw BadProperty( "Mask is incompatible with layer." 
); - } -} - -template < int D > -inline typename Ntree< D, size_t >::masked_iterator -MaskedLayer< D >::end() -{ - return ntree_->masked_end(); -} - -template < int D > -inline Layer< D >::Layer() -{ - // Default center (0,0) and extent (1,1) - for ( int i = 0; i < D; ++i ) - { - lower_left_[ i ] = -0.5; - extent_[ i ] = 1.0; - } -} - -template < int D > -inline Layer< D >::Layer( const Layer& other_layer ) - : AbstractLayer( other_layer ) - , lower_left_( other_layer.lower_left_ ) - , extent_( other_layer.extent_ ) - , periodic_( other_layer.periodic_ ) -{ -} - -template < int D > -inline Layer< D >::~Layer() -{ - if ( cached_ntree_md_ == get_metadata() ) - { - clear_ntree_cache_(); - } - - if ( cached_vector_md_ == get_metadata() ) - { - clear_vector_cache_(); - } -} - -template < int D > -inline Position< D > -Layer< D >::compute_displacement( const Position< D >& from_pos, const size_t to_lid ) const -{ - return compute_displacement( from_pos, get_position( to_lid ) ); -} - -template < int D > -inline std::vector< double > -Layer< D >::compute_displacement( const std::vector< double >& from_pos, const size_t to_lid ) const -{ - return std::vector< double >( compute_displacement( Position< D >( from_pos ), to_lid ).get_vector() ); -} - -template < int D > -inline double -Layer< D >::compute_distance( const Position< D >& from_pos, const size_t lid ) const -{ - return compute_displacement( from_pos, lid ).length(); -} - -template < int D > -inline double -Layer< D >::compute_distance( const std::vector< double >& from_pos, const size_t lid ) const -{ - return compute_displacement( Position< D >( from_pos ), lid ).length(); -} - -template < int D > -inline double -Layer< D >::compute_distance( const std::vector< double >& from_pos, const std::vector< double >& to_pos ) const -{ - double squared_displacement = 0; - for ( unsigned int i = 0; i < D; ++i ) - { - const double displacement = compute_displacement( from_pos, to_pos, i ); - squared_displacement += 
displacement * displacement; - } - return std::sqrt( squared_displacement ); -} - -template < int D > -inline std::vector< double > -Layer< D >::get_position_vector( const size_t sind ) const -{ - return get_position( sind ).get_vector(); -} - -template < int D > -inline void -Layer< D >::clear_ntree_cache_() const -{ - cached_ntree_ = std::shared_ptr< Ntree< D, size_t > >(); - cached_ntree_md_ = NodeCollectionMetadataPTR( nullptr ); -} - -template < int D > -inline void -Layer< D >::clear_vector_cache_() const -{ - if ( cached_vector_ != 0 ) - { - delete cached_vector_; - } - cached_vector_ = 0; - cached_vector_md_ = NodeCollectionMetadataPTR( nullptr ); -} - } // namespace nest #endif diff --git a/nestkernel/layer_impl.h b/nestkernel/layer_impl.h index e24f571d08..7f96d0e44b 100644 --- a/nestkernel/layer_impl.h +++ b/nestkernel/layer_impl.h @@ -19,23 +19,179 @@ * along with NEST. If not, see . * */ +#include "grid_layer.h" +#include "layer.h" -#ifndef LAYER_IMPL_H -#define LAYER_IMPL_H +namespace nest +{ -#include "layer.h" +inline void +AbstractLayer::set_node_collection( NodeCollectionPTR node_collection ) +{ + node_collection_ = node_collection; +} -// Includes from nestkernel: -#include "booldatum.h" -#include "nest_datums.h" -#include "node_collection.h" -// Includes from spatial: -#include "grid_layer.h" -#include "grid_mask.h" +inline NodeCollectionPTR +AbstractLayer::get_node_collection() +{ + return node_collection_; +} -namespace nest +template < int D > +inline MaskedLayer< D >::MaskedLayer( Layer< D >& layer, + const MaskDatum& maskd, + bool allow_oversized, + NodeCollectionPTR node_collection ) + : mask_( maskd ) +{ + ntree_ = layer.get_global_positions_ntree( node_collection ); + + check_mask_( layer, allow_oversized ); +} + +template < int D > +inline MaskedLayer< D >::MaskedLayer( Layer< D >& layer, + const MaskDatum& maskd, + bool allow_oversized, + Layer< D >& target, + NodeCollectionPTR node_collection ) + : mask_( maskd ) +{ + ntree_ = 
layer.get_global_positions_ntree( + target.get_periodic_mask(), target.get_lower_left(), target.get_extent(), node_collection ); + + check_mask_( target, allow_oversized ); + mask_ = new ConverseMask< D >( dynamic_cast< const Mask< D >& >( *mask_ ) ); +} + +template < int D > +inline MaskedLayer< D >::~MaskedLayer() { +} + +template < int D > +inline typename Ntree< D, size_t >::masked_iterator +MaskedLayer< D >::begin( const Position< D >& anchor ) +{ + try + { + return ntree_->masked_begin( dynamic_cast< const Mask< D >& >( *mask_ ), anchor ); + } + catch ( std::bad_cast& e ) + { + throw BadProperty( "Mask is incompatible with layer." ); + } +} + +template < int D > +inline typename Ntree< D, size_t >::masked_iterator +MaskedLayer< D >::end() +{ + return ntree_->masked_end(); +} + +template < int D > +inline Layer< D >::Layer() +{ + // Default center (0,0) and extent (1,1) + for ( int i = 0; i < D; ++i ) + { + lower_left_[ i ] = -0.5; + extent_[ i ] = 1.0; + } +} + +template < int D > +inline Layer< D >::Layer( const Layer& other_layer ) + : AbstractLayer( other_layer ) + , lower_left_( other_layer.lower_left_ ) + , extent_( other_layer.extent_ ) + , periodic_( other_layer.periodic_ ) +{ +} + +template < int D > +inline Layer< D >::~Layer() +{ + if ( cached_ntree_md_ == get_metadata() ) + { + clear_ntree_cache_(); + } + + if ( cached_vector_md_ == get_metadata() ) + { + clear_vector_cache_(); + } +} + +template < int D > +inline Position< D > +Layer< D >::compute_displacement( const Position< D >& from_pos, const size_t to_lid ) const +{ + return compute_displacement( from_pos, get_position( to_lid ) ); +} + +template < int D > +inline std::vector< double > +Layer< D >::compute_displacement( const std::vector< double >& from_pos, const size_t to_lid ) const +{ + return std::vector< double >( compute_displacement( Position< D >( from_pos ), to_lid ).get_vector() ); +} + +template < int D > +inline double +Layer< D >::compute_distance( const Position< D >& 
from_pos, const size_t lid ) const +{ + return compute_displacement( from_pos, lid ).length(); +} + +template < int D > +inline double +Layer< D >::compute_distance( const std::vector< double >& from_pos, const size_t lid ) const +{ + return compute_displacement( Position< D >( from_pos ), lid ).length(); +} + +template < int D > +inline double +Layer< D >::compute_distance( const std::vector< double >& from_pos, const std::vector< double >& to_pos ) const +{ + double squared_displacement = 0; + for ( unsigned int i = 0; i < D; ++i ) + { + const double displacement = compute_displacement( from_pos, to_pos, i ); + squared_displacement += displacement * displacement; + } + return std::sqrt( squared_displacement ); +} + +template < int D > +inline std::vector< double > +Layer< D >::get_position_vector( const size_t sind ) const +{ + return get_position( sind ).get_vector(); +} + +template < int D > +inline void +Layer< D >::clear_ntree_cache_() const +{ + cached_ntree_ = std::shared_ptr< Ntree< D, size_t > >(); + cached_ntree_md_ = NodeCollectionMetadataPTR( nullptr ); +} + +template < int D > +inline void +Layer< D >::clear_vector_cache_() const +{ + if ( cached_vector_ != 0 ) + { + delete cached_vector_; + } + cached_vector_ = 0; + cached_vector_md_ = NodeCollectionMetadataPTR( nullptr ); +} template < int D > std::shared_ptr< Ntree< D, size_t > > Layer< D >::cached_ntree_; @@ -443,5 +599,3 @@ MaskedLayer< D >::check_mask_( Layer< D >& layer, bool allow_oversized ) } } // namespace nest - -#endif diff --git a/nestkernel/mask.h b/nestkernel/mask.h index 33b4c20c3b..813960cf75 100644 --- a/nestkernel/mask.h +++ b/nestkernel/mask.h @@ -1008,6 +1008,488 @@ EllipseMask< D >::EllipseMask( const DictionaryDatum& d ) create_bbox_(); } +template < int D > +AbstractMask* +Mask< D >::intersect_mask( const AbstractMask& other ) const +{ + const Mask* other_d = dynamic_cast< const Mask* >( &other ); + if ( other_d == 0 ) + { + throw BadProperty( "Masks must have same number of 
dimensions." ); + } + return new IntersectionMask< D >( *this, *other_d ); +} + +template < int D > +AbstractMask* +Mask< D >::union_mask( const AbstractMask& other ) const +{ + const Mask* other_d = dynamic_cast< const Mask* >( &other ); + if ( other_d == 0 ) + { + throw BadProperty( "Masks must have same number of dimensions." ); + } + return new UnionMask< D >( *this, *other_d ); +} + +template < int D > +AbstractMask* +Mask< D >::minus_mask( const AbstractMask& other ) const +{ + const Mask* other_d = dynamic_cast< const Mask* >( &other ); + if ( other_d == 0 ) + { + throw BadProperty( "Masks must have same number of dimensions." ); + } + return new DifferenceMask< D >( *this, *other_d ); +} + +template < int D > +bool +Mask< D >::inside( const std::vector< double >& pt ) const +{ + return inside( Position< D >( pt ) ); +} + +template < int D > +bool +Mask< D >::outside( const Box< D >& b ) const +{ + Box< D > bb = get_bbox(); + for ( int i = 0; i < D; ++i ) + { + if ( b.upper_right[ i ] < bb.lower_left[ i ] or b.lower_left[ i ] > bb.upper_right[ i ] ) + { + return true; + } + } + return false; +} + +template < int D > +bool +BoxMask< D >::inside( const Box< D >& b ) const +{ + return ( inside( b.lower_left ) and inside( b.upper_right ) ); +} + +template < int D > +bool +BoxMask< D >::outside( const Box< D >& b ) const +{ + // Note: There could be some inconsistencies with the boundaries. For the + // inside() function we had to add an epsilon because of rounding errors that + // can occur if node IDs are on the boundary if we have rotation. This might lead + // to overlap of the inside and outside functions. None of the tests have + // picked up any problems with this potential overlap as of yet (autumn 2017), + // so we don't know if it is an actual problem. 
+ for ( int i = 0; i < D; ++i ) + { + if ( b.upper_right[ i ] < min_values_[ i ] or b.lower_left[ i ] > max_values_[ i ] ) + { + return true; + } + } + return false; +} + +template < int D > +Box< D > +BoxMask< D >::get_bbox() const +{ + return Box< D >( min_values_, max_values_ ); +} + +template < int D > +Mask< D >* +BoxMask< D >::clone() const +{ + return new BoxMask( *this ); +} + +template < int D > +DictionaryDatum +BoxMask< D >::get_dict() const +{ + DictionaryDatum d( new Dictionary ); + DictionaryDatum maskd( new Dictionary ); + def< DictionaryDatum >( d, get_name(), maskd ); + def< std::vector< double > >( maskd, names::lower_left, lower_left_.get_vector() ); + def< std::vector< double > >( maskd, names::upper_right, upper_right_.get_vector() ); + def< double >( maskd, names::azimuth_angle, azimuth_angle_ ); + def< double >( maskd, names::polar_angle, polar_angle_ ); + return d; +} + +template < int D > +bool +BallMask< D >::inside( const Position< D >& p ) const +{ + // Optimizing by trying to avoid expensive calculations. + double dim_sum = 0; + // First check each dimension + for ( int i = 0; i < D; ++i ) + { + const double di = std::abs( p[ i ] - center_[ i ] ); + if ( di > radius_ ) + { + return false; + } + dim_sum += di; + } + // Next, check if we are inside a diamond (rotated square), which fits inside the ball. + if ( dim_sum <= radius_ ) + { + return true; + } + // Point must be somewhere between the ball mask edge and the diamond edge, + // revert to expensive calculation in this case. + return ( p - center_ ).length() <= radius_; +} + +template < int D > +bool +BallMask< D >::outside( const Box< D >& b ) const +{ + // Currently only checks if the box is outside the bounding box of + // the ball. This could be made more refined. 
+ for ( int i = 0; i < D; ++i ) + { + if ( b.upper_right[ i ] < center_[ i ] - radius_ or b.lower_left[ i ] > center_[ i ] + radius_ ) + { + return true; + } + } + return false; +} + +template < int D > +Box< D > +BallMask< D >::get_bbox() const +{ + Box< D > bb( center_, center_ ); + for ( int i = 0; i < D; ++i ) + { + bb.lower_left[ i ] -= radius_; + bb.upper_right[ i ] += radius_; + } + return bb; +} + +template < int D > +Mask< D >* +BallMask< D >::clone() const +{ + return new BallMask( *this ); +} + +template < int D > +DictionaryDatum +BallMask< D >::get_dict() const +{ + DictionaryDatum d( new Dictionary ); + DictionaryDatum maskd( new Dictionary ); + def< DictionaryDatum >( d, get_name(), maskd ); + def< double >( maskd, names::radius, radius_ ); + def< std::vector< double > >( maskd, names::anchor, center_.get_vector() ); + return d; +} + +template < int D > +void +EllipseMask< D >::create_bbox_() +{ + // Currently assumes 3D when constructing the radius vector. This could be + // avoided with more if tests, but the vector is only made once and is not + // big. The construction of the box is done in accordance with the actual + // dimensions. + std::vector< double > radii( 3 ); + if ( azimuth_angle_ == 0.0 and polar_angle_ == 0.0 ) + { + radii[ 0 ] = major_axis_ / 2.0; + radii[ 1 ] = minor_axis_ / 2.0; + radii[ 2 ] = polar_axis_ / 2.0; + } + else + { + // If the ellipse or ellipsoid is tilted, we make the boundary box + // quadratic, with the length of the sides equal to the axis with greatest + // length. This could be more refined. 
+ const double greatest_semi_axis = std::max( major_axis_, polar_axis_ ) / 2.0; + radii[ 0 ] = greatest_semi_axis; + radii[ 1 ] = greatest_semi_axis; + radii[ 2 ] = greatest_semi_axis; + } + + for ( int i = 0; i < D; ++i ) + { + bbox_.lower_left[ i ] = center_[ i ] - radii[ i ]; + bbox_.upper_right[ i ] = center_[ i ] + radii[ i ]; + } +} + +template < int D > +bool +EllipseMask< D >::outside( const Box< D >& b ) const +{ + // Currently only checks if the box is outside the bounding box of + // the ellipse. This could be made more refined. + + const Box< D >& bb = bbox_; + + for ( int i = 0; i < D; ++i ) + { + if ( b.upper_right[ i ] < bb.lower_left[ i ] or b.lower_left[ i ] > bb.upper_right[ i ] ) + { + return true; + } + } + return false; +} + +template < int D > +Box< D > +EllipseMask< D >::get_bbox() const +{ + return bbox_; +} + +template < int D > +Mask< D >* +EllipseMask< D >::clone() const +{ + return new EllipseMask( *this ); +} + +template < int D > +DictionaryDatum +EllipseMask< D >::get_dict() const +{ + DictionaryDatum d( new Dictionary ); + DictionaryDatum maskd( new Dictionary ); + def< DictionaryDatum >( d, get_name(), maskd ); + def< double >( maskd, names::major_axis, major_axis_ ); + def< double >( maskd, names::minor_axis, minor_axis_ ); + def< double >( maskd, names::polar_axis, polar_axis_ ); + def< std::vector< double > >( maskd, names::anchor, center_.get_vector() ); + def< double >( maskd, names::azimuth_angle, azimuth_angle_ ); + def< double >( maskd, names::polar_angle, polar_angle_ ); + return d; +} + + +template < int D > +bool +IntersectionMask< D >::inside( const Position< D >& p ) const +{ + return mask1_->inside( p ) and mask2_->inside( p ); +} + +template < int D > +bool +IntersectionMask< D >::inside( const Box< D >& b ) const +{ + return mask1_->inside( b ) and mask2_->inside( b ); +} + +template < int D > +bool +IntersectionMask< D >::outside( const Box< D >& b ) const +{ + return mask1_->outside( b ) or mask2_->outside( b ); +} 
+ +template < int D > +Box< D > +IntersectionMask< D >::get_bbox() const +{ + Box< D > bb = mask1_->get_bbox(); + Box< D > bb2 = mask2_->get_bbox(); + for ( int i = 0; i < D; ++i ) + { + if ( bb2.lower_left[ i ] > bb.lower_left[ i ] ) + { + bb.lower_left[ i ] = bb2.lower_left[ i ]; + } + if ( bb2.upper_right[ i ] < bb.upper_right[ i ] ) + { + bb.upper_right[ i ] = bb2.upper_right[ i ]; + } + } + return bb; +} + +template < int D > +Mask< D >* +IntersectionMask< D >::clone() const +{ + return new IntersectionMask( *this ); +} + +template < int D > +bool +UnionMask< D >::inside( const Position< D >& p ) const +{ + return mask1_->inside( p ) or mask2_->inside( p ); +} + +template < int D > +bool +UnionMask< D >::inside( const Box< D >& b ) const +{ + return mask1_->inside( b ) or mask2_->inside( b ); +} + +template < int D > +bool +UnionMask< D >::outside( const Box< D >& b ) const +{ + return mask1_->outside( b ) and mask2_->outside( b ); +} + +template < int D > +Box< D > +UnionMask< D >::get_bbox() const +{ + Box< D > bb = mask1_->get_bbox(); + Box< D > bb2 = mask2_->get_bbox(); + for ( int i = 0; i < D; ++i ) + { + if ( bb2.lower_left[ i ] < bb.lower_left[ i ] ) + { + bb.lower_left[ i ] = bb2.lower_left[ i ]; + } + if ( bb2.upper_right[ i ] > bb.upper_right[ i ] ) + { + bb.upper_right[ i ] = bb2.upper_right[ i ]; + } + } + return bb; +} + +template < int D > +Mask< D >* +UnionMask< D >::clone() const +{ + return new UnionMask( *this ); +} + +template < int D > +bool +DifferenceMask< D >::inside( const Position< D >& p ) const +{ + return mask1_->inside( p ) and not mask2_->inside( p ); +} + +template < int D > +bool +DifferenceMask< D >::inside( const Box< D >& b ) const +{ + return mask1_->inside( b ) and mask2_->outside( b ); +} + +template < int D > +bool +DifferenceMask< D >::outside( const Box< D >& b ) const +{ + return mask1_->outside( b ) or mask2_->inside( b ); +} + +template < int D > +Box< D > +DifferenceMask< D >::get_bbox() const +{ + return 
mask1_->get_bbox(); +} + +template < int D > +Mask< D >* +DifferenceMask< D >::clone() const +{ + return new DifferenceMask( *this ); +} + +template < int D > +bool +ConverseMask< D >::inside( const Position< D >& p ) const +{ + return m_->inside( -p ); +} + +template < int D > +bool +ConverseMask< D >::inside( const Box< D >& b ) const +{ + return m_->inside( Box< D >( -b.upper_right, -b.lower_left ) ); +} + +template < int D > +bool +ConverseMask< D >::outside( const Box< D >& b ) const +{ + return m_->outside( Box< D >( -b.upper_right, -b.lower_left ) ); +} + +template < int D > +Box< D > +ConverseMask< D >::get_bbox() const +{ + Box< D > bb = m_->get_bbox(); + return Box< D >( -bb.upper_right, -bb.lower_left ); +} + +template < int D > +Mask< D >* +ConverseMask< D >::clone() const +{ + return new ConverseMask( *this ); +} + +template < int D > +bool +AnchoredMask< D >::inside( const Position< D >& p ) const +{ + return m_->inside( p - anchor_ ); +} + +template < int D > +bool +AnchoredMask< D >::inside( const Box< D >& b ) const +{ + return m_->inside( Box< D >( b.lower_left - anchor_, b.upper_right - anchor_ ) ); +} + +template < int D > +bool +AnchoredMask< D >::outside( const Box< D >& b ) const +{ + return m_->outside( Box< D >( b.lower_left - anchor_, b.upper_right - anchor_ ) ); +} + +template < int D > +Box< D > +AnchoredMask< D >::get_bbox() const +{ + Box< D > bb = m_->get_bbox(); + return Box< D >( bb.lower_left + anchor_, bb.upper_right + anchor_ ); +} + +template < int D > +Mask< D >* +AnchoredMask< D >::clone() const +{ + return new AnchoredMask( *this ); +} + +template < int D > +DictionaryDatum +AnchoredMask< D >::get_dict() const +{ + DictionaryDatum d = m_->get_dict(); + def< std::vector< double > >( d, names::anchor, anchor_.get_vector() ); + return d; +} + } // namespace nest #endif diff --git a/nestkernel/mask_impl.h b/nestkernel/mask_impl.h deleted file mode 100644 index d62bab2e58..0000000000 --- a/nestkernel/mask_impl.h +++ /dev/null @@ 
-1,515 +0,0 @@ -/* - * mask_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#ifndef MASK_IMPL_H -#define MASK_IMPL_H - -#include "mask.h" - -namespace nest -{ - -template < int D > -AbstractMask* -Mask< D >::intersect_mask( const AbstractMask& other ) const -{ - const Mask* other_d = dynamic_cast< const Mask* >( &other ); - if ( other_d == 0 ) - { - throw BadProperty( "Masks must have same number of dimensions." ); - } - return new IntersectionMask< D >( *this, *other_d ); -} - -template < int D > -AbstractMask* -Mask< D >::union_mask( const AbstractMask& other ) const -{ - const Mask* other_d = dynamic_cast< const Mask* >( &other ); - if ( other_d == 0 ) - { - throw BadProperty( "Masks must have same number of dimensions." ); - } - return new UnionMask< D >( *this, *other_d ); -} - -template < int D > -AbstractMask* -Mask< D >::minus_mask( const AbstractMask& other ) const -{ - const Mask* other_d = dynamic_cast< const Mask* >( &other ); - if ( other_d == 0 ) - { - throw BadProperty( "Masks must have same number of dimensions." 
); - } - return new DifferenceMask< D >( *this, *other_d ); -} - -template < int D > -bool -Mask< D >::inside( const std::vector< double >& pt ) const -{ - return inside( Position< D >( pt ) ); -} - -template < int D > -bool -Mask< D >::outside( const Box< D >& b ) const -{ - Box< D > bb = get_bbox(); - for ( int i = 0; i < D; ++i ) - { - if ( b.upper_right[ i ] < bb.lower_left[ i ] or b.lower_left[ i ] > bb.upper_right[ i ] ) - { - return true; - } - } - return false; -} - -template < int D > -bool -BoxMask< D >::inside( const Box< D >& b ) const -{ - return ( inside( b.lower_left ) and inside( b.upper_right ) ); -} - -template < int D > -bool -BoxMask< D >::outside( const Box< D >& b ) const -{ - // Note: There could be some inconsistencies with the boundaries. For the - // inside() function we had to add an epsilon because of rounding errors that - // can occur if node IDs are on the boundary if we have rotation. This might lead - // to overlap of the inside and outside functions. None of the tests have - // picked up any problems with this potential overlap as of yet (autumn 2017), - // so we don't know if it is an actual problem. 
- for ( int i = 0; i < D; ++i ) - { - if ( b.upper_right[ i ] < min_values_[ i ] or b.lower_left[ i ] > max_values_[ i ] ) - { - return true; - } - } - return false; -} - -template < int D > -Box< D > -BoxMask< D >::get_bbox() const -{ - return Box< D >( min_values_, max_values_ ); -} - -template < int D > -Mask< D >* -BoxMask< D >::clone() const -{ - return new BoxMask( *this ); -} - -template < int D > -DictionaryDatum -BoxMask< D >::get_dict() const -{ - DictionaryDatum d( new Dictionary ); - DictionaryDatum maskd( new Dictionary ); - def< DictionaryDatum >( d, get_name(), maskd ); - def< std::vector< double > >( maskd, names::lower_left, lower_left_.get_vector() ); - def< std::vector< double > >( maskd, names::upper_right, upper_right_.get_vector() ); - def< double >( maskd, names::azimuth_angle, azimuth_angle_ ); - def< double >( maskd, names::polar_angle, polar_angle_ ); - return d; -} - -template < int D > -bool -BallMask< D >::inside( const Position< D >& p ) const -{ - // Optimizing by trying to avoid expensive calculations. - double dim_sum = 0; - // First check each dimension - for ( int i = 0; i < D; ++i ) - { - const double di = std::abs( p[ i ] - center_[ i ] ); - if ( di > radius_ ) - { - return false; - } - dim_sum += di; - } - // Next, check if we are inside a diamond (rotated square), which fits inside the ball. - if ( dim_sum <= radius_ ) - { - return true; - } - // Point must be somewhere between the ball mask edge and the diamond edge, - // revert to expensive calculation in this case. - return ( p - center_ ).length() <= radius_; -} - -template < int D > -bool -BallMask< D >::outside( const Box< D >& b ) const -{ - // Currently only checks if the box is outside the bounding box of - // the ball. This could be made more refined. 
- for ( int i = 0; i < D; ++i ) - { - if ( b.upper_right[ i ] < center_[ i ] - radius_ or b.lower_left[ i ] > center_[ i ] + radius_ ) - { - return true; - } - } - return false; -} - -template < int D > -Box< D > -BallMask< D >::get_bbox() const -{ - Box< D > bb( center_, center_ ); - for ( int i = 0; i < D; ++i ) - { - bb.lower_left[ i ] -= radius_; - bb.upper_right[ i ] += radius_; - } - return bb; -} - -template < int D > -Mask< D >* -BallMask< D >::clone() const -{ - return new BallMask( *this ); -} - -template < int D > -DictionaryDatum -BallMask< D >::get_dict() const -{ - DictionaryDatum d( new Dictionary ); - DictionaryDatum maskd( new Dictionary ); - def< DictionaryDatum >( d, get_name(), maskd ); - def< double >( maskd, names::radius, radius_ ); - def< std::vector< double > >( maskd, names::anchor, center_.get_vector() ); - return d; -} - -template < int D > -void -EllipseMask< D >::create_bbox_() -{ - // Currently assumes 3D when constructing the radius vector. This could be - // avoided with more if tests, but the vector is only made once and is not - // big. The construction of the box is done in accordance with the actual - // dimensions. - std::vector< double > radii( 3 ); - if ( azimuth_angle_ == 0.0 and polar_angle_ == 0.0 ) - { - radii[ 0 ] = major_axis_ / 2.0; - radii[ 1 ] = minor_axis_ / 2.0; - radii[ 2 ] = polar_axis_ / 2.0; - } - else - { - // If the ellipse or ellipsoid is tilted, we make the boundary box - // quadratic, with the length of the sides equal to the axis with greatest - // length. This could be more refined. 
- const double greatest_semi_axis = std::max( major_axis_, polar_axis_ ) / 2.0; - radii[ 0 ] = greatest_semi_axis; - radii[ 1 ] = greatest_semi_axis; - radii[ 2 ] = greatest_semi_axis; - } - - for ( int i = 0; i < D; ++i ) - { - bbox_.lower_left[ i ] = center_[ i ] - radii[ i ]; - bbox_.upper_right[ i ] = center_[ i ] + radii[ i ]; - } -} - -template < int D > -bool -EllipseMask< D >::outside( const Box< D >& b ) const -{ - // Currently only checks if the box is outside the bounding box of - // the ellipse. This could be made more refined. - - const Box< D >& bb = bbox_; - - for ( int i = 0; i < D; ++i ) - { - if ( b.upper_right[ i ] < bb.lower_left[ i ] or b.lower_left[ i ] > bb.upper_right[ i ] ) - { - return true; - } - } - return false; -} - -template < int D > -Box< D > -EllipseMask< D >::get_bbox() const -{ - return bbox_; -} - -template < int D > -Mask< D >* -EllipseMask< D >::clone() const -{ - return new EllipseMask( *this ); -} - -template < int D > -DictionaryDatum -EllipseMask< D >::get_dict() const -{ - DictionaryDatum d( new Dictionary ); - DictionaryDatum maskd( new Dictionary ); - def< DictionaryDatum >( d, get_name(), maskd ); - def< double >( maskd, names::major_axis, major_axis_ ); - def< double >( maskd, names::minor_axis, minor_axis_ ); - def< double >( maskd, names::polar_axis, polar_axis_ ); - def< std::vector< double > >( maskd, names::anchor, center_.get_vector() ); - def< double >( maskd, names::azimuth_angle, azimuth_angle_ ); - def< double >( maskd, names::polar_angle, polar_angle_ ); - return d; -} - - -template < int D > -bool -IntersectionMask< D >::inside( const Position< D >& p ) const -{ - return mask1_->inside( p ) and mask2_->inside( p ); -} - -template < int D > -bool -IntersectionMask< D >::inside( const Box< D >& b ) const -{ - return mask1_->inside( b ) and mask2_->inside( b ); -} - -template < int D > -bool -IntersectionMask< D >::outside( const Box< D >& b ) const -{ - return mask1_->outside( b ) or mask2_->outside( b ); -} 
- -template < int D > -Box< D > -IntersectionMask< D >::get_bbox() const -{ - Box< D > bb = mask1_->get_bbox(); - Box< D > bb2 = mask2_->get_bbox(); - for ( int i = 0; i < D; ++i ) - { - if ( bb2.lower_left[ i ] > bb.lower_left[ i ] ) - { - bb.lower_left[ i ] = bb2.lower_left[ i ]; - } - if ( bb2.upper_right[ i ] < bb.upper_right[ i ] ) - { - bb.upper_right[ i ] = bb2.upper_right[ i ]; - } - } - return bb; -} - -template < int D > -Mask< D >* -IntersectionMask< D >::clone() const -{ - return new IntersectionMask( *this ); -} - -template < int D > -bool -UnionMask< D >::inside( const Position< D >& p ) const -{ - return mask1_->inside( p ) or mask2_->inside( p ); -} - -template < int D > -bool -UnionMask< D >::inside( const Box< D >& b ) const -{ - return mask1_->inside( b ) or mask2_->inside( b ); -} - -template < int D > -bool -UnionMask< D >::outside( const Box< D >& b ) const -{ - return mask1_->outside( b ) and mask2_->outside( b ); -} - -template < int D > -Box< D > -UnionMask< D >::get_bbox() const -{ - Box< D > bb = mask1_->get_bbox(); - Box< D > bb2 = mask2_->get_bbox(); - for ( int i = 0; i < D; ++i ) - { - if ( bb2.lower_left[ i ] < bb.lower_left[ i ] ) - { - bb.lower_left[ i ] = bb2.lower_left[ i ]; - } - if ( bb2.upper_right[ i ] > bb.upper_right[ i ] ) - { - bb.upper_right[ i ] = bb2.upper_right[ i ]; - } - } - return bb; -} - -template < int D > -Mask< D >* -UnionMask< D >::clone() const -{ - return new UnionMask( *this ); -} - -template < int D > -bool -DifferenceMask< D >::inside( const Position< D >& p ) const -{ - return mask1_->inside( p ) and not mask2_->inside( p ); -} - -template < int D > -bool -DifferenceMask< D >::inside( const Box< D >& b ) const -{ - return mask1_->inside( b ) and mask2_->outside( b ); -} - -template < int D > -bool -DifferenceMask< D >::outside( const Box< D >& b ) const -{ - return mask1_->outside( b ) or mask2_->inside( b ); -} - -template < int D > -Box< D > -DifferenceMask< D >::get_bbox() const -{ - return 
mask1_->get_bbox(); -} - -template < int D > -Mask< D >* -DifferenceMask< D >::clone() const -{ - return new DifferenceMask( *this ); -} - -template < int D > -bool -ConverseMask< D >::inside( const Position< D >& p ) const -{ - return m_->inside( -p ); -} - -template < int D > -bool -ConverseMask< D >::inside( const Box< D >& b ) const -{ - return m_->inside( Box< D >( -b.upper_right, -b.lower_left ) ); -} - -template < int D > -bool -ConverseMask< D >::outside( const Box< D >& b ) const -{ - return m_->outside( Box< D >( -b.upper_right, -b.lower_left ) ); -} - -template < int D > -Box< D > -ConverseMask< D >::get_bbox() const -{ - Box< D > bb = m_->get_bbox(); - return Box< D >( -bb.upper_right, -bb.lower_left ); -} - -template < int D > -Mask< D >* -ConverseMask< D >::clone() const -{ - return new ConverseMask( *this ); -} - -template < int D > -bool -AnchoredMask< D >::inside( const Position< D >& p ) const -{ - return m_->inside( p - anchor_ ); -} - -template < int D > -bool -AnchoredMask< D >::inside( const Box< D >& b ) const -{ - return m_->inside( Box< D >( b.lower_left - anchor_, b.upper_right - anchor_ ) ); -} - -template < int D > -bool -AnchoredMask< D >::outside( const Box< D >& b ) const -{ - return m_->outside( Box< D >( b.lower_left - anchor_, b.upper_right - anchor_ ) ); -} - -template < int D > -Box< D > -AnchoredMask< D >::get_bbox() const -{ - Box< D > bb = m_->get_bbox(); - return Box< D >( bb.lower_left + anchor_, bb.upper_right + anchor_ ); -} - -template < int D > -Mask< D >* -AnchoredMask< D >::clone() const -{ - return new AnchoredMask( *this ); -} - -template < int D > -DictionaryDatum -AnchoredMask< D >::get_dict() const -{ - DictionaryDatum d = m_->get_dict(); - def< std::vector< double > >( d, names::anchor, anchor_.get_vector() ); - return d; -} - -} // namespace nest - -#endif diff --git a/nestkernel/model.cpp b/nestkernel/model.cpp index 08d2a342a3..9779816cc5 100644 --- a/nestkernel/model.cpp +++ b/nestkernel/model.cpp @@ -31,6 
+31,7 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" +#include "model_manager.h" // Includes from sli: #include "dictutils.h" diff --git a/nestkernel/model_manager.cpp b/nestkernel/model_manager.cpp index a656285f13..fc79cc56e4 100644 --- a/nestkernel/model_manager.cpp +++ b/nestkernel/model_manager.cpp @@ -31,12 +31,9 @@ #include "compose.hpp" // Includes from nestkernel: -#include "connector_model_impl.h" -#include "genericmodel_impl.h" +#include "connection_manager.h" #include "kernel_manager.h" -#include "model_manager_impl.h" #include "proxynode.h" -#include "vp_manager_impl.h" // Includes from models: #include "models.h" diff --git a/nestkernel/model_manager.h b/nestkernel/model_manager.h index 924535a780..41b636715d 100644 --- a/nestkernel/model_manager.h +++ b/nestkernel/model_manager.h @@ -27,18 +27,23 @@ #include // Includes from nestkernel: -#include "connector_model.h" +#include "connection_label.h" +#include "connection_manager.h" +#include "connector_model_impl.h" #include "genericmodel.h" +#include "kernel_manager.h" +#include "logging_manager.h" #include "manager_interface.h" #include "model.h" -#include "nest.h" +#include "modelrange_manager.h" #include "nest_time.h" -#include "nest_timeconverter.h" #include "nest_types.h" #include "node.h" +#include "target_identifier.h" + +// Includes from libnestutil: +#include "compose.hpp" -// Includes from sli: -#include "dictutils.h" namespace nest { @@ -326,6 +331,88 @@ ModelManager::get_secondary_event_prototype( const synindex syn_id, const size_t return *get_connection_model( syn_id, tid ).get_secondary_event(); } +template < class ModelT > +size_t +ModelManager::register_node_model( const Name& name, std::string deprecation_info ) +{ + if ( modeldict_->known( name ) ) + { + std::string msg = String::compose( "A model called '%1' already exists. 
Please choose a different name!", name ); + throw NamingConflict( msg ); + } + + Model* model = new GenericModel< ModelT >( name.toString(), deprecation_info ); + return register_node_model_( model ); +} + +template < template < typename targetidentifierT > class ConnectionT > +void +ModelManager::register_connection_model( const std::string& name ) +{ + // Required to check which variants to create + ConnectorModel const* const dummy_model = + new GenericConnectorModel< ConnectionT< TargetIdentifierPtrRport > >( "dummy" ); + + register_specific_connection_model_< ConnectionT< TargetIdentifierPtrRport > >( name ); + if ( dummy_model->has_property( ConnectionModelProperties::SUPPORTS_HPC ) ) + { + register_specific_connection_model_< ConnectionT< TargetIdentifierIndex > >( name + "_hpc" ); + } + if ( dummy_model->has_property( ConnectionModelProperties::SUPPORTS_LBL ) ) + { + register_specific_connection_model_< ConnectionLabel< ConnectionT< TargetIdentifierPtrRport > > >( name + "_lbl" ); + } + + delete dummy_model; +} + +template < typename CompleteConnectionT > +void +ModelManager::register_specific_connection_model_( const std::string& name ) +{ + kernel().vp_manager.assert_single_threaded(); + + if ( synapsedict_->known( name ) ) + { + std::string msg = + String::compose( "A synapse type called '%1' already exists.\nPlease choose a different name!", name ); + throw NamingConflict( msg ); + } + + const auto new_syn_id = get_num_connection_models(); + if ( new_syn_id >= invalid_synindex ) + { + const std::string msg = String::compose( + "CopyModel cannot generate another synapse. 
Maximal synapse model count of %1 exceeded.", MAX_SYN_ID ); + LOG( M_ERROR, "ModelManager::copy_connection_model_", msg ); + throw KernelException( "Synapse model count exceeded" ); + } + + synapsedict_->insert( name, new_syn_id ); + +#pragma omp parallel + { + ConnectorModel* conn_model = new GenericConnectorModel< CompleteConnectionT >( name ); + conn_model->set_syn_id( new_syn_id ); + if ( not conn_model->has_property( ConnectionModelProperties::IS_PRIMARY ) ) + { + conn_model->get_secondary_event()->add_syn_id( new_syn_id ); + } + connection_models_.at( kernel().vp_manager.get_thread_id() ).push_back( conn_model ); + kernel().connection_manager.resize_connections(); + } // end of parallel section +} + +inline Node* +ModelManager::get_proxy_node( size_t tid, size_t node_id ) +{ + const int model_id = kernel().modelrange_manager.get_model_id( node_id ); + Node* proxy = proxy_nodes_[ tid ].at( model_id ); + proxy->set_node_id_( node_id ); + proxy->set_vp( kernel().vp_manager.node_id_to_vp( node_id ) ); + return proxy; +} + } // namespace nest #endif /* MODEL_MANAGER_H */ diff --git a/nestkernel/model_manager_impl.h b/nestkernel/model_manager_impl.h index d9e8d8e6c7..e69de29bb2 100644 --- a/nestkernel/model_manager_impl.h +++ b/nestkernel/model_manager_impl.h @@ -1,126 +0,0 @@ -/* - * model_manager_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#ifndef MODEL_MANAGER_IMPL_H -#define MODEL_MANAGER_IMPL_H - -#include "model_manager.h" - -// Includes from libnestutil: -#include "compose.hpp" -#include "string_utils.h" - -// Includes from nestkernel: -#include "connection_label.h" -#include "kernel_manager.h" -#include "nest.h" -#include "target_identifier.h" - - -namespace nest -{ - -template < class ModelT > -size_t -ModelManager::register_node_model( const Name& name, std::string deprecation_info ) -{ - if ( modeldict_->known( name ) ) - { - std::string msg = String::compose( "A model called '%1' already exists. Please choose a different name!", name ); - throw NamingConflict( msg ); - } - - Model* model = new GenericModel< ModelT >( name.toString(), deprecation_info ); - return register_node_model_( model ); -} - -template < template < typename targetidentifierT > class ConnectionT > -void -ModelManager::register_connection_model( const std::string& name ) -{ - // Required to check which variants to create - ConnectorModel const* const dummy_model = - new GenericConnectorModel< ConnectionT< TargetIdentifierPtrRport > >( "dummy" ); - - register_specific_connection_model_< ConnectionT< TargetIdentifierPtrRport > >( name ); - if ( dummy_model->has_property( ConnectionModelProperties::SUPPORTS_HPC ) ) - { - register_specific_connection_model_< ConnectionT< TargetIdentifierIndex > >( name + "_hpc" ); - } - if ( dummy_model->has_property( ConnectionModelProperties::SUPPORTS_LBL ) ) - { - register_specific_connection_model_< ConnectionLabel< ConnectionT< TargetIdentifierPtrRport > > >( name + "_lbl" ); - } - - delete dummy_model; -} - -template < typename CompleteConnectionT > -void -ModelManager::register_specific_connection_model_( const std::string& name ) -{ - kernel().vp_manager.assert_single_threaded(); - - if ( synapsedict_->known( name ) ) - { - std::string msg = - String::compose( "A 
synapse type called '%1' already exists.\nPlease choose a different name!", name ); - throw NamingConflict( msg ); - } - - const auto new_syn_id = get_num_connection_models(); - if ( new_syn_id >= invalid_synindex ) - { - const std::string msg = String::compose( - "CopyModel cannot generate another synapse. Maximal synapse model count of %1 exceeded.", MAX_SYN_ID ); - LOG( M_ERROR, "ModelManager::copy_connection_model_", msg ); - throw KernelException( "Synapse model count exceeded" ); - } - - synapsedict_->insert( name, new_syn_id ); - -#pragma omp parallel - { - ConnectorModel* conn_model = new GenericConnectorModel< CompleteConnectionT >( name ); - conn_model->set_syn_id( new_syn_id ); - if ( not conn_model->has_property( ConnectionModelProperties::IS_PRIMARY ) ) - { - conn_model->get_secondary_event()->add_syn_id( new_syn_id ); - } - connection_models_.at( kernel().vp_manager.get_thread_id() ).push_back( conn_model ); - kernel().connection_manager.resize_connections(); - } // end of parallel section -} - -inline Node* -ModelManager::get_proxy_node( size_t tid, size_t node_id ) -{ - const int model_id = kernel().modelrange_manager.get_model_id( node_id ); - Node* proxy = proxy_nodes_[ tid ].at( model_id ); - proxy->set_node_id_( node_id ); - proxy->set_vp( kernel().vp_manager.node_id_to_vp( node_id ) ); - return proxy; -} - -} // namespace nest - -#endif /* #ifndef MODEL_MANAGER_IMPL_H */ diff --git a/nestkernel/modelrange_manager.cpp b/nestkernel/modelrange_manager.cpp index 9b33323a87..29b2ac09b3 100644 --- a/nestkernel/modelrange_manager.cpp +++ b/nestkernel/modelrange_manager.cpp @@ -28,6 +28,7 @@ // Includes from nestkernel: #include "kernel_manager.h" #include "model.h" +#include "model_manager.h" namespace nest diff --git a/nestkernel/mpi_manager.cpp b/nestkernel/mpi_manager.cpp index 5da18cc3ac..48b2aab7cd 100644 --- a/nestkernel/mpi_manager.cpp +++ b/nestkernel/mpi_manager.cpp @@ -26,16 +26,23 @@ #include // Includes from libnestutil: -#include 
"stopwatch_impl.h" // Includes from nestkernel: #include "kernel_manager.h" -#include "mpi_manager_impl.h" #include "nest_types.h" +#include "logging.h" +#include "logging_manager.h" +#include "music_manager.h" +#include "nest_names.h" +#include "stopwatch.h" + // Includes from sli: #include "dictutils.h" +namespace nest +{ + #ifdef HAVE_MPI template <> @@ -1110,3 +1117,4 @@ nest::MPIManager::communicate_recv_counts_secondary_events() } #endif /* #ifdef HAVE_MPI */ +} // namespace nest diff --git a/nestkernel/mpi_manager.h b/nestkernel/mpi_manager.h index a8f4fcea7e..f70bb09623 100644 --- a/nestkernel/mpi_manager.h +++ b/nestkernel/mpi_manager.h @@ -35,16 +35,15 @@ // C++ includes: #include #include -#include #include #include #include // Includes from libnestutil: #include "manager_interface.h" -#include "stopwatch.h" // Includes from nestkernel: +#include "kernel_manager.h" #include "nest_types.h" #include "spike_data.h" #include "target_data.h" @@ -104,11 +103,6 @@ class MPIManager : public ManagerInterface */ size_t get_process_id_of_vp( const size_t vp ) const; - /* - * Return the process id of the node with the specified node ID. - */ - size_t get_process_id_of_node_id( const size_t node_id ) const; - /** * Finalize MPI communication (needs to be separate from MPIManager::finalize * when compiled with MUSIC since spikes can arrive and handlers called here) @@ -762,6 +756,48 @@ MPIManager::communicate_off_grid_spike_data_Alltoall( std::vector< D >& send_buf communicate_Alltoall( send_buffer, recv_buffer, send_recv_count_off_grid_spike_data_in_int_per_rank ); } + +inline size_t +nest::MPIManager::get_process_id_of_vp( const size_t vp ) const +{ + return vp % num_processes_; +} + +#ifdef HAVE_MPI + +// Variable to hold the MPI communicator to use. 
+#ifdef HAVE_MUSIC +extern MPI::Intracomm comm; +#else /* #ifdef HAVE_MUSIC */ +extern MPI_Comm comm; +#endif /* #ifdef HAVE_MUSIC */ + +template < typename T > +struct MPI_Type +{ + static MPI_Datatype type; +}; + +template < typename T > +void +nest::MPIManager::communicate_Allgatherv( std::vector< T >& send_buffer, + std::vector< T >& recv_buffer, + std::vector< int >& displacements, + std::vector< int >& recv_counts ) +{ + // attempt Allgather + MPI_Allgatherv( &( *send_buffer.begin() ), + send_buffer.size(), + MPI_Type< T >::type, + &recv_buffer[ 0 ], + &recv_counts[ 0 ], + &displacements[ 0 ], + MPI_Type< T >::type, + comm ); +} + +#endif /* HAVE_MPI */ + } #endif /* MPI_MANAGER_H */ diff --git a/nestkernel/mpi_manager_impl.h b/nestkernel/mpi_manager_impl.h deleted file mode 100644 index 1f596adc3c..0000000000 --- a/nestkernel/mpi_manager_impl.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * mpi_manager_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . 
- * - */ - -#ifndef MPI_MANAGER_IMPL_H -#define MPI_MANAGER_IMPL_H - -#include "config.h" - -#ifdef HAVE_MPI -// C includes: -#include -#endif /* #ifdef HAVE_MPI */ - -#include "mpi_manager.h" - -// Includes from nestkernel: -#include "kernel_manager.h" - -inline size_t -nest::MPIManager::get_process_id_of_vp( const size_t vp ) const -{ - return vp % num_processes_; -} - -#ifdef HAVE_MPI - -// Variable to hold the MPI communicator to use. -#ifdef HAVE_MUSIC -extern MPI::Intracomm comm; -#else /* #ifdef HAVE_MUSIC */ -extern MPI_Comm comm; -#endif /* #ifdef HAVE_MUSIC */ - -template < typename T > -struct MPI_Type -{ - static MPI_Datatype type; -}; - -template < typename T > -void -nest::MPIManager::communicate_Allgatherv( std::vector< T >& send_buffer, - std::vector< T >& recv_buffer, - std::vector< int >& displacements, - std::vector< int >& recv_counts ) -{ - // attempt Allgather - MPI_Allgatherv( &( *send_buffer.begin() ), - send_buffer.size(), - MPI_Type< T >::type, - &recv_buffer[ 0 ], - &recv_counts[ 0 ], - &displacements[ 0 ], - MPI_Type< T >::type, - comm ); -} - -inline size_t -nest::MPIManager::get_process_id_of_node_id( const size_t node_id ) const -{ - return node_id % kernel().vp_manager.get_num_virtual_processes() % num_processes_; -} - -#else // HAVE_MPI - - -inline size_t -nest::MPIManager::get_process_id_of_node_id( const size_t ) const -{ - return 0; -} - -#endif /* HAVE_MPI */ - -#endif /* MPI_MANAGER_IMPL_H */ diff --git a/nestkernel/music_event_handler.cpp b/nestkernel/music_event_handler.cpp index 9f0a7dc616..1407adea8a 100644 --- a/nestkernel/music_event_handler.cpp +++ b/nestkernel/music_event_handler.cpp @@ -31,6 +31,8 @@ // Includes from nestkernel: #include "event.h" #include "kernel_manager.h" +#include "logging_manager.h" +#include "music_manager.h" #include "nest_types.h" namespace nest diff --git a/nestkernel/music_manager.cpp b/nestkernel/music_manager.cpp index ec237eca7d..deedb47277 100644 --- a/nestkernel/music_manager.cpp +++ 
b/nestkernel/music_manager.cpp @@ -30,7 +30,10 @@ #endif // Includes from nestkernel: +#include "compose.hpp" #include "kernel_manager.h" +#include "logging.h" +#include "logging_manager.h" // Includes from sli: #include "dictutils.h" diff --git a/nestkernel/music_rate_in_handler.cpp b/nestkernel/music_rate_in_handler.cpp index 0ebfe55f41..55a4089858 100644 --- a/nestkernel/music_rate_in_handler.cpp +++ b/nestkernel/music_rate_in_handler.cpp @@ -29,8 +29,10 @@ #include "logging.h" // Includes from nestkernel: -#include "event.h" +#include "connection_manager.h" #include "kernel_manager.h" +#include "logging_manager.h" +#include "music_manager.h" #include "nest_types.h" namespace nest diff --git a/nestkernel/nest.cpp b/nestkernel/nest.cpp index 2add49bcb6..d050d932bb 100644 --- a/nestkernel/nest.cpp +++ b/nestkernel/nest.cpp @@ -26,10 +26,17 @@ #include // Includes from nestkernel: +#include "connection_manager.h" #include "exceptions.h" +#include "io_manager.h" #include "kernel_manager.h" -#include "mpi_manager_impl.h" +#include "logging_manager.h" +#include "model_manager.h" +#include "node_manager.h" #include "parameter.h" +#include "random_manager.h" +#include "simulation_manager.h" +#include "sp_manager.h" // Includes from sli: #include "sliexceptions.h" diff --git a/nestkernel/nest.h b/nestkernel/nest.h index e49c7dcb5e..5bd652200b 100644 --- a/nestkernel/nest.h +++ b/nestkernel/nest.h @@ -23,17 +23,13 @@ #ifndef NEST_H #define NEST_H -// C++ includes: -#include - // Includes from libnestutil: -#include "enum_bitfield.h" #include "logging.h" // Includes from nestkernel: +#include "kernel_manager.h" +#include "model_manager.h" #include "nest_datums.h" -#include "nest_time.h" -#include "nest_types.h" // Includes from sli: #include "arraydatum.h" @@ -190,6 +186,20 @@ std::vector< double > apply( const ParameterDatum& param, const DictionaryDatum& Datum* node_collection_array_index( const Datum* datum, const long* array, unsigned long n ); Datum* 
node_collection_array_index( const Datum* datum, const bool* array, unsigned long n ); +template < template < typename > class ConnectorModelT > +void +register_connection_model( const std::string& name ) +{ + kernel().model_manager.register_connection_model< ConnectorModelT >( name ); +} + +template < typename NodeModelT > +void +register_node_model( const std::string& name, std::string deprecation_info ) +{ + kernel().model_manager.register_node_model< NodeModelT >( name, deprecation_info ); +} + } diff --git a/nestkernel/nest_extension_interface.h b/nestkernel/nest_extension_interface.h index ff4d519a21..624317dbe1 100644 --- a/nestkernel/nest_extension_interface.h +++ b/nestkernel/nest_extension_interface.h @@ -26,19 +26,12 @@ // Includes from nestkernel; placed here so module developer does not need to // include them manually #include "config.h" -#include "connection_manager_impl.h" -#include "connector_model_impl.h" #include "exceptions.h" #include "genericmodel.h" -#include "genericmodel_impl.h" -#include "io_manager_impl.h" #include "kernel_manager.h" #include "model.h" -#include "model_manager_impl.h" #include "nest.h" -#include "nest_impl.h" #include "nestmodule.h" -#include "sp_manager_impl.h" #include "target_identifier.h" // C++ includes diff --git a/nestkernel/nestmodule.cpp b/nestkernel/nestmodule.cpp index 77e8d3ae7d..7a7f3449ce 100644 --- a/nestkernel/nestmodule.cpp +++ b/nestkernel/nestmodule.cpp @@ -29,23 +29,18 @@ #include "logging.h" // Includes from nestkernel: -#include "conn_builder.h" -#include "connection_creator_impl.h" #include "free_layer.h" -#include "genericmodel.h" -#include "grid_layer.h" #include "grid_mask.h" #include "kernel_manager.h" -#include "layer.h" #include "layer_impl.h" +#include "logging_manager.h" #include "mask.h" -#include "mask_impl.h" -#include "model_manager_impl.h" +#include "module_manager.h" +#include "music_manager.h" #include "nest.h" #include "nest_datums.h" -#include "nest_types.h" -#include "node.h" 
#include "parameter.h" +#include "sp_manager.h" #include "spatial.h" // Includes from sli: diff --git a/nestkernel/node.cpp b/nestkernel/node.cpp index 6f54cc1075..602c6431a9 100644 --- a/nestkernel/node.cpp +++ b/nestkernel/node.cpp @@ -29,6 +29,8 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" +#include "model_manager.h" +#include "node_manager.h" // Includes from sli: #include "arraydatum.h" diff --git a/nestkernel/node.h b/nestkernel/node.h index 5c41113f43..a6c4504747 100644 --- a/nestkernel/node.h +++ b/nestkernel/node.h @@ -26,9 +26,7 @@ // C++ includes: #include #include -#include #include -#include #include // Includes from nestkernel: diff --git a/nestkernel/node_collection.cpp b/nestkernel/node_collection.cpp index b4323921f4..57a75c908b 100644 --- a/nestkernel/node_collection.cpp +++ b/nestkernel/node_collection.cpp @@ -27,14 +27,14 @@ // Includes from nestkernel: #include "kernel_manager.h" -#include "mpi_manager_impl.h" +#include "model_manager.h" +#include "modelrange_manager.h" #include "node.h" -#include "vp_manager_impl.h" // C++ includes: +#include "numeric" // accumulate #include // copy #include // lcm -#include // accumulate namespace nest diff --git a/nestkernel/node_manager.cpp b/nestkernel/node_manager.cpp index ca5f28ed98..1de01dba85 100644 --- a/nestkernel/node_manager.cpp +++ b/nestkernel/node_manager.cpp @@ -31,13 +31,15 @@ #include "logging.h" // Includes from nestkernel: +#include "connection_manager.h" +#include "event_delivery_manager.h" #include "kernel_manager.h" +#include "logging_manager.h" #include "model.h" -#include "model_manager_impl.h" +#include "model_manager.h" +#include "modelrange_manager.h" #include "node.h" -#include "secondary_event_impl.h" #include "vp_manager.h" -#include "vp_manager_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/nestkernel/ntree.h b/nestkernel/ntree.h index 256f16deec..14e38df718 100644 --- a/nestkernel/ntree.h +++ b/nestkernel/ntree.h @@ 
-486,6 +486,501 @@ Ntree< D, T, max_capacity, max_depth >::insert( iterator, const std::pair< Posit return insert( val.first, val.second ); } +template < int D, class T, int max_capacity, int max_depth > +Ntree< D, T, max_capacity, max_depth >::iterator::iterator( Ntree& q ) + : ntree_( &q ) + , top_( &q ) + , node_( 0 ) +{ + // First leaf + while ( not ntree_->is_leaf() ) + { + ntree_ = ntree_->children_[ 0 ]; + } + // Find the first non-empty leaf + while ( ntree_->nodes_.empty() ) + { + + next_leaf_(); + if ( ntree_ == 0 ) + { + break; + } + } +} + +template < int D, class T, int max_capacity, int max_depth > +typename Ntree< D, T, max_capacity, max_depth >::iterator& +Ntree< D, T, max_capacity, max_depth >::iterator::operator++() +{ + node_++; + + while ( node_ >= ntree_->nodes_.size() ) + { + + next_leaf_(); + + node_ = 0; + if ( ntree_ == 0 ) + { + break; + } + } + + return *this; +} + +template < int D, class T, int max_capacity, int max_depth > +void +Ntree< D, T, max_capacity, max_depth >::iterator::next_leaf_() +{ + + // If we are on the last subntree, move up + while ( ntree_ and ( ntree_ != top_ ) and ntree_->my_subquad_ == N - 1 ) + { + ntree_ = ntree_->parent_; + } + + // Since we stop at the top, this should never happen! + assert( ntree_ != 0 ); + + // If we have reached the top, mark as invalid and return + if ( ntree_ == top_ ) + { + ntree_ = 0; + return; + } + + // Move to next sibling + ntree_ = ntree_->parent_->children_[ ntree_->my_subquad_ + 1 ]; + + // Move down if this is not a leaf. 
+ while ( not ntree_->is_leaf() ) + { + ntree_ = ntree_->children_[ 0 ]; + } +} + +// Proper mod which returns non-negative numbers +static inline double +mod( double x, double p ) +{ + x = std::fmod( x, p ); + if ( x < 0 ) + { + x += p; + } + return x; +} + +template < int D, class T, int max_capacity, int max_depth > +Ntree< D, T, max_capacity, max_depth >::masked_iterator::masked_iterator( Ntree< D, T, max_capacity, max_depth >& q, + const Mask< D >& mask, + const Position< D >& anchor ) + : ntree_( &q ) + , top_( &q ) + , allin_top_( 0 ) + , node_( 0 ) + , mask_( &mask ) + , anchor_( anchor ) + , anchors_() + , current_anchor_( 0 ) +{ + if ( ntree_->periodic_.any() ) + { + Box< D > mask_bb = mask_->get_bbox(); + + // Move lower left corner of mask into main image of layer + for ( int i = 0; i < D; ++i ) + { + if ( ntree_->periodic_[ i ] ) + { + anchor_[ i ] = + nest::mod( anchor_[ i ] + mask_bb.lower_left[ i ] - ntree_->lower_left_[ i ], ntree_->extent_[ i ] ) + - mask_bb.lower_left[ i ] + ntree_->lower_left_[ i ]; + } + } + anchors_.push_back( anchor_ ); + + // Add extra anchors for each dimension where this is needed + // (Assumes that the mask is not wider than the layer) + for ( int i = 0; i < D; ++i ) + { + if ( ntree_->periodic_[ i ] ) + { + int n = anchors_.size(); + if ( ( anchor_[ i ] + mask_bb.upper_right[ i ] - ntree_->lower_left_[ i ] ) > ntree_->extent_[ i ] ) + { + for ( int j = 0; j < n; ++j ) + { + Position< D > p = anchors_[ j ]; + p[ i ] -= ntree_->extent_[ i ]; + anchors_.push_back( p ); + } + } + } + } + } + + init_(); +} + +template < int D, class T, int max_capacity, int max_depth > +void +Ntree< D, T, max_capacity, max_depth >::masked_iterator::init_() +{ + node_ = 0; + allin_top_ = 0; + ntree_ = top_; + + if ( mask_->outside( Box< D >( ntree_->lower_left_ - anchor_, ntree_->lower_left_ - anchor_ + ntree_->extent_ ) ) ) + { + + next_anchor_(); + } + else + { + + if ( mask_->inside( Box< D >( ntree_->lower_left_ - anchor_, 
ntree_->lower_left_ - anchor_ + ntree_->extent_ ) ) ) + { + first_leaf_inside_(); + } + else + { + first_leaf_(); + } + + if ( ntree_->nodes_.empty() or ( not mask_->inside( ntree_->nodes_[ node_ ].first - anchor_ ) ) ) + { + ++( *this ); + } + } +} + +template < int D, class T, int max_capacity, int max_depth > +void +Ntree< D, T, max_capacity, max_depth >::masked_iterator::next_anchor_() +{ + ++current_anchor_; + if ( current_anchor_ >= anchors_.size() ) + { + // Done. Mark as invalid. + ntree_ = 0; + node_ = 0; + } + else + { + anchor_ = anchors_[ current_anchor_ ]; + init_(); + } +} + +template < int D, class T, int max_capacity, int max_depth > +void +Ntree< D, T, max_capacity, max_depth >::masked_iterator::next_leaf_() +{ + + // There are two states: the initial state, and "all in". In the + // all in state, we are in a subtree which is completely inside + // the mask. The allin_top_ is the top of this subtree. When + // exiting the subtree, the state changes to the initial + // state. In the initial state, we must check each quadrant to + // see if it is completely inside or outside the mask. If inside, + // we go all in. If outside, we move on to the next leaf. If + // neither, keep going until we find a leaf. Upon exiting from + // this function, we are either done (ntree_==0), or on a leaf + // node which at least intersects with the mask. If allin_top_!=0, + // the leaf is completely inside the mask. + + if ( allin_top_ ) + { + // state: all in + + // If we are on the last subtree, move up + while ( ntree_ and ( ntree_ != allin_top_ ) and ntree_->my_subquad_ == N - 1 ) + { + ntree_ = ntree_->parent_; + } + + // Since we stop at the top, this should never happen! + assert( ntree_ != 0 ); + + // If we reached the allin_top_, we are no longer all in. + if ( ntree_ != allin_top_ ) + { + + // Move to next sibling + ntree_ = ntree_->parent_->children_[ ntree_->my_subquad_ + 1 ]; + + // Move down if this is not a leaf. 
+ while ( not ntree_->is_leaf() ) + { + ntree_ = ntree_->children_[ 0 ]; + } + return; + } + + allin_top_ = 0; + // Will continue as not all in. + } + + // state: Not all in + + do + { + + // If we are on the last subtree, move up + while ( ntree_ and ( ntree_ != top_ ) and ntree_->my_subquad_ == N - 1 ) + { + ntree_ = ntree_->parent_; + } + + // Since we stop at the top, this should never happen! + assert( ntree_ != 0 ); + + // If we have reached the top, mark as invalid and return + if ( ntree_ == top_ ) + { + return next_anchor_(); + } + + // Move to next sibling + ntree_ = ntree_->parent_->children_[ ntree_->my_subquad_ + 1 ]; + // Create anchored position in two steps to avoid creating a new Position object. + anchored_position_ = ntree_->lower_left_; + anchored_position_ -= anchor_; + + if ( mask_->inside( Box< D >( anchored_position_, anchored_position_ + ntree_->extent_ ) ) ) + { + return first_leaf_inside_(); + } + + } while ( mask_->outside( Box< D >( anchored_position_, anchored_position_ + ntree_->extent_ ) ) ); + + return first_leaf_(); +} + +template < int D, class T, int max_capacity, int max_depth > +void +Ntree< D, T, max_capacity, max_depth >::masked_iterator::first_leaf_() +{ + while ( not ntree_->is_leaf() ) + { + + ntree_ = ntree_->children_[ 0 ]; + + if ( mask_->inside( Box< D >( ntree_->lower_left_ - anchor_, ntree_->lower_left_ - anchor_ + ntree_->extent_ ) ) ) + { + return first_leaf_inside_(); + } + + if ( mask_->outside( Box< D >( ntree_->lower_left_ - anchor_, ntree_->lower_left_ - anchor_ + ntree_->extent_ ) ) ) + { + return next_leaf_(); + } + } +} + + +template < int D, class T, int max_capacity, int max_depth > +void +Ntree< D, T, max_capacity, max_depth >::masked_iterator::first_leaf_inside_() +{ + + allin_top_ = ntree_; + + while ( not ntree_->is_leaf() ) + { + ntree_ = ntree_->children_[ 0 ]; + } +} + +template < int D, class T, int max_capacity, int max_depth > +typename Ntree< D, T, max_capacity, max_depth >::masked_iterator& 
+Ntree< D, T, max_capacity, max_depth >::masked_iterator::operator++() +{ + ++node_; + + if ( allin_top_ == 0 ) + { + while ( + ( node_ < ntree_->nodes_.size() ) and ( not anchored_position_inside_mask( ntree_->nodes_[ node_ ].first ) ) ) + { + ++node_; + } + } + + while ( node_ >= ntree_->nodes_.size() ) + { + next_leaf_(); + + node_ = 0; + if ( ntree_ == 0 ) + { + break; + } + + if ( allin_top_ == 0 ) + { + while ( + ( node_ < ntree_->nodes_.size() ) and ( not anchored_position_inside_mask( ntree_->nodes_[ node_ ].first ) ) ) + { + ++node_; + } + } + } + + return *this; +} + +template < int D, class T, int max_capacity, int max_depth > +int +Ntree< D, T, max_capacity, max_depth >::subquad_( const Position< D >& pos ) +{ + int r = 0; + for ( int i = 0; i < D; ++i ) + { + // Comparing against an epsilon value in case there are round-off errors. + // Using a negative epsilon value because the round-off error may go both ways + // and the difference we check against may therefore be +/- 10^-16. + const bool in_left_half = + ( ( lower_left_[ i ] + extent_[ i ] / 2 ) - pos[ i ] ) > -std::numeric_limits< double >::epsilon(); + r += ( 1 << i ) * ( in_left_half ? 
0 : 1 ); + } + + return r; +} + +template < int D, class T, int max_capacity, int max_depth > +void +Ntree< D, T, max_capacity, max_depth >::append_nodes_( std::vector< std::pair< Position< D >, T > >& v ) +{ + if ( leaf_ ) + { + std::copy( nodes_.begin(), nodes_.end(), std::back_inserter( v ) ); + } + else + { + for ( int i = 0; i < N; ++i ) + { + children_[ i ]->append_nodes_( v ); + } + } +} + +template < int D, class T, int max_capacity, int max_depth > +void +Ntree< D, T, max_capacity, max_depth >::append_nodes_( std::vector< std::pair< Position< D >, T > >& v, + const Mask< D >& mask, + const Position< D >& anchor ) +{ + if ( mask.outside( Box< D >( lower_left_ - anchor, lower_left_ - anchor + extent_ ) ) ) + { + return; + } + if ( mask.inside( Box< D >( lower_left_ - anchor, lower_left_ - anchor + extent_ ) ) ) + { + return append_nodes_( v ); + } + if ( leaf_ ) + { + + for ( typename std::vector< std::pair< Position< D >, T > >::iterator i = nodes_.begin(); i != nodes_.end(); ++i ) + { + if ( mask.inside( i->first - anchor ) ) + { + v.push_back( *i ); + } + } + } + else + { + for ( int i = 0; i < N; ++i ) + { + children_[ i ]->append_nodes_( v, mask, anchor ); + } + } +} + +template < int D, class T, int max_capacity, int max_depth > +typename Ntree< D, T, max_capacity, max_depth >::iterator +Ntree< D, T, max_capacity, max_depth >::insert( Position< D > pos, const T& node ) +{ + if ( periodic_.any() ) + { + // Map position into standard range when using periodic b.c. Only necessary when + // inserting positions during source driven connect when target has periodic b.c. + // May be inefficient. 
+ for ( int i = 0; i < D; ++i ) + { + if ( periodic_[ i ] ) + { + pos[ i ] = lower_left_[ i ] + std::fmod( pos[ i ] - lower_left_[ i ], extent_[ i ] ); + if ( pos[ i ] < lower_left_[ i ] ) + { + pos[ i ] += extent_[ i ]; + } + } + } + } + + if ( leaf_ and ( nodes_.size() >= max_capacity ) and my_depth_ < max_depth ) + { + split_(); + } + if ( leaf_ ) + { + + for ( int i = 0; i < D; ++i ) + { + // Comparing against an epsilon value in case there are round-off errors. + // Using a negative epsilon value because the round-off error may go both ways + // and the difference we check against may therefore be +/- 10^-16. + assert( ( pos - lower_left_ )[ i ] > -std::numeric_limits< double >::epsilon() + and ( lower_left_ + extent_ - pos )[ i ] > -std::numeric_limits< double >::epsilon() ); + } + + nodes_.push_back( std::pair< Position< D >, T >( pos, node ) ); + + return iterator( *this, nodes_.size() - 1 ); + } + else + { + + return children_[ subquad_( pos ) ]->insert( pos, node ); + } +} + +template < int D, class T, int max_capacity, int max_depth > +void +Ntree< D, T, max_capacity, max_depth >::split_() +{ + assert( leaf_ ); + + for ( int j = 0; j < N; ++j ) + { + Position< D > lower_left = lower_left_; + for ( int i = 0; i < D; ++i ) + { + if ( j & ( 1 << i ) ) + { + lower_left[ i ] += extent_[ i ] * 0.5; + } + } + + children_[ j ] = new Ntree< D, T, max_capacity, max_depth >( lower_left, extent_ * 0.5, 0, this, j ); + } + + for ( typename std::vector< std::pair< Position< D >, T > >::iterator i = nodes_.begin(); i != nodes_.end(); ++i ) + { + children_[ subquad_( i->first ) ]->insert( i->first, i->second ); + } + + nodes_.clear(); + + leaf_ = false; +} + } // namespace nest #endif diff --git a/nestkernel/ntree_impl.h b/nestkernel/ntree_impl.h deleted file mode 100644 index a478097df9..0000000000 --- a/nestkernel/ntree_impl.h +++ /dev/null @@ -1,532 +0,0 @@ -/* - * ntree_impl.h - * - * This file is part of NEST. 
- * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#ifndef NTREE_IMPL_H -#define NTREE_IMPL_H - -#include - -#include "ntree.h" - -// Includes from spatial: -#include "mask.h" - -namespace nest -{ - -template < int D, class T, int max_capacity, int max_depth > -Ntree< D, T, max_capacity, max_depth >::iterator::iterator( Ntree& q ) - : ntree_( &q ) - , top_( &q ) - , node_( 0 ) -{ - // First leaf - while ( not ntree_->is_leaf() ) - { - ntree_ = ntree_->children_[ 0 ]; - } - // Find the first non-empty leaf - while ( ntree_->nodes_.empty() ) - { - - next_leaf_(); - if ( ntree_ == 0 ) - { - break; - } - } -} - -template < int D, class T, int max_capacity, int max_depth > -typename Ntree< D, T, max_capacity, max_depth >::iterator& -Ntree< D, T, max_capacity, max_depth >::iterator::operator++() -{ - node_++; - - while ( node_ >= ntree_->nodes_.size() ) - { - - next_leaf_(); - - node_ = 0; - if ( ntree_ == 0 ) - { - break; - } - } - - return *this; -} - -template < int D, class T, int max_capacity, int max_depth > -void -Ntree< D, T, max_capacity, max_depth >::iterator::next_leaf_() -{ - - // If we are on the last subntree, move up - while ( ntree_ and ( ntree_ != top_ ) and ntree_->my_subquad_ == N - 1 ) - { - ntree_ = ntree_->parent_; - } - - // Since we stop at the top, this should never happen! 
- assert( ntree_ != 0 ); - - // If we have reached the top, mark as invalid and return - if ( ntree_ == top_ ) - { - ntree_ = 0; - return; - } - - // Move to next sibling - ntree_ = ntree_->parent_->children_[ ntree_->my_subquad_ + 1 ]; - - // Move down if this is not a leaf. - while ( not ntree_->is_leaf() ) - { - ntree_ = ntree_->children_[ 0 ]; - } -} - -// Proper mod which returns non-negative numbers -static inline double -mod( double x, double p ) -{ - x = std::fmod( x, p ); - if ( x < 0 ) - { - x += p; - } - return x; -} - -template < int D, class T, int max_capacity, int max_depth > -Ntree< D, T, max_capacity, max_depth >::masked_iterator::masked_iterator( Ntree< D, T, max_capacity, max_depth >& q, - const Mask< D >& mask, - const Position< D >& anchor ) - : ntree_( &q ) - , top_( &q ) - , allin_top_( 0 ) - , node_( 0 ) - , mask_( &mask ) - , anchor_( anchor ) - , anchors_() - , current_anchor_( 0 ) -{ - if ( ntree_->periodic_.any() ) - { - Box< D > mask_bb = mask_->get_bbox(); - - // Move lower left corner of mask into main image of layer - for ( int i = 0; i < D; ++i ) - { - if ( ntree_->periodic_[ i ] ) - { - anchor_[ i ] = - nest::mod( anchor_[ i ] + mask_bb.lower_left[ i ] - ntree_->lower_left_[ i ], ntree_->extent_[ i ] ) - - mask_bb.lower_left[ i ] + ntree_->lower_left_[ i ]; - } - } - anchors_.push_back( anchor_ ); - - // Add extra anchors for each dimension where this is needed - // (Assumes that the mask is not wider than the layer) - for ( int i = 0; i < D; ++i ) - { - if ( ntree_->periodic_[ i ] ) - { - int n = anchors_.size(); - if ( ( anchor_[ i ] + mask_bb.upper_right[ i ] - ntree_->lower_left_[ i ] ) > ntree_->extent_[ i ] ) - { - for ( int j = 0; j < n; ++j ) - { - Position< D > p = anchors_[ j ]; - p[ i ] -= ntree_->extent_[ i ]; - anchors_.push_back( p ); - } - } - } - } - } - - init_(); -} - -template < int D, class T, int max_capacity, int max_depth > -void -Ntree< D, T, max_capacity, max_depth >::masked_iterator::init_() -{ - node_ = 
0; - allin_top_ = 0; - ntree_ = top_; - - if ( mask_->outside( Box< D >( ntree_->lower_left_ - anchor_, ntree_->lower_left_ - anchor_ + ntree_->extent_ ) ) ) - { - - next_anchor_(); - } - else - { - - if ( mask_->inside( Box< D >( ntree_->lower_left_ - anchor_, ntree_->lower_left_ - anchor_ + ntree_->extent_ ) ) ) - { - first_leaf_inside_(); - } - else - { - first_leaf_(); - } - - if ( ntree_->nodes_.empty() or ( not mask_->inside( ntree_->nodes_[ node_ ].first - anchor_ ) ) ) - { - ++( *this ); - } - } -} - -template < int D, class T, int max_capacity, int max_depth > -void -Ntree< D, T, max_capacity, max_depth >::masked_iterator::next_anchor_() -{ - ++current_anchor_; - if ( current_anchor_ >= anchors_.size() ) - { - // Done. Mark as invalid. - ntree_ = 0; - node_ = 0; - } - else - { - anchor_ = anchors_[ current_anchor_ ]; - init_(); - } -} - -template < int D, class T, int max_capacity, int max_depth > -void -Ntree< D, T, max_capacity, max_depth >::masked_iterator::next_leaf_() -{ - - // There are two states: the initial state, and "all in". In the - // all in state, we are in a subtree which is completely inside - // the mask. The allin_top_ is the top of this subtree. When - // exiting the subtree, the state changes to the initial - // state. In the initial state, we must check each quadrant to - // see if it is completely inside or outside the mask. If inside, - // we go all in. If outside, we move on to the next leaf. If - // neither, keep going until we find a leaf. Upon exiting from - // this function, we are either done (ntree_==0), or on a leaf - // node which at least intersects with the mask. If allin_top_!=0, - // the leaf is completely inside the mask. - - if ( allin_top_ ) - { - // state: all in - - // If we are on the last subtree, move up - while ( ntree_ and ( ntree_ != allin_top_ ) and ntree_->my_subquad_ == N - 1 ) - { - ntree_ = ntree_->parent_; - } - - // Since we stop at the top, this should never happen! 
- assert( ntree_ != 0 ); - - // If we reached the allin_top_, we are no longer all in. - if ( ntree_ != allin_top_ ) - { - - // Move to next sibling - ntree_ = ntree_->parent_->children_[ ntree_->my_subquad_ + 1 ]; - - // Move down if this is not a leaf. - while ( not ntree_->is_leaf() ) - { - ntree_ = ntree_->children_[ 0 ]; - } - return; - } - - allin_top_ = 0; - // Will continue as not all in. - } - - // state: Not all in - - do - { - - // If we are on the last subtree, move up - while ( ntree_ and ( ntree_ != top_ ) and ntree_->my_subquad_ == N - 1 ) - { - ntree_ = ntree_->parent_; - } - - // Since we stop at the top, this should never happen! - assert( ntree_ != 0 ); - - // If we have reached the top, mark as invalid and return - if ( ntree_ == top_ ) - { - return next_anchor_(); - } - - // Move to next sibling - ntree_ = ntree_->parent_->children_[ ntree_->my_subquad_ + 1 ]; - // Create anchored position in two steps to avoid creating a new Position object. - anchored_position_ = ntree_->lower_left_; - anchored_position_ -= anchor_; - - if ( mask_->inside( Box< D >( anchored_position_, anchored_position_ + ntree_->extent_ ) ) ) - { - return first_leaf_inside_(); - } - - } while ( mask_->outside( Box< D >( anchored_position_, anchored_position_ + ntree_->extent_ ) ) ); - - return first_leaf_(); -} - -template < int D, class T, int max_capacity, int max_depth > -void -Ntree< D, T, max_capacity, max_depth >::masked_iterator::first_leaf_() -{ - while ( not ntree_->is_leaf() ) - { - - ntree_ = ntree_->children_[ 0 ]; - - if ( mask_->inside( Box< D >( ntree_->lower_left_ - anchor_, ntree_->lower_left_ - anchor_ + ntree_->extent_ ) ) ) - { - return first_leaf_inside_(); - } - - if ( mask_->outside( Box< D >( ntree_->lower_left_ - anchor_, ntree_->lower_left_ - anchor_ + ntree_->extent_ ) ) ) - { - return next_leaf_(); - } - } -} - - -template < int D, class T, int max_capacity, int max_depth > -void -Ntree< D, T, max_capacity, max_depth 
>::masked_iterator::first_leaf_inside_() -{ - - allin_top_ = ntree_; - - while ( not ntree_->is_leaf() ) - { - ntree_ = ntree_->children_[ 0 ]; - } -} - -template < int D, class T, int max_capacity, int max_depth > -typename Ntree< D, T, max_capacity, max_depth >::masked_iterator& -Ntree< D, T, max_capacity, max_depth >::masked_iterator::operator++() -{ - ++node_; - - if ( allin_top_ == 0 ) - { - while ( - ( node_ < ntree_->nodes_.size() ) and ( not anchored_position_inside_mask( ntree_->nodes_[ node_ ].first ) ) ) - { - ++node_; - } - } - - while ( node_ >= ntree_->nodes_.size() ) - { - next_leaf_(); - - node_ = 0; - if ( ntree_ == 0 ) - { - break; - } - - if ( allin_top_ == 0 ) - { - while ( - ( node_ < ntree_->nodes_.size() ) and ( not anchored_position_inside_mask( ntree_->nodes_[ node_ ].first ) ) ) - { - ++node_; - } - } - } - - return *this; -} - -template < int D, class T, int max_capacity, int max_depth > -int -Ntree< D, T, max_capacity, max_depth >::subquad_( const Position< D >& pos ) -{ - int r = 0; - for ( int i = 0; i < D; ++i ) - { - // Comparing against an epsilon value in case there are round-off errors. - // Using a negative epsilon value because the round-off error may go both ways - // and the difference we check against may therefore be +/- 10^-16. - const bool in_left_half = - ( ( lower_left_[ i ] + extent_[ i ] / 2 ) - pos[ i ] ) > -std::numeric_limits< double >::epsilon(); - r += ( 1 << i ) * ( in_left_half ? 
0 : 1 ); - } - - return r; -} - -template < int D, class T, int max_capacity, int max_depth > -void -Ntree< D, T, max_capacity, max_depth >::append_nodes_( std::vector< std::pair< Position< D >, T > >& v ) -{ - if ( leaf_ ) - { - std::copy( nodes_.begin(), nodes_.end(), std::back_inserter( v ) ); - } - else - { - for ( int i = 0; i < N; ++i ) - { - children_[ i ]->append_nodes_( v ); - } - } -} - -template < int D, class T, int max_capacity, int max_depth > -void -Ntree< D, T, max_capacity, max_depth >::append_nodes_( std::vector< std::pair< Position< D >, T > >& v, - const Mask< D >& mask, - const Position< D >& anchor ) -{ - if ( mask.outside( Box< D >( lower_left_ - anchor, lower_left_ - anchor + extent_ ) ) ) - { - return; - } - if ( mask.inside( Box< D >( lower_left_ - anchor, lower_left_ - anchor + extent_ ) ) ) - { - return append_nodes_( v ); - } - if ( leaf_ ) - { - - for ( typename std::vector< std::pair< Position< D >, T > >::iterator i = nodes_.begin(); i != nodes_.end(); ++i ) - { - if ( mask.inside( i->first - anchor ) ) - { - v.push_back( *i ); - } - } - } - else - { - for ( int i = 0; i < N; ++i ) - { - children_[ i ]->append_nodes_( v, mask, anchor ); - } - } -} - -template < int D, class T, int max_capacity, int max_depth > -typename Ntree< D, T, max_capacity, max_depth >::iterator -Ntree< D, T, max_capacity, max_depth >::insert( Position< D > pos, const T& node ) -{ - if ( periodic_.any() ) - { - // Map position into standard range when using periodic b.c. Only necessary when - // inserting positions during source driven connect when target has periodic b.c. - // May be inefficient. 
- for ( int i = 0; i < D; ++i ) - { - if ( periodic_[ i ] ) - { - pos[ i ] = lower_left_[ i ] + std::fmod( pos[ i ] - lower_left_[ i ], extent_[ i ] ); - if ( pos[ i ] < lower_left_[ i ] ) - { - pos[ i ] += extent_[ i ]; - } - } - } - } - - if ( leaf_ and ( nodes_.size() >= max_capacity ) and my_depth_ < max_depth ) - { - split_(); - } - if ( leaf_ ) - { - - for ( int i = 0; i < D; ++i ) - { - // Comparing against an epsilon value in case there are round-off errors. - // Using a negative epsilon value because the round-off error may go both ways - // and the difference we check against may therefore be +/- 10^-16. - assert( ( pos - lower_left_ )[ i ] > -std::numeric_limits< double >::epsilon() - and ( lower_left_ + extent_ - pos )[ i ] > -std::numeric_limits< double >::epsilon() ); - } - - nodes_.push_back( std::pair< Position< D >, T >( pos, node ) ); - - return iterator( *this, nodes_.size() - 1 ); - } - else - { - - return children_[ subquad_( pos ) ]->insert( pos, node ); - } -} - -template < int D, class T, int max_capacity, int max_depth > -void -Ntree< D, T, max_capacity, max_depth >::split_() -{ - assert( leaf_ ); - - for ( int j = 0; j < N; ++j ) - { - Position< D > lower_left = lower_left_; - for ( int i = 0; i < D; ++i ) - { - if ( j & ( 1 << i ) ) - { - lower_left[ i ] += extent_[ i ] * 0.5; - } - } - - children_[ j ] = new Ntree< D, T, max_capacity, max_depth >( lower_left, extent_ * 0.5, 0, this, j ); - } - - for ( typename std::vector< std::pair< Position< D >, T > >::iterator i = nodes_.begin(); i != nodes_.end(); ++i ) - { - children_[ subquad_( i->first ) ]->insert( i->first, i->second ); - } - - nodes_.clear(); - - leaf_ = false; -} -} - -#endif diff --git a/nestkernel/parameter.cpp b/nestkernel/parameter.cpp index f5d5301f35..3346b4101c 100644 --- a/nestkernel/parameter.cpp +++ b/nestkernel/parameter.cpp @@ -22,12 +22,11 @@ #include +#include "nest.h" #include "node.h" #include "node_collection.h" -#include "spatial.h" -#include 
"vp_manager_impl.h" - #include "parameter.h" +#include "spatial.h" namespace nest diff --git a/nestkernel/per_thread_bool_indicator.cpp b/nestkernel/per_thread_bool_indicator.cpp index 4032a7320f..29622d9db6 100644 --- a/nestkernel/per_thread_bool_indicator.cpp +++ b/nestkernel/per_thread_bool_indicator.cpp @@ -24,7 +24,8 @@ // Includes from nestkernel #include "kernel_manager.h" -#include "stopwatch_impl.h" + +#include "simulation_manager.h" namespace nest { @@ -65,7 +66,7 @@ PerThreadBoolIndicator::initialize( const size_t num_threads, const bool status bool PerThreadBoolIndicator::all_false() const { - kernel().get_omp_synchronization_construction_stopwatch().start(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); // We need two barriers here to ensure that no thread can continue and change the result // before all threads have determined the result. #pragma omp barrier @@ -74,42 +75,42 @@ PerThreadBoolIndicator::all_false() const bool ret = ( are_true_ == 0 ); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); return ret; } bool PerThreadBoolIndicator::all_true() const { - kernel().get_omp_synchronization_construction_stopwatch().start(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier bool ret = ( are_true_ == size_ ); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); return ret; } bool PerThreadBoolIndicator::any_false() const { - kernel().get_omp_synchronization_construction_stopwatch().start(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier bool ret = ( are_true_ < size_ ); #pragma omp barrier - 
kernel().get_omp_synchronization_construction_stopwatch().stop(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); return ret; } bool PerThreadBoolIndicator::any_true() const { - kernel().get_omp_synchronization_construction_stopwatch().start(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier bool ret = ( are_true_ > 0 ); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); return ret; } diff --git a/nestkernel/proxynode.cpp b/nestkernel/proxynode.cpp index af4b624a57..43488457a2 100644 --- a/nestkernel/proxynode.cpp +++ b/nestkernel/proxynode.cpp @@ -25,6 +25,7 @@ // Includes from nestkernel: #include "connection.h" #include "kernel_manager.h" +#include "model_manager.h" // Includes from sli: #include "dictutils.h" diff --git a/nestkernel/random_manager.cpp b/nestkernel/random_manager.cpp index db0a58fd65..a7f3173f4c 100644 --- a/nestkernel/random_manager.cpp +++ b/nestkernel/random_manager.cpp @@ -27,9 +27,12 @@ #include // Includes from nestkernel: +#include "dictutils.h" #include "kernel_manager.h" +#include "mpi_manager.h" +#include "nest_names.h" #include "random_generators.h" -#include "vp_manager_impl.h" +#include "vp_manager.h" // Includes from libnestutil: #ifdef HAVE_RANDOM123 diff --git a/nestkernel/recording_backend_ascii.cpp b/nestkernel/recording_backend_ascii.cpp index 773aa36a82..b7f6d8ff54 100644 --- a/nestkernel/recording_backend_ascii.cpp +++ b/nestkernel/recording_backend_ascii.cpp @@ -24,13 +24,17 @@ #include "compose.hpp" // Includes from nestkernel: +#include "io_manager.h" +#include "logging.h" +#include "logging_manager.h" +#include "node_manager.h" +#include "recording_backend_ascii.h" #include "recording_device.h" -#include "vp_manager_impl.h" +#include "simulation_manager.h" // includes from sli: #include "dictutils.h" 
-#include "recording_backend_ascii.h" const unsigned int nest::RecordingBackendASCII::ASCII_REC_BACKEND_VERSION = 2; diff --git a/nestkernel/recording_backend_memory.cpp b/nestkernel/recording_backend_memory.cpp index 3ff816c685..6c484f6e66 100644 --- a/nestkernel/recording_backend_memory.cpp +++ b/nestkernel/recording_backend_memory.cpp @@ -21,10 +21,9 @@ */ // Includes from nestkernel: -#include "recording_device.h" -#include "vp_manager_impl.h" - #include "recording_backend_memory.h" +#include "recording_device.h" +#include "simulation_manager.h" nest::RecordingBackendMemory::RecordingBackendMemory() { diff --git a/nestkernel/recording_backend_mpi.cpp b/nestkernel/recording_backend_mpi.cpp index 3c048211a0..127adb1647 100644 --- a/nestkernel/recording_backend_mpi.cpp +++ b/nestkernel/recording_backend_mpi.cpp @@ -26,6 +26,9 @@ // Includes from nestkernel: #include "exceptions.h" +#include "io_manager.h" +#include "logging.h" +#include "logging_manager.h" #include "recording_backend_mpi.h" #include "recording_device.h" diff --git a/nestkernel/recording_backend_sionlib.cpp b/nestkernel/recording_backend_sionlib.cpp index 5aabb26ebc..33753117e5 100644 --- a/nestkernel/recording_backend_sionlib.cpp +++ b/nestkernel/recording_backend_sionlib.cpp @@ -36,10 +36,8 @@ #include "../nest/neststartup.h" // Includes from nestkernel: -#include "recording_device.h" -#include "vp_manager_impl.h" - #include "recording_backend_sionlib.h" +#include "recording_device.h" const unsigned int nest::RecordingBackendSIONlib::SIONLIB_REC_BACKEND_VERSION = 2; const unsigned int nest::RecordingBackendSIONlib::DEV_NAME_BUFFERSIZE = 32; diff --git a/nestkernel/recording_device.cpp b/nestkernel/recording_device.cpp index 205fde2e6b..c827d4da90 100644 --- a/nestkernel/recording_device.cpp +++ b/nestkernel/recording_device.cpp @@ -21,10 +21,11 @@ */ // Includes from libnestutil: +#include "recording_device.h" #include "compose.hpp" +#include "io_manager.h" #include "kernel_manager.h" - -#include 
"recording_device.h" +#include "simulation_manager.h" nest::RecordingDevice::RecordingDevice() : DeviceNode() diff --git a/nestkernel/ring_buffer.cpp b/nestkernel/ring_buffer.cpp index 48ee802b1c..1c0275ed2e 100644 --- a/nestkernel/ring_buffer.cpp +++ b/nestkernel/ring_buffer.cpp @@ -21,6 +21,7 @@ */ #include "ring_buffer.h" +#include "connection_manager.h" nest::RingBuffer::RingBuffer() : buffer_( kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay(), 0.0 ) diff --git a/nestkernel/ring_buffer.h b/nestkernel/ring_buffer.h index 9e7328f766..e4bbf70ddf 100644 --- a/nestkernel/ring_buffer.h +++ b/nestkernel/ring_buffer.h @@ -29,6 +29,7 @@ #include // Includes from nestkernel: +#include "event_delivery_manager.h" #include "kernel_manager.h" #include "nest_time.h" #include "nest_types.h" @@ -422,6 +423,36 @@ MultiChannelInputBuffer< num_channels >::size() const return buffer_.size(); } +template < unsigned int num_channels > +MultiChannelInputBuffer< num_channels >::MultiChannelInputBuffer() + : buffer_( kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay(), + std::array< double, num_channels >() ) +{ +} + +template < unsigned int num_channels > +void +MultiChannelInputBuffer< num_channels >::resize() +{ + const size_t size = kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay(); + if ( buffer_.size() != size ) + { + buffer_.resize( size, std::array< double, num_channels >() ); + } +} + +template < unsigned int num_channels > +void +MultiChannelInputBuffer< num_channels >::clear() +{ + resize(); // does nothing if size is fine + // set all elements to 0.0 + for ( size_t slot = 0; slot < buffer_.size(); ++slot ) + { + reset_values_all_channels( slot ); + } +} + } // namespace nest diff --git a/nestkernel/ring_buffer_impl.h b/nestkernel/ring_buffer_impl.h deleted file mode 100644 index 76db8a6fb6..0000000000 --- a/nestkernel/ring_buffer_impl.h +++ /dev/null @@ -1,58 
+0,0 @@ -/* - * ring_buffer_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#ifndef RING_BUFFER_IMPL_H -#define RING_BUFFER_IMPL_H - -#include "ring_buffer.h" - -template < unsigned int num_channels > -nest::MultiChannelInputBuffer< num_channels >::MultiChannelInputBuffer() - : buffer_( kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay(), - std::array< double, num_channels >() ) -{ -} - -template < unsigned int num_channels > -void -nest::MultiChannelInputBuffer< num_channels >::resize() -{ - const size_t size = kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay(); - if ( buffer_.size() != size ) - { - buffer_.resize( size, std::array< double, num_channels >() ); - } -} - -template < unsigned int num_channels > -void -nest::MultiChannelInputBuffer< num_channels >::clear() -{ - resize(); // does nothing if size is fine - // set all elements to 0.0 - for ( size_t slot = 0; slot < buffer_.size(); ++slot ) - { - reset_values_all_channels( slot ); - } -} - -#endif diff --git a/nestkernel/secondary_event.h b/nestkernel/secondary_event.h index 00676e68f1..746e8bc0e4 100644 --- a/nestkernel/secondary_event.h +++ b/nestkernel/secondary_event.h @@ -472,6 +472,31 @@ SICEvent::clone() const return new SICEvent( *this ); } +template < typename DataType, 
typename Subclass > +void +DataSecondaryEvent< DataType, Subclass >::add_syn_id( const synindex synid ) +{ + kernel().vp_manager.assert_thread_parallel(); + + // This is done during connection model cloning, which happens thread-parallel. + // To not risk trashing the set data structure, we let only master register the + // new synid. This is not performance critical and avoiding collisions elsewhere + // would be more difficult, so we do it here in a master section. +#pragma omp master + { + supported_syn_ids_.insert( synid ); + } +#pragma omp barrier +} + +template < typename DataType, typename Subclass > +void +DataSecondaryEvent< DataType, Subclass >::set_coeff_length( const size_t coeff_length ) +{ + kernel().vp_manager.assert_single_threaded(); + coeff_length_ = coeff_length; +} + } // namespace nest #endif /* #ifndef SECONDARY_EVENT_H */ diff --git a/nestkernel/secondary_event_impl.h b/nestkernel/secondary_event_impl.h deleted file mode 100644 index 1ad6fe92ee..0000000000 --- a/nestkernel/secondary_event_impl.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * secondary_event_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . 
- * - */ - -#include "secondary_event.h" - -// Includes from nestkernel -#include "kernel_manager.h" - -template < typename DataType, typename Subclass > -void -nest::DataSecondaryEvent< DataType, Subclass >::add_syn_id( const nest::synindex synid ) -{ - kernel().vp_manager.assert_thread_parallel(); - - // This is done during connection model cloning, which happens thread-parallel. - // To not risk trashing the set data structure, we let only master register the - // new synid. This is not performance critical and avoiding collisions elsewhere - // would be more difficult, so we do it here in a master section. -#pragma omp master - { - supported_syn_ids_.insert( synid ); - } -#pragma omp barrier -} - -template < typename DataType, typename Subclass > -void -nest::DataSecondaryEvent< DataType, Subclass >::set_coeff_length( const size_t coeff_length ) -{ - kernel().vp_manager.assert_single_threaded(); - coeff_length_ = coeff_length; -} diff --git a/nestkernel/simulation_manager.cpp b/nestkernel/simulation_manager.cpp index 9f23d7f89b..3ec93ffc11 100644 --- a/nestkernel/simulation_manager.cpp +++ b/nestkernel/simulation_manager.cpp @@ -34,10 +34,16 @@ #include "numerics.h" // Includes from nestkernel: -#include "connection_manager_impl.h" #include "event_delivery_manager.h" +#include "io_manager.h" #include "kernel_manager.h" -#include "stopwatch_impl.h" +#include "logging_manager.h" +#include "model_manager.h" +#include "music_manager.h" +#include "node_manager.h" +#include "random_manager.h" +#include "sp_manager.h" +#include // Includes from sli: #include "dictutils.h" @@ -72,6 +78,10 @@ nest::SimulationManager::SimulationManager() void nest::SimulationManager::initialize( const bool adjust_number_of_threads_or_rng_only ) { + sw_omp_synchronization_construction_.reset(); + sw_omp_synchronization_simulation_.reset(); + sw_mpi_synchronization_.reset(); + if ( adjust_number_of_threads_or_rng_only ) { return; @@ -483,6 +493,12 @@ nest::SimulationManager::get_status( 
DictionaryDatum& d ) sw_deliver_spike_data_.get_status( d, names::time_deliver_spike_data, names::time_deliver_spike_data_cpu ); sw_deliver_secondary_data_.get_status( d, names::time_deliver_secondary_data, names::time_deliver_secondary_data_cpu ); + sw_omp_synchronization_construction_.get_status( + d, names::time_omp_synchronization_construction, names::time_omp_synchronization_construction_cpu ); + sw_omp_synchronization_simulation_.get_status( + d, names::time_omp_synchronization_simulation, names::time_omp_synchronization_simulation_cpu ); + sw_mpi_synchronization_.get_status( d, names::time_mpi_synchronization, names::time_mpi_synchronization_cpu ); + def< double >( d, names::eprop_update_interval, eprop_update_interval_ ); def< double >( d, names::eprop_learning_window, eprop_learning_window_ ); def< bool >( d, names::eprop_reset_neurons_on_update, eprop_reset_neurons_on_update_ ); @@ -507,6 +523,9 @@ nest::SimulationManager::prepare() "earlier error. Please run ResetKernel first." 
); } + sw_omp_synchronization_simulation_.reset(); + sw_mpi_synchronization_.reset(); + // reset profiling timers reset_timers_for_dynamics(); kernel().event_delivery_manager.reset_timers_for_dynamics(); @@ -734,9 +753,9 @@ nest::SimulationManager::call_update_() void nest::SimulationManager::update_connection_infrastructure( const size_t tid ) { - kernel().get_omp_synchronization_construction_stopwatch().start(); + get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + get_omp_synchronization_construction_stopwatch().stop(); sw_communicate_prepare_.start(); @@ -746,9 +765,9 @@ nest::SimulationManager::update_connection_infrastructure( const size_t tid ) kernel().connection_manager.collect_compressed_spike_data( tid ); sw_gather_target_data_.stop(); - kernel().get_omp_synchronization_construction_stopwatch().start(); + get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier // wait for all threads to finish sorting - kernel().get_omp_synchronization_construction_stopwatch().stop(); + get_omp_synchronization_construction_stopwatch().stop(); #pragma omp single { @@ -763,15 +782,15 @@ nest::SimulationManager::update_connection_infrastructure( const size_t tid ) if ( kernel().connection_manager.secondary_connections_exist() ) { - kernel().get_omp_synchronization_construction_stopwatch().start(); + get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + get_omp_synchronization_construction_stopwatch().stop(); kernel().connection_manager.compute_compressed_secondary_recv_buffer_positions( tid ); - kernel().get_omp_synchronization_construction_stopwatch().start(); + get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + get_omp_synchronization_construction_stopwatch().stop(); 
#pragma omp single { @@ -805,9 +824,9 @@ nest::SimulationManager::update_connection_infrastructure( const size_t tid ) kernel().connection_manager.compress_secondary_send_buffer_pos( tid ); } - kernel().get_omp_synchronization_construction_stopwatch().start(); + get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + get_omp_synchronization_construction_stopwatch().stop(); #pragma omp single { kernel().connection_manager.clear_compressed_spike_data_map(); @@ -885,9 +904,9 @@ nest::SimulationManager::update_() // MUSIC *before* MUSIC time is advanced // wait until all threads are done -> synchronize - kernel().get_omp_synchronization_simulation_stopwatch().start(); + get_omp_synchronization_simulation_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_simulation_stopwatch().stop(); + get_omp_synchronization_simulation_stopwatch().stop(); // the following block is executed by the master thread only // the other threads are enforced to wait at the end of the block #pragma omp master @@ -950,9 +969,9 @@ nest::SimulationManager::update_() done.push_back( done_p ); } // parallel section ends, wait until all threads are done -> synchronize - kernel().get_omp_synchronization_simulation_stopwatch().start(); + get_omp_synchronization_simulation_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_simulation_stopwatch().stop(); + get_omp_synchronization_simulation_stopwatch().stop(); // the following block is executed by a single thread // the other threads wait at the end of the block @@ -1012,9 +1031,9 @@ nest::SimulationManager::update_() Node* node = i->get_node(); node->update_synaptic_elements( Time( Time::step( clock_.get_steps() + from_step_ ) ).get_ms() ); } - kernel().get_omp_synchronization_simulation_stopwatch().start(); + get_omp_synchronization_simulation_stopwatch().start(); #pragma omp barrier - 
kernel().get_omp_synchronization_simulation_stopwatch().stop(); + get_omp_synchronization_simulation_stopwatch().stop(); #pragma omp single { kernel().sp_manager.update_structural_plasticity(); @@ -1051,9 +1070,9 @@ nest::SimulationManager::update_() sw_update_.stop(); // parallel section ends, wait until all threads are done -> synchronize - kernel().get_omp_synchronization_simulation_stopwatch().start(); + get_omp_synchronization_simulation_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_simulation_stopwatch().stop(); + get_omp_synchronization_simulation_stopwatch().stop(); // the following block is executed by the master thread only // the other threads are enforced to wait at the end of the block @@ -1099,9 +1118,9 @@ nest::SimulationManager::update_() #ifdef HAVE_SIONLIB kernel().io_manager.post_step_hook(); // enforce synchronization after post-step activities of the recording backends - kernel().get_omp_synchronization_simulation_stopwatch().start(); + get_omp_synchronization_simulation_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_simulation_stopwatch().stop(); + get_omp_synchronization_simulation_stopwatch().stop(); #endif const double end_current_update = sw_simulate_.elapsed(); diff --git a/nestkernel/simulation_manager.h b/nestkernel/simulation_manager.h index 1c83bea508..c7eed7cc23 100644 --- a/nestkernel/simulation_manager.h +++ b/nestkernel/simulation_manager.h @@ -189,6 +189,26 @@ class SimulationManager : public ManagerInterface Time get_eprop_learning_window() const; bool get_eprop_reset_neurons_on_update() const; + //! Get the stopwatch to measure the time each thread is idle during network construction. + Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::Threaded >& + get_omp_synchronization_construction_stopwatch() + { + return sw_omp_synchronization_construction_; + } + + //! Get the stopwatch to measure the time each thread is idle during simulation. 
+ Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::Threaded >& + get_omp_synchronization_simulation_stopwatch() + { + return sw_omp_synchronization_simulation_; + } + + Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::MasterOnly >& + get_mpi_synchronization_stopwatch() + { + return sw_mpi_synchronization_; + } + private: void call_update_(); //!< actually run simulation, aka wrap update_ void update_(); //! actually perform simulation @@ -238,6 +258,10 @@ class SimulationManager : public ManagerInterface Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::Threaded > sw_deliver_spike_data_; Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::Threaded > sw_deliver_secondary_data_; + Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::Threaded > sw_omp_synchronization_construction_; + Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::Threaded > sw_omp_synchronization_simulation_; + Stopwatch< StopwatchGranularity::Detailed, StopwatchParallelism::MasterOnly > sw_mpi_synchronization_; + double eprop_update_interval_; double eprop_learning_window_; bool eprop_reset_neurons_on_update_; diff --git a/nestkernel/slice_ring_buffer.cpp b/nestkernel/slice_ring_buffer.cpp index ab12ca3aa4..6a6c4563ac 100644 --- a/nestkernel/slice_ring_buffer.cpp +++ b/nestkernel/slice_ring_buffer.cpp @@ -26,6 +26,9 @@ #include #include +#include "connection_manager.h" +#include "event_delivery_manager.h" + nest::SliceRingBuffer::SliceRingBuffer() : refract_( std::numeric_limits< long >::max(), 0, 0 ) { diff --git a/nestkernel/slice_ring_buffer.h b/nestkernel/slice_ring_buffer.h index 62999fbc90..e14bfe9926 100644 --- a/nestkernel/slice_ring_buffer.h +++ b/nestkernel/slice_ring_buffer.h @@ -33,8 +33,8 @@ #include "config.h" // Includes from nestkernel: +#include "event_delivery_manager.h" #include "kernel_manager.h" -#include "nest_types.h" namespace nest { diff --git a/nestkernel/sonata_connector.cpp 
b/nestkernel/sonata_connector.cpp index b44ae89640..59b7583c66 100644 --- a/nestkernel/sonata_connector.cpp +++ b/nestkernel/sonata_connector.cpp @@ -30,7 +30,6 @@ // Includes from nestkernel: #include "kernel_manager.h" -#include "vp_manager_impl.h" // Includes from sli: #include "dictutils.h" diff --git a/nestkernel/source_table.cpp b/nestkernel/source_table.cpp index d9fcd7af1c..f82fca8312 100644 --- a/nestkernel/source_table.cpp +++ b/nestkernel/source_table.cpp @@ -25,12 +25,9 @@ // Includes from nestkernel: #include "connection_manager.h" -#include "connection_manager_impl.h" #include "kernel_manager.h" -#include "mpi_manager_impl.h" +#include "model_manager.h" #include "source_table.h" -#include "stopwatch_impl.h" -#include "vp_manager_impl.h" nest::SourceTable::SourceTable() { @@ -230,9 +227,9 @@ nest::SourceTable::compute_buffer_pos_for_unique_secondary_sources( const size_t } } } - kernel().get_omp_synchronization_construction_stopwatch().start(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().get_omp_synchronization_construction_stopwatch().stop(); + kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); #pragma omp single { @@ -245,7 +242,7 @@ nest::SourceTable::compute_buffer_pos_for_unique_secondary_sources( const size_t cit != ( *unique_secondary_source_node_id_syn_id ).end(); ++cit ) { - const size_t source_rank = kernel().mpi_manager.get_process_id_of_node_id( cit->first ); + const size_t source_rank = kernel().vp_manager.get_process_id_of_node_id( cit->first ); const size_t event_size = kernel().model_manager.get_secondary_event_prototype( cit->second, tid ).size(); buffer_pos_of_source_node_id_syn_id.insert( @@ -280,7 +277,7 @@ nest::SourceTable::source_should_be_processed_( const size_t rank_start, const size_t rank_end, const Source& source ) const { - const size_t source_rank = kernel().mpi_manager.get_process_id_of_node_id( 
source.get_node_id() ); + const size_t source_rank = kernel().vp_manager.get_process_id_of_node_id( source.get_node_id() ); return not( source.is_processed() or source.is_disabled() @@ -412,7 +409,7 @@ nest::SourceTable::get_next_target_data( const size_t tid, // communicated via MPI, so we prepare to return the relevant data // set the source rank - source_rank = kernel().mpi_manager.get_process_id_of_node_id( current_source.get_node_id() ); + source_rank = kernel().vp_manager.get_process_id_of_node_id( current_source.get_node_id() ); if ( not populate_target_data_fields_( current_position, current_source, source_rank, next_target_data ) ) { diff --git a/nestkernel/sp_manager.cpp b/nestkernel/sp_manager.cpp index 4705b7de64..294fb6ec0c 100644 --- a/nestkernel/sp_manager.cpp +++ b/nestkernel/sp_manager.cpp @@ -28,11 +28,15 @@ // Includes from nestkernel: #include "conn_builder.h" #include "conn_parameter.h" +#include "connection_manager.h" #include "connector_base.h" #include "connector_model.h" #include "kernel_manager.h" +#include "logging.h" +#include "logging_manager.h" +#include "model_manager.h" +#include "nest.h" #include "nest_names.h" -#include "sp_manager_impl.h" namespace nest { diff --git a/nestkernel/sp_manager.h b/nestkernel/sp_manager.h index 79cc92a72a..90b3828de3 100644 --- a/nestkernel/sp_manager.h +++ b/nestkernel/sp_manager.h @@ -229,6 +229,18 @@ SPManager::get_structural_plasticity_update_interval() const return structural_plasticity_update_interval_; } +template < typename GrowthCurve > +void +SPManager::register_growth_curve( const std::string& name ) +{ + assert( not growthcurvedict_->known( name ) ); + GenericGrowthCurveFactory* nc = new GrowthCurveFactory< GrowthCurve >(); + assert( nc ); + const int id = growthcurve_factories_.size(); + growthcurve_factories_.push_back( nc ); + growthcurvedict_->insert( name, id ); +} + } // namespace nest #endif /* #ifndef SP_MANAGER_H */ diff --git a/nestkernel/sp_manager_impl.h 
b/nestkernel/sp_manager_impl.h deleted file mode 100644 index 791ac78db2..0000000000 --- a/nestkernel/sp_manager_impl.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * sp_manager_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . - * - */ - -#ifndef SP_MANAGER_IMPL_H -#define SP_MANAGER_IMPL_H - -#include "sp_manager.h" - -// C++ includes: -#include - -// Includes from nestkernel: -#include "growth_curve.h" -#include "growth_curve_factory.h" - -namespace nest -{ - -template < typename GrowthCurve > -void -SPManager::register_growth_curve( const std::string& name ) -{ - assert( not growthcurvedict_->known( name ) ); - GenericGrowthCurveFactory* nc = new GrowthCurveFactory< GrowthCurve >(); - assert( nc ); - const int id = growthcurve_factories_.size(); - growthcurve_factories_.push_back( nc ); - growthcurvedict_->insert( name, id ); -} - -} // namespace nest - -#endif /* SP_MANAGER_IMPL_H */ diff --git a/nestkernel/sparse_node_array.cpp b/nestkernel/sparse_node_array.cpp index 6154eace5d..b2385a8471 100644 --- a/nestkernel/sparse_node_array.cpp +++ b/nestkernel/sparse_node_array.cpp @@ -26,7 +26,6 @@ #include "exceptions.h" #include "kernel_manager.h" #include "node.h" -#include "vp_manager_impl.h" nest::SparseNodeArray::NodeEntry::NodeEntry( Node& node, size_t node_id ) diff --git a/nestkernel/spatial.cpp b/nestkernel/spatial.cpp index 
603cd814d3..f776ba2216 100644 --- a/nestkernel/spatial.cpp +++ b/nestkernel/spatial.cpp @@ -41,7 +41,6 @@ // Includes from spatial: #include "grid_layer.h" -#include "layer_impl.h" namespace nest diff --git a/nestkernel/stimulation_backend_mpi.cpp b/nestkernel/stimulation_backend_mpi.cpp index 8f908d24ce..14c1ace62f 100644 --- a/nestkernel/stimulation_backend_mpi.cpp +++ b/nestkernel/stimulation_backend_mpi.cpp @@ -26,7 +26,11 @@ #include // Includes from nestkernel: +#include "connection_manager.h" +#include "io_manager.h" #include "kernel_manager.h" +#include "logging.h" +#include "logging_manager.h" #include "stimulation_backend.h" #include "stimulation_backend_mpi.h" #include "stimulation_device.h" diff --git a/nestkernel/stimulation_device.cpp b/nestkernel/stimulation_device.cpp index ab3c800c9b..77a7eb70b8 100644 --- a/nestkernel/stimulation_device.cpp +++ b/nestkernel/stimulation_device.cpp @@ -23,6 +23,7 @@ // Includes from nestkernel: #include "stimulation_device.h" +#include "io_manager.h" #include "kernel_manager.h" diff --git a/nestkernel/stopwatch.h b/nestkernel/stopwatch.h index 57b2c3ee10..637bb8ad80 100644 --- a/nestkernel/stopwatch.h +++ b/nestkernel/stopwatch.h @@ -23,21 +23,18 @@ #ifndef STOPWATCH_H #define STOPWATCH_H -// C includes: -#include - // C++ includes: #include "arraydatum.h" #include "dictdatum.h" #include "dictutils.h" #include -#include -#include #include #include // Includes from nestkernel: #include "exceptions.h" +#include "kernel_manager.h" +#include "vp_manager.h" namespace nest { @@ -524,5 +521,89 @@ class Stopwatch< detailed_timer, std::vector< timers::StopwatchTimer< CLOCK_THREAD_CPUTIME_ID > > cputime_timers_; }; +template < StopwatchGranularity detailed_timer > +void +Stopwatch< detailed_timer, + StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::start() +{ + kernel().vp_manager.assert_thread_parallel(); + + 
walltime_timers_[ kernel().vp_manager.get_thread_id() ].start(); + cputime_timers_[ kernel().vp_manager.get_thread_id() ].start(); +} + +template < StopwatchGranularity detailed_timer > +void +Stopwatch< detailed_timer, + StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::stop() +{ + kernel().vp_manager.assert_thread_parallel(); + + walltime_timers_[ kernel().vp_manager.get_thread_id() ].stop(); + cputime_timers_[ kernel().vp_manager.get_thread_id() ].stop(); +} + +template < StopwatchGranularity detailed_timer > +bool +Stopwatch< detailed_timer, + StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::is_running_() const +{ + kernel().vp_manager.assert_thread_parallel(); + + return walltime_timers_[ kernel().vp_manager.get_thread_id() ].is_running_(); +} + +template < StopwatchGranularity detailed_timer > +double +Stopwatch< detailed_timer, + StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::elapsed( timers::timeunit_t + timeunit ) const +{ + kernel().vp_manager.assert_thread_parallel(); + + return walltime_timers_[ kernel().vp_manager.get_thread_id() ].elapsed( timeunit ); +} + +template < StopwatchGranularity detailed_timer > +void +Stopwatch< detailed_timer, + StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::print( const std::string& msg, + timers::timeunit_t timeunit, + std::ostream& os ) const +{ + kernel().vp_manager.assert_thread_parallel(); + + walltime_timers_[ kernel().vp_manager.get_thread_id() ].print( msg, timeunit, os ); +} + +template < StopwatchGranularity detailed_timer > +void +Stopwatch< detailed_timer, + 
StopwatchParallelism::Threaded, + std::enable_if_t< use_threaded_timers + and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::reset() +{ + kernel().vp_manager.assert_single_threaded(); + + const size_t num_threads = kernel().vp_manager.get_num_threads(); + walltime_timers_.resize( num_threads ); + cputime_timers_.resize( num_threads ); + for ( size_t i = 0; i < num_threads; ++i ) + { + walltime_timers_[ i ].reset(); + cputime_timers_[ i ].reset(); + } +} + } /* namespace nest */ #endif /* STOPWATCH_H */ diff --git a/nestkernel/stopwatch_impl.h b/nestkernel/stopwatch_impl.h deleted file mode 100644 index 123804117e..0000000000 --- a/nestkernel/stopwatch_impl.h +++ /dev/null @@ -1,112 +0,0 @@ -/* - * stopwatch_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . 
- * - */ - -#include "kernel_manager.h" -#include "stopwatch.h" - -namespace nest -{ - -template < StopwatchGranularity detailed_timer > -void -Stopwatch< detailed_timer, - StopwatchParallelism::Threaded, - std::enable_if_t< use_threaded_timers - and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::start() -{ - kernel().vp_manager.assert_thread_parallel(); - - walltime_timers_[ kernel().vp_manager.get_thread_id() ].start(); - cputime_timers_[ kernel().vp_manager.get_thread_id() ].start(); -} - -template < StopwatchGranularity detailed_timer > -void -Stopwatch< detailed_timer, - StopwatchParallelism::Threaded, - std::enable_if_t< use_threaded_timers - and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::stop() -{ - kernel().vp_manager.assert_thread_parallel(); - - walltime_timers_[ kernel().vp_manager.get_thread_id() ].stop(); - cputime_timers_[ kernel().vp_manager.get_thread_id() ].stop(); -} - -template < StopwatchGranularity detailed_timer > -bool -Stopwatch< detailed_timer, - StopwatchParallelism::Threaded, - std::enable_if_t< use_threaded_timers - and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::is_running_() const -{ - kernel().vp_manager.assert_thread_parallel(); - - return walltime_timers_[ kernel().vp_manager.get_thread_id() ].is_running_(); -} - -template < StopwatchGranularity detailed_timer > -double -Stopwatch< detailed_timer, - StopwatchParallelism::Threaded, - std::enable_if_t< use_threaded_timers - and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::elapsed( timers::timeunit_t - timeunit ) const -{ - kernel().vp_manager.assert_thread_parallel(); - - return walltime_timers_[ kernel().vp_manager.get_thread_id() ].elapsed( timeunit ); -} - -template < StopwatchGranularity detailed_timer > -void -Stopwatch< detailed_timer, - StopwatchParallelism::Threaded, - std::enable_if_t< use_threaded_timers - and ( detailed_timer == 
StopwatchGranularity::Normal or use_detailed_timers ) > >::print( const std::string& msg, - timers::timeunit_t timeunit, - std::ostream& os ) const -{ - kernel().vp_manager.assert_thread_parallel(); - - walltime_timers_[ kernel().vp_manager.get_thread_id() ].print( msg, timeunit, os ); -} - -template < StopwatchGranularity detailed_timer > -void -Stopwatch< detailed_timer, - StopwatchParallelism::Threaded, - std::enable_if_t< use_threaded_timers - and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::reset() -{ - kernel().vp_manager.assert_single_threaded(); - - const size_t num_threads = kernel().vp_manager.get_num_threads(); - walltime_timers_.resize( num_threads ); - cputime_timers_.resize( num_threads ); - for ( size_t i = 0; i < num_threads; ++i ) - { - walltime_timers_[ i ].reset(); - cputime_timers_[ i ].reset(); - } -} -} diff --git a/nestkernel/synaptic_element.cpp b/nestkernel/synaptic_element.cpp index cd51c16861..a06e866f4f 100644 --- a/nestkernel/synaptic_element.cpp +++ b/nestkernel/synaptic_element.cpp @@ -25,6 +25,8 @@ // Includes from nestkernel: #include "exceptions.h" #include "kernel_manager.h" +#include "nest_names.h" +#include "sp_manager.h" // Includes from sli: #include "dictutils.h" diff --git a/nestkernel/target_identifier.h b/nestkernel/target_identifier.h index 82e5356260..94f9bbb88c 100644 --- a/nestkernel/target_identifier.h +++ b/nestkernel/target_identifier.h @@ -30,6 +30,8 @@ #include "compose.hpp" #include "kernel_manager.h" +#include "node.h" +#include "node_manager.h" namespace nest { diff --git a/nestkernel/target_table.cpp b/nestkernel/target_table.cpp index d9d06a8c4e..10af1c991e 100644 --- a/nestkernel/target_table.cpp +++ b/nestkernel/target_table.cpp @@ -23,6 +23,9 @@ // Includes from nestkernel: #include "target_table.h" #include "kernel_manager.h" +#include "model_manager.h" +#include "mpi_manager.h" +#include "vp_manager.h" // Includes from libnestutil #include "vector_util.h" diff --git 
a/nestkernel/target_table_devices.cpp b/nestkernel/target_table_devices.cpp index da106e9b43..c162239f00 100644 --- a/nestkernel/target_table_devices.cpp +++ b/nestkernel/target_table_devices.cpp @@ -21,21 +21,25 @@ */ // Includes from nestkernel: +#include "target_table_devices.h" #include "connector_base.h" +#include "connector_model.h" #include "kernel_manager.h" -#include "target_table_devices_impl.h" -#include "vp_manager_impl.h" +#include "model_manager.h" -nest::TargetTableDevices::TargetTableDevices() +namespace nest +{ + +TargetTableDevices::TargetTableDevices() { } -nest::TargetTableDevices::~TargetTableDevices() +TargetTableDevices::~TargetTableDevices() { } void -nest::TargetTableDevices::initialize() +TargetTableDevices::initialize() { const size_t num_threads = kernel().vp_manager.get_num_threads(); target_to_devices_.resize( num_threads ); @@ -44,7 +48,7 @@ nest::TargetTableDevices::initialize() } void -nest::TargetTableDevices::finalize() +TargetTableDevices::finalize() { for ( size_t tid = 0; tid < target_to_devices_.size(); ++tid ) { @@ -74,7 +78,7 @@ nest::TargetTableDevices::finalize() } void -nest::TargetTableDevices::resize_to_number_of_neurons() +TargetTableDevices::resize_to_number_of_neurons() { #pragma omp parallel { @@ -86,7 +90,7 @@ nest::TargetTableDevices::resize_to_number_of_neurons() } void -nest::TargetTableDevices::resize_to_number_of_synapse_types() +TargetTableDevices::resize_to_number_of_synapse_types() { kernel().vp_manager.assert_thread_parallel(); @@ -104,7 +108,7 @@ nest::TargetTableDevices::resize_to_number_of_synapse_types() } void -nest::TargetTableDevices::get_connections_to_devices_( const size_t requested_source_node_id, +TargetTableDevices::get_connections_to_devices_( const size_t requested_source_node_id, const size_t requested_target_node_id, const size_t tid, const synindex syn_id, @@ -130,7 +134,7 @@ nest::TargetTableDevices::get_connections_to_devices_( const size_t requested_so } void 
-nest::TargetTableDevices::get_connections_to_device_for_lid_( const size_t lid, +TargetTableDevices::get_connections_to_device_for_lid_( const size_t lid, const size_t requested_target_node_id, const size_t tid, const synindex syn_id, @@ -150,7 +154,7 @@ nest::TargetTableDevices::get_connections_to_device_for_lid_( const size_t lid, } void -nest::TargetTableDevices::get_connections_from_devices_( const size_t requested_source_node_id, +TargetTableDevices::get_connections_from_devices_( const size_t requested_source_node_id, const size_t requested_target_node_id, const size_t tid, const synindex syn_id, @@ -181,7 +185,7 @@ nest::TargetTableDevices::get_connections_from_devices_( const size_t requested_ } void -nest::TargetTableDevices::get_connections( const size_t requested_source_node_id, +TargetTableDevices::get_connections( const size_t requested_source_node_id, const size_t requested_target_node_id, const size_t tid, const synindex syn_id, @@ -195,3 +199,46 @@ nest::TargetTableDevices::get_connections( const size_t requested_source_node_id get_connections_from_devices_( requested_source_node_id, requested_target_node_id, tid, syn_id, synapse_label, conns ); } + +void +TargetTableDevices::add_connection_to_device( Node& source, + Node& target, + const size_t source_node_id, + const size_t tid, + const synindex syn_id, + const DictionaryDatum& p, + const double d, + const double w ) +{ + const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); + assert( lid < target_to_devices_[ tid ].size() ); + assert( syn_id < target_to_devices_[ tid ][ lid ].size() ); + + kernel() + .model_manager.get_connection_model( syn_id, tid ) + .add_connection( source, target, target_to_devices_[ tid ][ lid ], syn_id, p, d, w ); +} + +void +TargetTableDevices::add_connection_from_device( Node& source, + Node& target, + const size_t tid, + const synindex syn_id, + const DictionaryDatum& p, + const double d, + const double w ) +{ + const size_t ldid = 
source.get_local_device_id(); + assert( ldid != invalid_index ); + assert( ldid < target_from_devices_[ tid ].size() ); + assert( syn_id < target_from_devices_[ tid ][ ldid ].size() ); + + kernel() + .model_manager.get_connection_model( syn_id, tid ) + .add_connection( source, target, target_from_devices_[ tid ][ ldid ], syn_id, p, d, w ); + + // store node ID of sending device + sending_devices_node_ids_[ tid ][ ldid ] = source.get_node_id(); +} + +} diff --git a/nestkernel/target_table_devices.h b/nestkernel/target_table_devices.h index de6d31db3d..6d72f42ad3 100644 --- a/nestkernel/target_table_devices.h +++ b/nestkernel/target_table_devices.h @@ -24,18 +24,16 @@ #define TARGET_TABLE_DEVICES_H // C++ includes: -#include -#include #include // Includes from nestkernel: #include "connection_id.h" #include "connector_base.h" #include "event.h" +#include "kernel_manager.h" #include "nest_types.h" // Includes from SLI: -#include "arraydatum.h" #include "dictdatum.h" namespace nest @@ -264,6 +262,68 @@ TargetTableDevices::is_device_connected( const size_t tid, const size_t lcid ) c return false; } +inline void +TargetTableDevices::send_to_device( const size_t tid, + const size_t source_node_id, + Event& e, + const std::vector< ConnectorModel* >& cm ) +{ + const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); + for ( std::vector< ConnectorBase* >::iterator it = target_to_devices_[ tid ][ lid ].begin(); + it != target_to_devices_[ tid ][ lid ].end(); + ++it ) + { + if ( *it ) + { + ( *it )->send_to_all( tid, cm, e ); + } + } +} + +inline void +TargetTableDevices::send_to_device( const size_t tid, + const size_t source_node_id, + SecondaryEvent& e, + const std::vector< ConnectorModel* >& cm ) +{ + const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); + for ( auto& synid : e.get_supported_syn_ids() ) + { + if ( target_to_devices_[ tid ][ lid ][ synid ] ) + { + target_to_devices_[ tid ][ lid ][ synid ]->send_to_all( tid, cm, e ); + } + 
} +} + +inline void +TargetTableDevices::get_synapse_status_to_device( const size_t tid, + const size_t source_node_id, + const synindex syn_id, + DictionaryDatum& dict, + const size_t lcid ) const +{ + const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); + if ( target_to_devices_[ tid ][ lid ][ syn_id ] ) + { + target_to_devices_[ tid ][ lid ][ syn_id ]->get_synapse_status( tid, lcid, dict ); + } +} + +inline void +TargetTableDevices::set_synapse_status_to_device( const size_t tid, + const size_t source_node_id, + const synindex syn_id, + ConnectorModel& cm, + const DictionaryDatum& dict, + const size_t lcid ) +{ + const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); + if ( target_to_devices_[ tid ][ lid ][ syn_id ] ) + { + target_to_devices_[ tid ][ lid ][ syn_id ]->set_synapse_status( lcid, dict, cm ); + } +} } // namespace nest diff --git a/nestkernel/target_table_devices_impl.h b/nestkernel/target_table_devices_impl.h deleted file mode 100644 index 26a4668fb1..0000000000 --- a/nestkernel/target_table_devices_impl.h +++ /dev/null @@ -1,138 +0,0 @@ -/* - * target_table_devices_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . 
- * - */ - -#ifndef TARGET_TABLE_DEVICES_IMPL_H -#define TARGET_TABLE_DEVICES_IMPL_H - -// Includes from nestkernel: -#include "connector_base.h" -#include "kernel_manager.h" -#include "model_manager.h" -#include "node.h" -#include "target_table_devices.h" -#include "vp_manager_impl.h" - -inline void -nest::TargetTableDevices::add_connection_to_device( Node& source, - Node& target, - const size_t source_node_id, - const size_t tid, - const synindex syn_id, - const DictionaryDatum& p, - const double d, - const double w ) -{ - const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); - assert( lid < target_to_devices_[ tid ].size() ); - assert( syn_id < target_to_devices_[ tid ][ lid ].size() ); - - kernel() - .model_manager.get_connection_model( syn_id, tid ) - .add_connection( source, target, target_to_devices_[ tid ][ lid ], syn_id, p, d, w ); -} - -inline void -nest::TargetTableDevices::add_connection_from_device( Node& source, - Node& target, - const size_t tid, - const synindex syn_id, - const DictionaryDatum& p, - const double d, - const double w ) -{ - const size_t ldid = source.get_local_device_id(); - assert( ldid != invalid_index ); - assert( ldid < target_from_devices_[ tid ].size() ); - assert( syn_id < target_from_devices_[ tid ][ ldid ].size() ); - - kernel() - .model_manager.get_connection_model( syn_id, tid ) - .add_connection( source, target, target_from_devices_[ tid ][ ldid ], syn_id, p, d, w ); - - // store node ID of sending device - sending_devices_node_ids_[ tid ][ ldid ] = source.get_node_id(); -} - -inline void -nest::TargetTableDevices::send_to_device( const size_t tid, - const size_t source_node_id, - Event& e, - const std::vector< ConnectorModel* >& cm ) -{ - const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); - for ( std::vector< ConnectorBase* >::iterator it = target_to_devices_[ tid ][ lid ].begin(); - it != target_to_devices_[ tid ][ lid ].end(); - ++it ) - { - if ( *it ) - { - ( *it )->send_to_all( 
tid, cm, e ); - } - } -} - -inline void -nest::TargetTableDevices::send_to_device( const size_t tid, - const size_t source_node_id, - SecondaryEvent& e, - const std::vector< ConnectorModel* >& cm ) -{ - const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); - for ( auto& synid : e.get_supported_syn_ids() ) - { - if ( target_to_devices_[ tid ][ lid ][ synid ] ) - { - target_to_devices_[ tid ][ lid ][ synid ]->send_to_all( tid, cm, e ); - } - } -} - -inline void -nest::TargetTableDevices::get_synapse_status_to_device( const size_t tid, - const size_t source_node_id, - const synindex syn_id, - DictionaryDatum& dict, - const size_t lcid ) const -{ - const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); - if ( target_to_devices_[ tid ][ lid ][ syn_id ] ) - { - target_to_devices_[ tid ][ lid ][ syn_id ]->get_synapse_status( tid, lcid, dict ); - } -} - -inline void -nest::TargetTableDevices::set_synapse_status_to_device( const size_t tid, - const size_t source_node_id, - const synindex syn_id, - ConnectorModel& cm, - const DictionaryDatum& dict, - const size_t lcid ) -{ - const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); - if ( target_to_devices_[ tid ][ lid ][ syn_id ] ) - { - target_to_devices_[ tid ][ lid ][ syn_id ]->set_synapse_status( lcid, dict, cm ); - } -} - -#endif /* TARGET_TABLE_DEVICES_IMPL_H */ diff --git a/nestkernel/universal_data_logger.h b/nestkernel/universal_data_logger.h index cb51337b12..f78bf95be7 100644 --- a/nestkernel/universal_data_logger.h +++ b/nestkernel/universal_data_logger.h @@ -29,8 +29,10 @@ // Includes from nestkernel: #include "event.h" +#include "event_delivery_manager.h" +#include "kernel_manager.h" #include "nest_time.h" -#include "nest_types.h" +#include "node.h" #include "recordables_map.h" namespace nest @@ -84,27 +86,21 @@ namespace nest * * ... 
* - * nest::iaf_cond_alpha::Buffers_::Buffers_(iaf_cond_alpha& n) + * iaf_cond_alpha::Buffers_::Buffers_(iaf_cond_alpha& n) * : logger_(n), ... {} * - * nest::iaf_cond_alpha::Buffers_::Buffers_(const Buffers_&, iaf_cond_alpha& n) + * iaf_cond_alpha::Buffers_::Buffers_(const Buffers_&, iaf_cond_alpha& n) * : logger_(n), ... {} * - * nest::iaf_cond_alpha::iaf_cond_alpha() + * iaf_cond_alpha::iaf_cond_alpha() * : ..., B_(*this) {} * - * nest::iaf_cond_alpha::iaf_cond_alpha(const iaf_cond_alpha& n) + * iaf_cond_alpha::iaf_cond_alpha(const iaf_cond_alpha& n) * : ..., B_(n.B_, *this) {} * @code * * @todo Could HostNode be passed as const& to handle() and record_data()? * - * @note To avoid inclusion problems and code-bloat, the class - * interface is defined in this file, while most of the - * implementation is in the companion universal_data_logger_impl.h. - * As a consequence, calls to UniversalDataLogger members should - * only come from cpp files---do not inline them. - * * @addtogroup Devices */ @@ -235,7 +231,7 @@ class UniversalDataLogger // which typically is in h-files. template < typename HostNode > size_t -nest::UniversalDataLogger< HostNode >::connect_logging_device( const DataLoggingRequest& req, +UniversalDataLogger< HostNode >::connect_logging_device( const DataLoggingRequest& req, const RecordablesMap< HostNode >& rmap ) { // rports are assigned consecutively, the caller may not request specific @@ -266,7 +262,7 @@ nest::UniversalDataLogger< HostNode >::connect_logging_device( const DataLogging } template < typename HostNode > -nest::UniversalDataLogger< HostNode >::DataLogger_::DataLogger_( const DataLoggingRequest& req, +UniversalDataLogger< HostNode >::DataLogger_::DataLogger_( const DataLoggingRequest& req, const RecordablesMap< HostNode >& rmap ) : multimeter_( req.get_sender().get_node_id() ) , num_vars_( 0 ) @@ -342,30 +338,24 @@ nest::UniversalDataLogger< HostNode >::DataLogger_::DataLogger_( const DataLoggi * * ... 
* - * nest::aeif_cond_beta_multisynapse::Buffers_::Buffers_(aeif_cond_beta_multisynapse& + * aeif_cond_beta_multisynapse::Buffers_::Buffers_(aeif_cond_beta_multisynapse& * n) * : logger_(n), ... {} * - * nest::aeif_cond_beta_multisynapse::Buffers_::Buffers_(const Buffers_&, + * aeif_cond_beta_multisynapse::Buffers_::Buffers_(const Buffers_&, * aeif_cond_beta_multisynapse& n) * : logger_(n), ... {} * - * nest::aeif_cond_beta_multisynapse::aeif_cond_beta_multisynapse() + * aeif_cond_beta_multisynapse::aeif_cond_beta_multisynapse() * : ..., B_(*this) {} * - * nest::aeif_cond_beta_multisynapse::aeif_cond_beta_multisynapse(const + * aeif_cond_beta_multisynapse::aeif_cond_beta_multisynapse(const * aeif_cond_beta_multisynapse& n) * : ..., B_(n.B_, *this) {} * @code * * @todo Could HostNode be passed as const& to handle() and record_data()? * - * @note To avoid inclusion problems and code-bloat, the class - * interface is defined in this file, while most of the - * implementation is in the companion universal_data_logger_impl.h. - * As a consequence, calls to UniversalDataLogger members should - * only come from cpp files---do not inline them. - * * @addtogroup Devices */ @@ -497,7 +487,7 @@ class DynamicUniversalDataLogger // which typically is in h-files. 
template < typename HostNode > size_t -nest::DynamicUniversalDataLogger< HostNode >::connect_logging_device( const DataLoggingRequest& req, +DynamicUniversalDataLogger< HostNode >::connect_logging_device( const DataLoggingRequest& req, const DynamicRecordablesMap< HostNode >& rmap ) { // rports are assigned consecutively, the caller may not request specific @@ -529,7 +519,7 @@ nest::DynamicUniversalDataLogger< HostNode >::connect_logging_device( const Data } template < typename HostNode > -nest::DynamicUniversalDataLogger< HostNode >::DataLogger_::DataLogger_( const DataLoggingRequest& req, +DynamicUniversalDataLogger< HostNode >::DataLogger_::DataLogger_( const DataLoggingRequest& req, const DynamicRecordablesMap< HostNode >& rmap ) : multimeter_( req.get_sender().get_node_id() ) , num_vars_( 0 ) @@ -570,6 +560,408 @@ nest::DynamicUniversalDataLogger< HostNode >::DataLogger_::DataLogger_( const Da recording_offset_ = req.get_recording_offset(); } +template < typename HostNode > +DynamicUniversalDataLogger< HostNode >::DynamicUniversalDataLogger( HostNode& host ) + : host_( host ) + , data_loggers_() +{ +} + +template < typename HostNode > +void +DynamicUniversalDataLogger< HostNode >::reset() +{ + for ( DLiter_ it = data_loggers_.begin(); it != data_loggers_.end(); ++it ) + { + it->reset(); + } +} + +template < typename HostNode > +void +DynamicUniversalDataLogger< HostNode >::init() +{ + for ( DLiter_ it = data_loggers_.begin(); it != data_loggers_.end(); ++it ) + { + it->init(); + } +} + +template < typename HostNode > +void +DynamicUniversalDataLogger< HostNode >::record_data( long step ) +{ + for ( DLiter_ it = data_loggers_.begin(); it != data_loggers_.end(); ++it ) + { + it->record_data( host_, step ); + } +} + +template < typename HostNode > +void +DynamicUniversalDataLogger< HostNode >::handle( const DataLoggingRequest& dlr ) +{ + const size_t rport = dlr.get_rport(); + assert( rport >= 1 ); + assert( static_cast< size_t >( rport ) <= data_loggers_.size() 
); + data_loggers_[ rport - 1 ].handle( host_, dlr ); +} + +template < typename HostNode > +void +DynamicUniversalDataLogger< HostNode >::DataLogger_::reset() +{ + data_.clear(); + next_rec_step_ = -1; // flag as uninitialized +} + +template < typename HostNode > +void +DynamicUniversalDataLogger< HostNode >::DataLogger_::init() +{ + if ( num_vars_ < 1 ) + { + return; + } // not recording anything + + // Next recording step is in current slice or beyond, indicates that + // buffer is properly initialized. + if ( next_rec_step_ >= kernel().simulation_manager.get_slice_origin().get_steps() ) + { + return; + } + + // If we get here, the buffer has either never been initialized or has been dormant + // during a period when the host node was frozen. We then (re-)initialize. + data_.clear(); + + // store recording time in steps + rec_int_steps_ = recording_interval_.get_steps(); + + // set next recording step to first multiple of rec_int_steps_ + // beyond current time, shifted one to left, since rec_step marks + // left of update intervals, and we want time stamps at right end of + // update interval to be multiples of recording interval. Need to add + // +1 because the division result is rounded down. + next_rec_step_ = ( kernel().simulation_manager.get_time().get_steps() / rec_int_steps_ + 1 ) * rec_int_steps_ - 1; + + // If offset is not 0, adjust next recording step to account for it by first setting next recording + // step to be offset and then iterating until the variable is greater than current simulation time. 
+ if ( recording_offset_.get_steps() != 0 ) + { + next_rec_step_ = recording_offset_.get_steps() - 1; // shifted one to left + while ( next_rec_step_ <= kernel().simulation_manager.get_time().get_steps() ) + { + next_rec_step_ += rec_int_steps_; + } + } + + // number of data points per slice + const long recs_per_slice = static_cast< long >( + std::ceil( kernel().connection_manager.get_min_delay() / static_cast< double >( rec_int_steps_ ) ) ); + + data_.resize( 2, DataLoggingReply::Container( recs_per_slice, DataLoggingReply::Item( num_vars_ ) ) ); + + next_rec_.resize( 2 ); // just for safety's sake + next_rec_[ 0 ] = next_rec_[ 1 ] = 0; // start at beginning of buffer +} + +template < typename HostNode > +void +DynamicUniversalDataLogger< HostNode >::DataLogger_::record_data( const HostNode&, long step ) +{ + if ( num_vars_ < 1 or step < next_rec_step_ ) + { + return; + } + + const size_t wt = kernel().event_delivery_manager.write_toggle(); + + assert( wt < next_rec_.size() ); + assert( wt < data_.size() ); + + // The following assertion may fire if the multimeter connected to + // this logger is frozen. In that case, handle() is not called and + // next_rec_[wt] never reset. The assert() prevents error propagation. + // This is not an exception, since I consider the chance of users + // freezing multimeters very slim. + // See #464 for details. + assert( next_rec_[ wt ] < data_[ wt ].size() ); + + DataLoggingReply::Item& dest = data_[ wt ][ next_rec_[ wt ] ]; + + // set time stamp: step is left end of update interval, so add 1 + dest.timestamp = Time::step( step + 1 ); + + // obtain data through access functions, calling via pointer-to-member + for ( size_t j = 0; j < num_vars_; ++j ) + { + dest.data[ j ] = ( *( node_access_[ j ] ) )(); + } + + next_rec_step_ += rec_int_steps_; + + // We just increment. Construction ensures that we cannot overflow, + // and read-out resets. + // Overflow is possible if the multimeter is frozen, see #464. 
+ // In that case, the assertion above will trigger. + ++next_rec_[ wt ]; +} + +template < typename HostNode > +void +DynamicUniversalDataLogger< HostNode >::DataLogger_::handle( HostNode& host, const DataLoggingRequest& request ) +{ + if ( num_vars_ < 1 ) + { + return; + } // nothing to do + + // The following assertions will fire if the user forgot to call init() + // on the data logger. + assert( next_rec_.size() == 2 ); + assert( data_.size() == 2 ); + + // get read toggle and start and end of slice + const size_t rt = kernel().event_delivery_manager.read_toggle(); + assert( not data_[ rt ].empty() ); + + // Check if we have valid data, i.e., data with time stamps within the + // past time slice. This may not be the case if the node has been frozen. + // In that case, we still reset the recording marker, to prepare for the next round. + if ( data_[ rt ][ 0 ].timestamp <= kernel().simulation_manager.get_previous_slice_origin() ) + { + next_rec_[ rt ] = 0; + return; + } + + // If recording interval and min_delay are not commensurable, + // the last entry of data_ will not contain useful data for every + // other slice. We mark this by time stamp -infinity. + // Applying this mark here is less work than initializing all time stamps + // to -infinity after each call to this function. 
+ if ( next_rec_[ rt ] < data_[ rt ].size() ) + { + data_[ rt ][ next_rec_[ rt ] ].timestamp = Time::neg_inf(); + } + + // now create reply event and rigg it + DataLoggingReply reply( data_[ rt ] ); + + // "clear" data + next_rec_[ rt ] = 0; + + reply.set_sender( host ); + reply.set_sender_node_id( host.get_node_id() ); + reply.set_receiver( request.get_sender() ); + reply.set_port( request.get_port() ); + + // send it off + kernel().event_delivery_manager.send_to_node( reply ); +} + +template < typename HostNode > +UniversalDataLogger< HostNode >::UniversalDataLogger( HostNode& host ) + : host_( host ) + , data_loggers_() +{ +} + +template < typename HostNode > +void +UniversalDataLogger< HostNode >::reset() +{ + for ( DLiter_ it = data_loggers_.begin(); it != data_loggers_.end(); ++it ) + { + it->reset(); + } +} + +template < typename HostNode > +void +UniversalDataLogger< HostNode >::init() +{ + for ( DLiter_ it = data_loggers_.begin(); it != data_loggers_.end(); ++it ) + { + it->init(); + } +} + +template < typename HostNode > +void +UniversalDataLogger< HostNode >::record_data( long step ) +{ + for ( DLiter_ it = data_loggers_.begin(); it != data_loggers_.end(); ++it ) + { + it->record_data( host_, step ); + } +} + +template < typename HostNode > +void +UniversalDataLogger< HostNode >::handle( const DataLoggingRequest& dlr ) +{ + const size_t rport = dlr.get_rport(); + assert( rport >= 1 ); + assert( static_cast< size_t >( rport ) <= data_loggers_.size() ); + data_loggers_[ rport - 1 ].handle( host_, dlr ); +} + +template < typename HostNode > +void +UniversalDataLogger< HostNode >::DataLogger_::reset() +{ + data_.clear(); + next_rec_step_ = -1; // flag as uninitialized +} + +template < typename HostNode > +void +UniversalDataLogger< HostNode >::DataLogger_::init() +{ + if ( num_vars_ < 1 ) + { + // not recording anything + return; + } + + // Next recording step is in current slice or beyond, indicates that + // buffer is properly initialized. 
+ if ( next_rec_step_ >= kernel().simulation_manager.get_slice_origin().get_steps() ) + { + return; + } + + // If we get here, the buffer has either never been initialized or has + // been dormant during a period when the host node was frozen. We then (re-)initialize. + data_.clear(); + + // store recording time in steps + rec_int_steps_ = recording_interval_.get_steps(); + + // set next recording step to first multiple of rec_int_steps_ + // beyond current time, shifted one to left, since rec_step marks + // left of update intervals, and we want time stamps at right end of + // update interval to be multiples of recording interval. Need to add + // +1 because the division result is rounded down. + next_rec_step_ = ( kernel().simulation_manager.get_time().get_steps() / rec_int_steps_ + 1 ) * rec_int_steps_ - 1; + + // If offset is not 0, adjust next recording step to account for it by first setting next recording + // step to be offset and then iterating until the variable is greater than current simulation time. 
+ if ( recording_offset_.get_steps() != 0 ) + { + next_rec_step_ = recording_offset_.get_steps() - 1; // shifted one to left + while ( next_rec_step_ <= kernel().simulation_manager.get_time().get_steps() ) + { + next_rec_step_ += rec_int_steps_; + } + } + + // number of data points per slice + const long recs_per_slice = static_cast< long >( + std::ceil( kernel().connection_manager.get_min_delay() / static_cast< double >( rec_int_steps_ ) ) ); + + data_.resize( 2, DataLoggingReply::Container( recs_per_slice, DataLoggingReply::Item( num_vars_ ) ) ); + + next_rec_.resize( 2 ); // just for safety's sake + next_rec_[ 0 ] = next_rec_[ 1 ] = 0; // start at beginning of buffer +} + +template < typename HostNode > +void +UniversalDataLogger< HostNode >::DataLogger_::record_data( const HostNode& host, long step ) +{ + if ( num_vars_ < 1 or step < next_rec_step_ ) + { + return; + } + + const size_t wt = kernel().event_delivery_manager.write_toggle(); + + assert( wt < next_rec_.size() ); + assert( wt < data_.size() ); + + // The following assertion may fire if the multimeter connected to + // this logger is frozen. In that case, handle() is not called and + // next_rec_[wt] never reset. The assert() prevents error propagation. + // This is not an exception, since I consider the chance of users + // freezing multimeters very slim. + // See #464 for details. + assert( next_rec_[ wt ] < data_[ wt ].size() ); + + DataLoggingReply::Item& dest = data_[ wt ][ next_rec_[ wt ] ]; + + // set time stamp: step is left end of update interval, so add 1 + dest.timestamp = Time::step( step + 1 ); + + // obtain data through access functions, calling via pointer-to-member + for ( size_t j = 0; j < num_vars_; ++j ) + { + dest.data[ j ] = ( ( host ).*( node_access_[ j ] ) )(); + } + + next_rec_step_ += rec_int_steps_; + + // We just increment. Construction ensures that we cannot overflow, + // and read-out resets. + // Overflow is possible if the multimeter is frozen, see #464. 
+ // In that case, the assertion above will trigger. + ++next_rec_[ wt ]; +} + +template < typename HostNode > +void +UniversalDataLogger< HostNode >::DataLogger_::handle( HostNode& host, const DataLoggingRequest& request ) +{ + if ( num_vars_ < 1 ) + { + // nothing to do + return; + } + + // The following assertions will fire if the user forgot to call init() + // on the data logger. + assert( next_rec_.size() == 2 ); + assert( data_.size() == 2 ); + + // get read toggle and start and end of slice + const size_t rt = kernel().event_delivery_manager.read_toggle(); + assert( not data_[ rt ].empty() ); + + // Check if we have valid data, i.e., data with time stamps within the + // past time slice. This may not be the case if the node has been frozen. + // In that case, we still reset the recording marker, to prepare for the next round. + if ( data_[ rt ][ 0 ].timestamp <= kernel().simulation_manager.get_previous_slice_origin() ) + { + next_rec_[ rt ] = 0; + return; + } + + // If recording interval and min_delay are not commensurable, + // the last entry of data_ will not contain useful data for every + // other slice. We mark this by time stamp -infinity. + // Applying this mark here is less work than initializing all time stamps + // to -infinity after each call to this function. 
+ if ( next_rec_[ rt ] < data_[ rt ].size() ) + { + data_[ rt ][ next_rec_[ rt ] ].timestamp = Time::neg_inf(); + } + + // now create reply event and rigg it + DataLoggingReply reply( data_[ rt ] ); + + // "clear" data + next_rec_[ rt ] = 0; + + reply.set_sender( host ); + reply.set_sender_node_id( host.get_node_id() ); + reply.set_receiver( request.get_sender() ); + reply.set_port( request.get_port() ); + + // send it off + kernel().event_delivery_manager.send_to_node( reply ); +} + } // namespace nest #endif /* #ifndef UNIVERSAL_DATA_LOGGER_H */ diff --git a/nestkernel/universal_data_logger_impl.h b/nestkernel/universal_data_logger_impl.h deleted file mode 100644 index 8b22da5872..0000000000 --- a/nestkernel/universal_data_logger_impl.h +++ /dev/null @@ -1,436 +0,0 @@ -/* - * universal_data_logger_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . 
- * - */ - -#ifndef UNIVERSAL_DATA_LOGGER_IMPL_H -#define UNIVERSAL_DATA_LOGGER_IMPL_H - -#include "universal_data_logger.h" - -// Includes from nestkernel: -#include "event_delivery_manager_impl.h" -#include "kernel_manager.h" -#include "nest_time.h" -#include "node.h" - -template < typename HostNode > -nest::DynamicUniversalDataLogger< HostNode >::DynamicUniversalDataLogger( HostNode& host ) - : host_( host ) - , data_loggers_() -{ -} - -template < typename HostNode > -void -nest::DynamicUniversalDataLogger< HostNode >::reset() -{ - for ( DLiter_ it = data_loggers_.begin(); it != data_loggers_.end(); ++it ) - { - it->reset(); - } -} - -template < typename HostNode > -void -nest::DynamicUniversalDataLogger< HostNode >::init() -{ - for ( DLiter_ it = data_loggers_.begin(); it != data_loggers_.end(); ++it ) - { - it->init(); - } -} - -template < typename HostNode > -void -nest::DynamicUniversalDataLogger< HostNode >::record_data( long step ) -{ - for ( DLiter_ it = data_loggers_.begin(); it != data_loggers_.end(); ++it ) - { - it->record_data( host_, step ); - } -} - -template < typename HostNode > -void -nest::DynamicUniversalDataLogger< HostNode >::handle( const DataLoggingRequest& dlr ) -{ - const size_t rport = dlr.get_rport(); - assert( rport >= 1 ); - assert( static_cast< size_t >( rport ) <= data_loggers_.size() ); - data_loggers_[ rport - 1 ].handle( host_, dlr ); -} - -template < typename HostNode > -void -nest::DynamicUniversalDataLogger< HostNode >::DataLogger_::reset() -{ - data_.clear(); - next_rec_step_ = -1; // flag as uninitialized -} - -template < typename HostNode > -void -nest::DynamicUniversalDataLogger< HostNode >::DataLogger_::init() -{ - if ( num_vars_ < 1 ) - { - return; - } // not recording anything - - // Next recording step is in current slice or beyond, indicates that - // buffer is properly initialized. 
- if ( next_rec_step_ >= kernel().simulation_manager.get_slice_origin().get_steps() ) - { - return; - } - - // If we get here, the buffer has either never been initialized or has been dormant - // during a period when the host node was frozen. We then (re-)initialize. - data_.clear(); - - // store recording time in steps - rec_int_steps_ = recording_interval_.get_steps(); - - // set next recording step to first multiple of rec_int_steps_ - // beyond current time, shifted one to left, since rec_step marks - // left of update intervals, and we want time stamps at right end of - // update interval to be multiples of recording interval. Need to add - // +1 because the division result is rounded down. - next_rec_step_ = ( kernel().simulation_manager.get_time().get_steps() / rec_int_steps_ + 1 ) * rec_int_steps_ - 1; - - // If offset is not 0, adjust next recording step to account for it by first setting next recording - // step to be offset and then iterating until the variable is greater than current simulation time. 
- if ( recording_offset_.get_steps() != 0 ) - { - next_rec_step_ = recording_offset_.get_steps() - 1; // shifted one to left - while ( next_rec_step_ <= kernel().simulation_manager.get_time().get_steps() ) - { - next_rec_step_ += rec_int_steps_; - } - } - - // number of data points per slice - const long recs_per_slice = static_cast< long >( - std::ceil( kernel().connection_manager.get_min_delay() / static_cast< double >( rec_int_steps_ ) ) ); - - data_.resize( 2, DataLoggingReply::Container( recs_per_slice, DataLoggingReply::Item( num_vars_ ) ) ); - - next_rec_.resize( 2 ); // just for safety's sake - next_rec_[ 0 ] = next_rec_[ 1 ] = 0; // start at beginning of buffer -} - -template < typename HostNode > -void -nest::DynamicUniversalDataLogger< HostNode >::DataLogger_::record_data( const HostNode&, long step ) -{ - if ( num_vars_ < 1 or step < next_rec_step_ ) - { - return; - } - - const size_t wt = kernel().event_delivery_manager.write_toggle(); - - assert( wt < next_rec_.size() ); - assert( wt < data_.size() ); - - // The following assertion may fire if the multimeter connected to - // this logger is frozen. In that case, handle() is not called and - // next_rec_[wt] never reset. The assert() prevents error propagation. - // This is not an exception, since I consider the chance of users - // freezing multimeters very slim. - // See #464 for details. - assert( next_rec_[ wt ] < data_[ wt ].size() ); - - DataLoggingReply::Item& dest = data_[ wt ][ next_rec_[ wt ] ]; - - // set time stamp: step is left end of update interval, so add 1 - dest.timestamp = Time::step( step + 1 ); - - // obtain data through access functions, calling via pointer-to-member - for ( size_t j = 0; j < num_vars_; ++j ) - { - dest.data[ j ] = ( *( node_access_[ j ] ) )(); - } - - next_rec_step_ += rec_int_steps_; - - // We just increment. Construction ensures that we cannot overflow, - // and read-out resets. - // Overflow is possible if the multimeter is frozen, see #464. 
- // In that case, the assertion above will trigger. - ++next_rec_[ wt ]; -} - -template < typename HostNode > -void -nest::DynamicUniversalDataLogger< HostNode >::DataLogger_::handle( HostNode& host, const DataLoggingRequest& request ) -{ - if ( num_vars_ < 1 ) - { - return; - } // nothing to do - - // The following assertions will fire if the user forgot to call init() - // on the data logger. - assert( next_rec_.size() == 2 ); - assert( data_.size() == 2 ); - - // get read toggle and start and end of slice - const size_t rt = kernel().event_delivery_manager.read_toggle(); - assert( not data_[ rt ].empty() ); - - // Check if we have valid data, i.e., data with time stamps within the - // past time slice. This may not be the case if the node has been frozen. - // In that case, we still reset the recording marker, to prepare for the next round. - if ( data_[ rt ][ 0 ].timestamp <= kernel().simulation_manager.get_previous_slice_origin() ) - { - next_rec_[ rt ] = 0; - return; - } - - // If recording interval and min_delay are not commensurable, - // the last entry of data_ will not contain useful data for every - // other slice. We mark this by time stamp -infinity. - // Applying this mark here is less work than initializing all time stamps - // to -infinity after each call to this function. 
- if ( next_rec_[ rt ] < data_[ rt ].size() ) - { - data_[ rt ][ next_rec_[ rt ] ].timestamp = Time::neg_inf(); - } - - // now create reply event and rigg it - DataLoggingReply reply( data_[ rt ] ); - - // "clear" data - next_rec_[ rt ] = 0; - - reply.set_sender( host ); - reply.set_sender_node_id( host.get_node_id() ); - reply.set_receiver( request.get_sender() ); - reply.set_port( request.get_port() ); - - // send it off - kernel().event_delivery_manager.send_to_node( reply ); -} - -template < typename HostNode > -nest::UniversalDataLogger< HostNode >::UniversalDataLogger( HostNode& host ) - : host_( host ) - , data_loggers_() -{ -} - -template < typename HostNode > -void -nest::UniversalDataLogger< HostNode >::reset() -{ - for ( DLiter_ it = data_loggers_.begin(); it != data_loggers_.end(); ++it ) - { - it->reset(); - } -} - -template < typename HostNode > -void -nest::UniversalDataLogger< HostNode >::init() -{ - for ( DLiter_ it = data_loggers_.begin(); it != data_loggers_.end(); ++it ) - { - it->init(); - } -} - -template < typename HostNode > -void -nest::UniversalDataLogger< HostNode >::record_data( long step ) -{ - for ( DLiter_ it = data_loggers_.begin(); it != data_loggers_.end(); ++it ) - { - it->record_data( host_, step ); - } -} - -template < typename HostNode > -void -nest::UniversalDataLogger< HostNode >::handle( const DataLoggingRequest& dlr ) -{ - const size_t rport = dlr.get_rport(); - assert( rport >= 1 ); - assert( static_cast< size_t >( rport ) <= data_loggers_.size() ); - data_loggers_[ rport - 1 ].handle( host_, dlr ); -} - -template < typename HostNode > -void -nest::UniversalDataLogger< HostNode >::DataLogger_::reset() -{ - data_.clear(); - next_rec_step_ = -1; // flag as uninitialized -} - -template < typename HostNode > -void -nest::UniversalDataLogger< HostNode >::DataLogger_::init() -{ - if ( num_vars_ < 1 ) - { - // not recording anything - return; - } - - // Next recording step is in current slice or beyond, indicates that - // buffer 
is properly initialized. - if ( next_rec_step_ >= kernel().simulation_manager.get_slice_origin().get_steps() ) - { - return; - } - - // If we get here, the buffer has either never been initialized or has - // been dormant during a period when the host node was frozen. We then (re-)initialize. - data_.clear(); - - // store recording time in steps - rec_int_steps_ = recording_interval_.get_steps(); - - // set next recording step to first multiple of rec_int_steps_ - // beyond current time, shifted one to left, since rec_step marks - // left of update intervals, and we want time stamps at right end of - // update interval to be multiples of recording interval. Need to add - // +1 because the division result is rounded down. - next_rec_step_ = ( kernel().simulation_manager.get_time().get_steps() / rec_int_steps_ + 1 ) * rec_int_steps_ - 1; - - // If offset is not 0, adjust next recording step to account for it by first setting next recording - // step to be offset and then iterating until the variable is greater than current simulation time. 
- if ( recording_offset_.get_steps() != 0 ) - { - next_rec_step_ = recording_offset_.get_steps() - 1; // shifted one to left - while ( next_rec_step_ <= kernel().simulation_manager.get_time().get_steps() ) - { - next_rec_step_ += rec_int_steps_; - } - } - - // number of data points per slice - const long recs_per_slice = static_cast< long >( - std::ceil( kernel().connection_manager.get_min_delay() / static_cast< double >( rec_int_steps_ ) ) ); - - data_.resize( 2, DataLoggingReply::Container( recs_per_slice, DataLoggingReply::Item( num_vars_ ) ) ); - - next_rec_.resize( 2 ); // just for safety's sake - next_rec_[ 0 ] = next_rec_[ 1 ] = 0; // start at beginning of buffer -} - -template < typename HostNode > -void -nest::UniversalDataLogger< HostNode >::DataLogger_::record_data( const HostNode& host, long step ) -{ - if ( num_vars_ < 1 or step < next_rec_step_ ) - { - return; - } - - const size_t wt = kernel().event_delivery_manager.write_toggle(); - - assert( wt < next_rec_.size() ); - assert( wt < data_.size() ); - - // The following assertion may fire if the multimeter connected to - // this logger is frozen. In that case, handle() is not called and - // next_rec_[wt] never reset. The assert() prevents error propagation. - // This is not an exception, since I consider the chance of users - // freezing multimeters very slim. - // See #464 for details. - assert( next_rec_[ wt ] < data_[ wt ].size() ); - - DataLoggingReply::Item& dest = data_[ wt ][ next_rec_[ wt ] ]; - - // set time stamp: step is left end of update interval, so add 1 - dest.timestamp = Time::step( step + 1 ); - - // obtain data through access functions, calling via pointer-to-member - for ( size_t j = 0; j < num_vars_; ++j ) - { - dest.data[ j ] = ( ( host ).*( node_access_[ j ] ) )(); - } - - next_rec_step_ += rec_int_steps_; - - // We just increment. Construction ensures that we cannot overflow, - // and read-out resets. - // Overflow is possible if the multimeter is frozen, see #464. 
- // In that case, the assertion above will trigger. - ++next_rec_[ wt ]; -} - -template < typename HostNode > -void -nest::UniversalDataLogger< HostNode >::DataLogger_::handle( HostNode& host, const DataLoggingRequest& request ) -{ - if ( num_vars_ < 1 ) - { - // nothing to do - return; - } - - // The following assertions will fire if the user forgot to call init() - // on the data logger. - assert( next_rec_.size() == 2 ); - assert( data_.size() == 2 ); - - // get read toggle and start and end of slice - const size_t rt = kernel().event_delivery_manager.read_toggle(); - assert( not data_[ rt ].empty() ); - - // Check if we have valid data, i.e., data with time stamps within the - // past time slice. This may not be the case if the node has been frozen. - // In that case, we still reset the recording marker, to prepare for the next round. - if ( data_[ rt ][ 0 ].timestamp <= kernel().simulation_manager.get_previous_slice_origin() ) - { - next_rec_[ rt ] = 0; - return; - } - - // If recording interval and min_delay are not commensurable, - // the last entry of data_ will not contain useful data for every - // other slice. We mark this by time stamp -infinity. - // Applying this mark here is less work than initializing all time stamps - // to -infinity after each call to this function. 
- if ( next_rec_[ rt ] < data_[ rt ].size() ) - { - data_[ rt ][ next_rec_[ rt ] ].timestamp = Time::neg_inf(); - } - - // now create reply event and rigg it - DataLoggingReply reply( data_[ rt ] ); - - // "clear" data - next_rec_[ rt ] = 0; - - reply.set_sender( host ); - reply.set_sender_node_id( host.get_node_id() ); - reply.set_receiver( request.get_sender() ); - reply.set_port( request.get_port() ); - - // send it off - kernel().event_delivery_manager.send_to_node( reply ); -} - -#endif /* #ifndef UNIVERSAL_DATA_LOGGER_IMPL_H */ diff --git a/nestkernel/urbanczik_archiving_node.h b/nestkernel/urbanczik_archiving_node.h index a53e0f4ab7..40e6f418d1 100644 --- a/nestkernel/urbanczik_archiving_node.h +++ b/nestkernel/urbanczik_archiving_node.h @@ -143,6 +143,102 @@ UrbanczikArchivingNode< urbanczik_parameters >::get_tau_syn_in( int comp ) return urbanczik_params->tau_syn_in[ comp ]; } +template < class urbanczik_parameters > +UrbanczikArchivingNode< urbanczik_parameters >::UrbanczikArchivingNode() + : ArchivingNode() +{ +} + +template < class urbanczik_parameters > +UrbanczikArchivingNode< urbanczik_parameters >::UrbanczikArchivingNode( const UrbanczikArchivingNode& n ) + : ArchivingNode( n ) +{ +} + +template < class urbanczik_parameters > +void +UrbanczikArchivingNode< urbanczik_parameters >::get_status( DictionaryDatum& d ) const +{ + ArchivingNode::get_status( d ); +} + +template < class urbanczik_parameters > +void +UrbanczikArchivingNode< urbanczik_parameters >::set_status( const DictionaryDatum& d ) +{ + ArchivingNode::set_status( d ); +} + +template < class urbanczik_parameters > +void +UrbanczikArchivingNode< urbanczik_parameters >::get_urbanczik_history( double t1, + double t2, + std::deque< histentry_extended >::iterator* start, + std::deque< histentry_extended >::iterator* finish, + int comp ) +{ + *finish = urbanczik_history_[ comp - 1 ].end(); + if ( urbanczik_history_[ comp - 1 ].empty() ) + { + *start = *finish; + return; + } + else + { + 
std::deque< histentry_extended >::iterator runner = urbanczik_history_[ comp - 1 ].begin(); + // To have a well defined discretization of the integral, we make sure + // that we exclude the entry at t1 but include the one at t2 by subtracting + // a small number so that runner->t_ is never equal to t1 or t2. + while ( ( runner != urbanczik_history_[ comp - 1 ].end() ) and runner->t_ - 1.0e-6 < t1 ) + { + ++runner; + } + *start = runner; + while ( ( runner != urbanczik_history_[ comp - 1 ].end() ) and runner->t_ - 1.0e-6 < t2 ) + { + ( runner->access_counter_ )++; + ++runner; + } + *finish = runner; + } +} + +template < class urbanczik_parameters > +void +UrbanczikArchivingNode< urbanczik_parameters >::write_urbanczik_history( Time const& t_sp, + double V_W, + int n_spikes, + int comp ) +{ + const double t_ms = t_sp.get_ms(); + + const double g_D = urbanczik_params->g_conn[ urbanczik_parameters::SOMA ]; + const double g_L = urbanczik_params->g_L[ urbanczik_parameters::SOMA ]; + const double E_L = urbanczik_params->E_L[ urbanczik_parameters::SOMA ]; + const double V_W_star = ( ( E_L * g_L + V_W * g_D ) / ( g_D + g_L ) ); + + if ( n_incoming_ ) + { + // prune all entries from history which are no longer needed + // except the penultimate one. we might still need it. 
+ while ( urbanczik_history_[ comp - 1 ].size() > 1 ) + { + if ( urbanczik_history_[ comp - 1 ].front().access_counter_ >= n_incoming_ ) + { + urbanczik_history_[ comp - 1 ].pop_front(); + } + else + { + break; + } + } + + double dPI = ( n_spikes - urbanczik_params->phi( V_W_star ) * Time::get_resolution().get_ms() ) + * urbanczik_params->h( V_W_star ); + urbanczik_history_[ comp - 1 ].push_back( histentry_extended( t_ms, dPI, 0 ) ); + } +} + } // namespace nest #endif /* #ifndef URBANCZIK_ARCHIVING_NODE_H */ diff --git a/nestkernel/urbanczik_archiving_node_impl.h b/nestkernel/urbanczik_archiving_node_impl.h deleted file mode 100644 index 62c105fff2..0000000000 --- a/nestkernel/urbanczik_archiving_node_impl.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - * urbanczik_archiving_node_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . 
- * - */ - -#include "urbanczik_archiving_node.h" - -// Includes from nestkernel: -#include "kernel_manager.h" - -// Includes from sli: -#include "dictutils.h" - -namespace nest -{ - -// member functions for UrbanczikArchivingNode -template < class urbanczik_parameters > -nest::UrbanczikArchivingNode< urbanczik_parameters >::UrbanczikArchivingNode() - : ArchivingNode() -{ -} - -template < class urbanczik_parameters > -nest::UrbanczikArchivingNode< urbanczik_parameters >::UrbanczikArchivingNode( const UrbanczikArchivingNode& n ) - : ArchivingNode( n ) -{ -} - -template < class urbanczik_parameters > -void -nest::UrbanczikArchivingNode< urbanczik_parameters >::get_status( DictionaryDatum& d ) const -{ - ArchivingNode::get_status( d ); -} - -template < class urbanczik_parameters > -void -nest::UrbanczikArchivingNode< urbanczik_parameters >::set_status( const DictionaryDatum& d ) -{ - ArchivingNode::set_status( d ); -} - -template < class urbanczik_parameters > -void -nest::UrbanczikArchivingNode< urbanczik_parameters >::get_urbanczik_history( double t1, - double t2, - std::deque< histentry_extended >::iterator* start, - std::deque< histentry_extended >::iterator* finish, - int comp ) -{ - *finish = urbanczik_history_[ comp - 1 ].end(); - if ( urbanczik_history_[ comp - 1 ].empty() ) - { - *start = *finish; - return; - } - else - { - std::deque< histentry_extended >::iterator runner = urbanczik_history_[ comp - 1 ].begin(); - // To have a well defined discretization of the integral, we make sure - // that we exclude the entry at t1 but include the one at t2 by subtracting - // a small number so that runner->t_ is never equal to t1 or t2. 
- while ( ( runner != urbanczik_history_[ comp - 1 ].end() ) and runner->t_ - 1.0e-6 < t1 ) - { - ++runner; - } - *start = runner; - while ( ( runner != urbanczik_history_[ comp - 1 ].end() ) and runner->t_ - 1.0e-6 < t2 ) - { - ( runner->access_counter_ )++; - ++runner; - } - *finish = runner; - } -} - -template < class urbanczik_parameters > -void -nest::UrbanczikArchivingNode< urbanczik_parameters >::write_urbanczik_history( Time const& t_sp, - double V_W, - int n_spikes, - int comp ) -{ - const double t_ms = t_sp.get_ms(); - - const double g_D = urbanczik_params->g_conn[ urbanczik_parameters::SOMA ]; - const double g_L = urbanczik_params->g_L[ urbanczik_parameters::SOMA ]; - const double E_L = urbanczik_params->E_L[ urbanczik_parameters::SOMA ]; - const double V_W_star = ( ( E_L * g_L + V_W * g_D ) / ( g_D + g_L ) ); - - if ( n_incoming_ ) - { - // prune all entries from history which are no longer needed - // except the penultimate one. we might still need it. - while ( urbanczik_history_[ comp - 1 ].size() > 1 ) - { - if ( urbanczik_history_[ comp - 1 ].front().access_counter_ >= n_incoming_ ) - { - urbanczik_history_[ comp - 1 ].pop_front(); - } - else - { - break; - } - } - - double dPI = ( n_spikes - urbanczik_params->phi( V_W_star ) * Time::get_resolution().get_ms() ) - * urbanczik_params->h( V_W_star ); - urbanczik_history_[ comp - 1 ].push_back( histentry_extended( t_ms, dPI, 0 ) ); - } -} - -} // of namespace nest diff --git a/nestkernel/vp_manager.cpp b/nestkernel/vp_manager.cpp index 6ebf211e34..e9f3b21770 100644 --- a/nestkernel/vp_manager.cpp +++ b/nestkernel/vp_manager.cpp @@ -29,10 +29,13 @@ #include "logging.h" // Includes from nestkernel: +#include "connection_manager.h" #include "kernel_manager.h" +#include "logging_manager.h" +#include "model_manager.h" #include "mpi_manager.h" -#include "mpi_manager_impl.h" -#include "vp_manager_impl.h" +#include "node_manager.h" +#include "simulation_manager.h" // Includes from sli: #include "dictutils.h" 
diff --git a/nestkernel/vp_manager.h b/nestkernel/vp_manager.h index c0acc819a0..597560f721 100644 --- a/nestkernel/vp_manager.h +++ b/nestkernel/vp_manager.h @@ -26,12 +26,11 @@ // Includes from libnestutil: #include "manager_interface.h" -// Includes from nestkernel: -#include "nest_types.h" - // Includes from sli: #include "dictdatum.h" +#include "mpi_manager.h" + #ifdef _OPENMP // C includes: #include @@ -90,6 +89,11 @@ class VPManager : public ManagerInterface */ size_t get_OMP_NUM_THREADS() const; + /* + * Return the process id of the node with the specified node ID. + */ + size_t get_process_id_of_node_id( const size_t node_id ) const; + /** * Returns true if the given global node exists on this vp. */ @@ -173,10 +177,9 @@ class VPManager : public ManagerInterface const bool force_singlethreading_; size_t n_threads_; //!< Number of threads per process. }; -} inline size_t -nest::VPManager::get_thread_id() const +VPManager::get_thread_id() const { #ifdef _OPENMP return omp_get_thread_num(); @@ -186,13 +189,13 @@ nest::VPManager::get_thread_id() const } inline size_t -nest::VPManager::get_num_threads() const +VPManager::get_num_threads() const { return n_threads_; } inline void -nest::VPManager::assert_single_threaded() const +VPManager::assert_single_threaded() const { #ifdef _OPENMP assert( omp_get_num_threads() == 1 ); @@ -200,7 +203,7 @@ nest::VPManager::assert_single_threaded() const } inline void -nest::VPManager::assert_thread_parallel() const +VPManager::assert_thread_parallel() const { #ifdef _OPENMP // omp_get_num_threads() returns int @@ -208,5 +211,119 @@ nest::VPManager::assert_thread_parallel() const #endif } +inline size_t +VPManager::get_vp() const +{ + return kernel().mpi_manager.get_rank() + get_thread_id() * kernel().mpi_manager.get_num_processes(); +} + +inline size_t +VPManager::node_id_to_vp( const size_t node_id ) const +{ + return node_id % get_num_virtual_processes(); +} + +inline size_t +VPManager::vp_to_thread( const size_t vp ) 
const +{ + return vp / kernel().mpi_manager.get_num_processes(); +} + +inline size_t +VPManager::get_num_virtual_processes() const +{ + return get_num_threads() * kernel().mpi_manager.get_num_processes(); +} + +inline bool +VPManager::is_local_vp( const size_t vp ) const +{ + return kernel().mpi_manager.get_process_id_of_vp( vp ) == kernel().mpi_manager.get_rank(); +} + +inline size_t +VPManager::thread_to_vp( const size_t tid ) const +{ + return tid * kernel().mpi_manager.get_num_processes() + kernel().mpi_manager.get_rank(); +} + +inline bool +VPManager::is_node_id_vp_local( const size_t node_id ) const +{ + return ( node_id % get_num_virtual_processes() == static_cast< size_t >( get_vp() ) ); +} + +inline size_t +VPManager::node_id_to_lid( const size_t node_id ) const +{ + // starts at lid 0 for node_ids >= 1 (expected value for neurons, excl. node ID 0) + return std::ceil( static_cast< double >( node_id ) / get_num_virtual_processes() ) - 1; +} + +inline size_t +VPManager::lid_to_node_id( const size_t lid ) const +{ + const size_t vp = get_vp(); + return ( lid + static_cast< size_t >( vp == 0 ) ) * get_num_virtual_processes() + vp; +} + +inline size_t +VPManager::get_num_assigned_ranks_per_thread() const +{ + return std::ceil( static_cast< double >( kernel().mpi_manager.get_num_processes() ) / n_threads_ ); +} + +inline size_t +VPManager::get_start_rank_per_thread( const size_t tid ) const +{ + return tid * get_num_assigned_ranks_per_thread(); +} + +inline size_t +VPManager::get_end_rank_per_thread( const size_t rank_start, const size_t num_assigned_ranks_per_thread ) const +{ + size_t rank_end = rank_start + num_assigned_ranks_per_thread; + + // if we have more threads than ranks, or if ranks can not be + // distributed evenly on threads, we need to make sure, that all + // threads care only about existing ranks + if ( rank_end > kernel().mpi_manager.get_num_processes() ) + { + rank_end = std::max( rank_start, kernel().mpi_manager.get_num_processes() ); + } + 
+ return rank_end; +} + +inline AssignedRanks +VPManager::get_assigned_ranks( const size_t tid ) +{ + AssignedRanks assigned_ranks; + assigned_ranks.begin = get_start_rank_per_thread( tid ); + assigned_ranks.max_size = get_num_assigned_ranks_per_thread(); + assigned_ranks.end = get_end_rank_per_thread( assigned_ranks.begin, assigned_ranks.max_size ); + assigned_ranks.size = assigned_ranks.end - assigned_ranks.begin; + return assigned_ranks; +} + +#ifdef HAVE_MPI + +inline size_t +nest::VPManager::get_process_id_of_node_id( const size_t node_id ) const +{ + return node_id % get_num_virtual_processes() % kernel().mpi_manager.get_num_processes(); +} + +#else // HAVE_MPI + +inline size_t +nest::VPManager::get_process_id_of_node_id( const size_t ) const +{ + return 0; +} + +#endif /* HAVE_MPI */ + +} // namespace nest #endif /* #ifndef VP_MANAGER_H */ diff --git a/nestkernel/vp_manager_impl.h b/nestkernel/vp_manager_impl.h deleted file mode 100644 index e7fa701d3a..0000000000 --- a/nestkernel/vp_manager_impl.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - * vp_manager_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . 
- * - */ - -#ifndef VP_MANAGER_IMPL_H -#define VP_MANAGER_IMPL_H - -#include "vp_manager.h" - -// Includes from nestkernel: -#include "kernel_manager.h" -#include "mpi_manager.h" -#include "mpi_manager_impl.h" - -namespace nest -{ - -inline size_t -VPManager::get_vp() const -{ - return kernel().mpi_manager.get_rank() + get_thread_id() * kernel().mpi_manager.get_num_processes(); -} - -inline size_t -VPManager::node_id_to_vp( const size_t node_id ) const -{ - return node_id % get_num_virtual_processes(); -} - -inline size_t -VPManager::vp_to_thread( const size_t vp ) const -{ - return vp / kernel().mpi_manager.get_num_processes(); -} - -inline size_t -VPManager::get_num_virtual_processes() const -{ - return get_num_threads() * kernel().mpi_manager.get_num_processes(); -} - -inline bool -VPManager::is_local_vp( const size_t vp ) const -{ - return kernel().mpi_manager.get_process_id_of_vp( vp ) == kernel().mpi_manager.get_rank(); -} - -inline size_t -VPManager::thread_to_vp( const size_t tid ) const -{ - return tid * kernel().mpi_manager.get_num_processes() + kernel().mpi_manager.get_rank(); -} - -inline bool -VPManager::is_node_id_vp_local( const size_t node_id ) const -{ - return ( node_id % get_num_virtual_processes() == static_cast< size_t >( get_vp() ) ); -} - -inline size_t -VPManager::node_id_to_lid( const size_t node_id ) const -{ - // starts at lid 0 for node_ids >= 1 (expected value for neurons, excl. 
node ID 0) - return std::ceil( static_cast< double >( node_id ) / get_num_virtual_processes() ) - 1; -} - -inline size_t -VPManager::lid_to_node_id( const size_t lid ) const -{ - const size_t vp = get_vp(); - return ( lid + static_cast< size_t >( vp == 0 ) ) * get_num_virtual_processes() + vp; -} - -inline size_t -VPManager::get_num_assigned_ranks_per_thread() const -{ - return std::ceil( static_cast< double >( kernel().mpi_manager.get_num_processes() ) / n_threads_ ); -} - -inline size_t -VPManager::get_start_rank_per_thread( const size_t tid ) const -{ - return tid * get_num_assigned_ranks_per_thread(); -} - -inline size_t -VPManager::get_end_rank_per_thread( const size_t rank_start, const size_t num_assigned_ranks_per_thread ) const -{ - size_t rank_end = rank_start + num_assigned_ranks_per_thread; - - // if we have more threads than ranks, or if ranks can not be - // distributed evenly on threads, we need to make sure, that all - // threads care only about existing ranks - if ( rank_end > kernel().mpi_manager.get_num_processes() ) - { - rank_end = std::max( rank_start, kernel().mpi_manager.get_num_processes() ); - } - - return rank_end; -} - -inline AssignedRanks -VPManager::get_assigned_ranks( const size_t tid ) -{ - AssignedRanks assigned_ranks; - assigned_ranks.begin = get_start_rank_per_thread( tid ); - assigned_ranks.max_size = get_num_assigned_ranks_per_thread(); - assigned_ranks.end = get_end_rank_per_thread( assigned_ranks.begin, assigned_ranks.max_size ); - assigned_ranks.size = assigned_ranks.end - assigned_ranks.begin; - return assigned_ranks; -} - -} // namespace nest - -#endif /* VP_MANAGER_IMPL_H */ From 4d545f5837bdd73ba312d872b5e70957f8e01dae Mon Sep 17 00:00:00 2001 From: Jan Vogelsang <47158055+JanVogelsang@users.noreply.github.com> Date: Sun, 10 Aug 2025 13:18:19 +0200 Subject: [PATCH 02/23] Delete models/quantal_stp_synapse_impl.h --- models/quantal_stp_synapse_impl.h | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 
models/quantal_stp_synapse_impl.h diff --git a/models/quantal_stp_synapse_impl.h b/models/quantal_stp_synapse_impl.h deleted file mode 100644 index e69de29bb2..0000000000 From 649a448a834fbeedf43463344856bb47dcce0aff Mon Sep 17 00:00:00 2001 From: Jan Vogelsang <47158055+JanVogelsang@users.noreply.github.com> Date: Sun, 10 Aug 2025 13:44:35 +0200 Subject: [PATCH 03/23] Made previously non-inlinable functions inline --- models/glif_cond.cpp | 8 ------ models/glif_cond.h | 6 ++++ models/glif_psc_double_alpha.cpp | 8 ------ models/glif_psc_double_alpha.h | 6 ++++ models/iaf_bw_2001.cpp | 36 ----------------------- models/iaf_bw_2001.h | 35 +++++++++++++++++++++++ models/iaf_bw_2001_exact.cpp | 49 -------------------------------- models/iaf_bw_2001_exact.h | 48 +++++++++++++++++++++++++++++++ 8 files changed, 95 insertions(+), 101 deletions(-) diff --git a/models/glif_cond.cpp b/models/glif_cond.cpp index 3592cc281e..c6ad3f0895 100644 --- a/models/glif_cond.cpp +++ b/models/glif_cond.cpp @@ -800,12 +800,4 @@ nest::glif_cond::handle( CurrentEvent& e ) e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); } -// TODO JV -// Do not move this function as inline to h-file. It depends on universal_data_logger.h being included here. 
-void -nest::glif_cond::handle( DataLoggingRequest& e ) -{ - B_.logger_.handle( e ); // the logger does this for us -} - #endif // HAVE_GSL diff --git a/models/glif_cond.h b/models/glif_cond.h index acfbffef2b..3a2a7e1eaf 100644 --- a/models/glif_cond.h +++ b/models/glif_cond.h @@ -527,6 +527,12 @@ glif_cond::set_status( const DictionaryDatum& d ) S_ = stmp; } +inline void +nest::glif_cond::handle( DataLoggingRequest& e ) +{ + B_.logger_.handle( e ); // the logger does this for us +} + } // namespace nest #endif // HAVE_GSL diff --git a/models/glif_psc_double_alpha.cpp b/models/glif_psc_double_alpha.cpp index c7168ae582..abf6ff3a6d 100644 --- a/models/glif_psc_double_alpha.cpp +++ b/models/glif_psc_double_alpha.cpp @@ -695,11 +695,3 @@ nest::glif_psc_double_alpha::handle( CurrentEvent& e ) B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); } - -// TODO JV -// Do not move this function as inline to h-file. It depends on universal_data_logger.h being included here. -void -nest::glif_psc_double_alpha::handle( DataLoggingRequest& e ) -{ - B_.logger_.handle( e ); // the logger does this for us -} diff --git a/models/glif_psc_double_alpha.h b/models/glif_psc_double_alpha.h index 69a51a98b0..79934dd8a9 100644 --- a/models/glif_psc_double_alpha.h +++ b/models/glif_psc_double_alpha.h @@ -493,6 +493,12 @@ glif_psc_double_alpha::set_status( const DictionaryDatum& d ) S_ = stmp; } +inline void +glif_psc_double_alpha::handle( DataLoggingRequest& e ) +{ + B_.logger_.handle( e ); // the logger does this for us +} + } // namespace nest #endif diff --git a/models/iaf_bw_2001.cpp b/models/iaf_bw_2001.cpp index 34c4ce0974..9c57db6753 100644 --- a/models/iaf_bw_2001.cpp +++ b/models/iaf_bw_2001.cpp @@ -484,41 +484,5 @@ nest::iaf_bw_2001::update( Time const& origin, const long from, const long to ) } } -// TODO JV -// Do not move this function as inline to h-file. 
It depends on universal_data_logger.h being included here. -void -nest::iaf_bw_2001::handle( DataLoggingRequest& e ) -{ - B_.logger_.handle( e ); -} - -void -nest::iaf_bw_2001::handle( SpikeEvent& e ) -{ - assert( e.get_delay_steps() > 0 ); - - const double steps = e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ); - - const auto rport = e.get_rport(); - - if ( rport < NMDA ) - { - B_.spikes_[ rport - 1 ].add_value( steps, e.get_weight() * e.get_multiplicity() ); - } - else - { - B_.spikes_[ rport - 1 ].add_value( steps, e.get_weight() * e.get_multiplicity() * e.get_offset() ); - } -} - -void -nest::iaf_bw_2001::handle( CurrentEvent& e ) -{ - assert( e.get_delay_steps() > 0 ); - - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); -} - #endif // HAVE_BOOST #endif // HAVE_GSL diff --git a/models/iaf_bw_2001.h b/models/iaf_bw_2001.h index 3efbace816..450b044c07 100644 --- a/models/iaf_bw_2001.h +++ b/models/iaf_bw_2001.h @@ -523,6 +523,41 @@ iaf_bw_2001::set_status( const DictionaryDatum& d ) P_ = ptmp; S_ = stmp; }; + +inline void +iaf_bw_2001::handle( DataLoggingRequest& e ) +{ + B_.logger_.handle( e ); +} + +inline void +iaf_bw_2001::handle( SpikeEvent& e ) +{ + assert( e.get_delay_steps() > 0 ); + + const double steps = e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ); + + const auto rport = e.get_rport(); + + if ( rport < NMDA ) + { + B_.spikes_[ rport - 1 ].add_value( steps, e.get_weight() * e.get_multiplicity() ); + } + else + { + B_.spikes_[ rport - 1 ].add_value( steps, e.get_weight() * e.get_multiplicity() * e.get_offset() ); + } +} + +inline void +iaf_bw_2001::handle( CurrentEvent& e ) +{ + assert( e.get_delay_steps() > 0 ); + + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); +} + } // namespace #endif // HAVE_BOOST diff --git 
a/models/iaf_bw_2001_exact.cpp b/models/iaf_bw_2001_exact.cpp index 537a21e325..64b67e96db 100644 --- a/models/iaf_bw_2001_exact.cpp +++ b/models/iaf_bw_2001_exact.cpp @@ -515,53 +515,4 @@ nest::iaf_bw_2001_exact::update( Time const& origin, const long from, const long } } -// TODO JV -// Do not move this function as inline to h-file. It depends on universal_data_logger.h being included here. -void -nest::iaf_bw_2001_exact::handle( DataLoggingRequest& e ) -{ - B_.logger_.handle( e ); -} - -void -nest::iaf_bw_2001_exact::handle( SpikeEvent& e ) -{ - assert( e.get_delay_steps() > 0 ); - assert( e.get_rport() <= static_cast< int >( B_.spikes_.size() ) ); - - const double steps = e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ); - const auto rport = e.get_rport(); - - if ( rport < NMDA ) - { - B_.spikes_[ rport - 1 ].add_value( steps, e.get_weight() * e.get_multiplicity() ); - } - else - // we need to scale each individual S_j variable by its weight, - // so we store them - { - B_.spikes_[ rport - 1 ].add_value( steps, e.get_multiplicity() ); - // since we scale entire S_j variable by the weight it also affects previous spikes. - // we therefore require them to be constant. - const size_t w_idx = rport - NMDA; - if ( B_.weights_[ w_idx ] == 0 ) - { - B_.weights_[ w_idx ] = e.get_weight(); - } - else if ( B_.weights_[ w_idx ] != e.get_weight() ) - { - throw KernelException( "iaf_bw_2001_exact requires constant weights." 
); - } - } -} - -void -nest::iaf_bw_2001_exact::handle( CurrentEvent& e ) -{ - assert( e.get_delay_steps() > 0 ); - - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); -} - #endif // HAVE_GSL diff --git a/models/iaf_bw_2001_exact.h b/models/iaf_bw_2001_exact.h index cf650afefe..ff933e19a3 100644 --- a/models/iaf_bw_2001_exact.h +++ b/models/iaf_bw_2001_exact.h @@ -540,6 +540,54 @@ iaf_bw_2001_exact::set_status( const DictionaryDatum& d ) P_ = ptmp; S_ = stmp; }; + +inline void +nest::iaf_bw_2001_exact::handle( DataLoggingRequest& e ) +{ + B_.logger_.handle( e ); +} + +inline void +nest::iaf_bw_2001_exact::handle( SpikeEvent& e ) +{ + assert( e.get_delay_steps() > 0 ); + assert( e.get_rport() <= static_cast< int >( B_.spikes_.size() ) ); + + const double steps = e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ); + const auto rport = e.get_rport(); + + if ( rport < NMDA ) + { + B_.spikes_[ rport - 1 ].add_value( steps, e.get_weight() * e.get_multiplicity() ); + } + else + // we need to scale each individual S_j variable by its weight, + // so we store them + { + B_.spikes_[ rport - 1 ].add_value( steps, e.get_multiplicity() ); + // since we scale entire S_j variable by the weight it also affects previous spikes. + // we therefore require them to be constant. + const size_t w_idx = rport - NMDA; + if ( B_.weights_[ w_idx ] == 0 ) + { + B_.weights_[ w_idx ] = e.get_weight(); + } + else if ( B_.weights_[ w_idx ] != e.get_weight() ) + { + throw KernelException( "iaf_bw_2001_exact requires constant weights." 
); + } + } +} + +inline void +nest::iaf_bw_2001_exact::handle( CurrentEvent& e ) +{ + assert( e.get_delay_steps() > 0 ); + + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); +} + } // namespace #endif // HAVE_GSL From 390064ef4b07f53281e430e9bed4186ff561ab00 Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Mon, 11 Aug 2025 14:51:35 +0200 Subject: [PATCH 04/23] Added static modifier --- nestkernel/connector_base.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nestkernel/connector_base.h b/nestkernel/connector_base.h index 55bf7398f3..32a00058e7 100644 --- a/nestkernel/connector_base.h +++ b/nestkernel/connector_base.h @@ -221,7 +221,7 @@ class ConnectorBase virtual void remove_disabled_connections( const size_t first_disabled_index ) = 0; protected: - void prepare_weight_recorder_event( WeightRecorderEvent& wr_e, + static void prepare_weight_recorder_event( WeightRecorderEvent& wr_e, const size_t tid, const synindex syn_id, const unsigned int lcid, From 1eded20645bec9680a7cc98e732403713969fdb6 Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Mon, 11 Aug 2025 16:10:32 +0200 Subject: [PATCH 05/23] Simplified get_process_id_of_node_id implementation --- nestkernel/connection_manager.cpp | 4 ++-- nestkernel/mpi_manager.h | 22 ++++++++++++++++++++++ nestkernel/source_table.cpp | 6 +++--- nestkernel/vp_manager.h | 23 ----------------------- 4 files changed, 27 insertions(+), 28 deletions(-) diff --git a/nestkernel/connection_manager.cpp b/nestkernel/connection_manager.cpp index c6e4cf954e..aa08aac7bd 100644 --- a/nestkernel/connection_manager.cpp +++ b/nestkernel/connection_manager.cpp @@ -1531,7 +1531,7 @@ ConnectionManager::compute_compressed_secondary_recv_buffer_positions( const siz { const size_t source_node_id = source_table_.get_node_id( tid, syn_id, lcid ); const size_t sg_s_id = source_table_.pack_source_node_id_and_syn_id( source_node_id, syn_id ); - 
const size_t source_rank = kernel().vp_manager.get_process_id_of_node_id( source_node_id ); + const size_t source_rank = kernel().mpi_manager.get_process_id_of_node_id( source_node_id ); positions[ lcid ] = buffer_pos_of_source_node_id_syn_id_[ sg_s_id ] + kernel().mpi_manager.get_recv_displacement_secondary_events_in_int( source_rank ); @@ -1836,7 +1836,7 @@ ConnectionManager::fill_target_buffer( const size_t tid, while ( source_2_idx != csd_maps.at( syn_id ).end() ) { const auto source_gid = source_2_idx->first; - const auto source_rank = kernel().vp_manager.get_process_id_of_node_id( source_gid ); + const auto source_rank = kernel().mpi_manager.get_process_id_of_node_id( source_gid ); if ( not( rank_start <= source_rank and source_rank < rank_end ) ) { // We are not responsible for this source. diff --git a/nestkernel/mpi_manager.h b/nestkernel/mpi_manager.h index f70bb09623..c64782d834 100644 --- a/nestkernel/mpi_manager.h +++ b/nestkernel/mpi_manager.h @@ -103,6 +103,11 @@ class MPIManager : public ManagerInterface */ size_t get_process_id_of_vp( const size_t vp ) const; + /* + * Return the process id of the node with the specified node ID. + */ + size_t get_process_id_of_node_id( const size_t node_id ) const; + /** * Finalize MPI communication (needs to be separate from MPIManager::finalize * when compiled with MUSIC since spikes can arrive and handlers called here) @@ -765,6 +770,23 @@ nest::MPIManager::get_process_id_of_vp( const size_t vp ) const #ifdef HAVE_MPI +inline size_t +MPIManager::get_process_id_of_node_id( const size_t node_id ) const +{ + return node_id % num_processes_; +} + +#else + +inline size_t +MPIManager::get_process_id_of_node_id( const size_t ) const +{ + return 0; +} + +#endif /* HAVE_MPI */ + +#ifdef HAVE_MPI // Variable to hold the MPI communicator to use. 
#ifdef HAVE_MUSIC extern MPI::Intracomm comm; diff --git a/nestkernel/source_table.cpp b/nestkernel/source_table.cpp index f82fca8312..e12349785e 100644 --- a/nestkernel/source_table.cpp +++ b/nestkernel/source_table.cpp @@ -242,7 +242,7 @@ nest::SourceTable::compute_buffer_pos_for_unique_secondary_sources( const size_t cit != ( *unique_secondary_source_node_id_syn_id ).end(); ++cit ) { - const size_t source_rank = kernel().vp_manager.get_process_id_of_node_id( cit->first ); + const size_t source_rank = kernel().mpi_manager.get_process_id_of_node_id( cit->first ); const size_t event_size = kernel().model_manager.get_secondary_event_prototype( cit->second, tid ).size(); buffer_pos_of_source_node_id_syn_id.insert( @@ -277,7 +277,7 @@ nest::SourceTable::source_should_be_processed_( const size_t rank_start, const size_t rank_end, const Source& source ) const { - const size_t source_rank = kernel().vp_manager.get_process_id_of_node_id( source.get_node_id() ); + const size_t source_rank = kernel().mpi_manager.get_process_id_of_node_id( source.get_node_id() ); return not( source.is_processed() or source.is_disabled() @@ -409,7 +409,7 @@ nest::SourceTable::get_next_target_data( const size_t tid, // communicated via MPI, so we prepare to return the relevant data // set the source rank - source_rank = kernel().vp_manager.get_process_id_of_node_id( current_source.get_node_id() ); + source_rank = kernel().mpi_manager.get_process_id_of_node_id( current_source.get_node_id() ); if ( not populate_target_data_fields_( current_position, current_source, source_rank, next_target_data ) ) { diff --git a/nestkernel/vp_manager.h b/nestkernel/vp_manager.h index 597560f721..c4227d2596 100644 --- a/nestkernel/vp_manager.h +++ b/nestkernel/vp_manager.h @@ -89,11 +89,6 @@ class VPManager : public ManagerInterface */ size_t get_OMP_NUM_THREADS() const; - /* - * Return the process id of the node with the specified node ID. 
- */ - size_t get_process_id_of_node_id( const size_t node_id ) const; - /** * Returns true if the given global node exists on this vp. */ @@ -306,24 +301,6 @@ VPManager::get_assigned_ranks( const size_t tid ) return assigned_ranks; } -#ifdef HAVE_MPI - -inline size_t -nest::VPManager::get_process_id_of_node_id( const size_t node_id ) const -{ - return node_id % get_num_virtual_processes() % kernel().mpi_manager.get_num_processes(); -} - -#else // HAVE_MPI - -inline size_t -nest::VPManager::get_process_id_of_node_id( const size_t ) const -{ - return 0; -} - -#endif /* HAVE_MPI */ - } // namespace nest #endif /* #ifndef VP_MANAGER_H */ From 98c62758d18c305507295e46e62c9fe18e85ea5a Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Tue, 12 Aug 2025 18:30:47 +0200 Subject: [PATCH 06/23] Fixed remaining bugs --- models/glif_psc.cpp | 77 ++++++++++--------------------- models/glif_psc.h | 34 +++++++++++++- models/music_cont_in_proxy.cpp | 4 +- models/music_cont_out_proxy.cpp | 5 +- models/music_message_in_proxy.cpp | 5 +- models/music_rate_in_proxy.cpp | 4 +- models/music_rate_out_proxy.cpp | 8 ++-- nestkernel/connector_base_impl.h | 0 nestkernel/model_manager_impl.h | 0 nestkernel/nest_impl.h | 3 -- 10 files changed, 71 insertions(+), 69 deletions(-) delete mode 100644 nestkernel/connector_base_impl.h delete mode 100644 nestkernel/model_manager_impl.h diff --git a/models/glif_psc.cpp b/models/glif_psc.cpp index c0844856c3..4eb7e52ddc 100644 --- a/models/glif_psc.cpp +++ b/models/glif_psc.cpp @@ -38,10 +38,10 @@ using namespace nest; -nest::RecordablesMap< nest::glif_psc > nest::glif_psc::recordablesMap_; - namespace nest { +RecordablesMap< glif_psc > glif_psc::recordablesMap_; + void register_glif_psc( const std::string& name ) { @@ -52,23 +52,22 @@ register_glif_psc( const std::string& name ) // for each quantity to be recorded. 
template <> void -RecordablesMap< nest::glif_psc >::create() +RecordablesMap< glif_psc >::create() { - insert_( names::V_m, &nest::glif_psc::get_V_m_ ); - insert_( names::ASCurrents_sum, &nest::glif_psc::get_ASCurrents_sum_ ); - insert_( names::I, &nest::glif_psc::get_I_ ); - insert_( names::I_syn, &nest::glif_psc::get_I_syn_ ); - insert_( names::threshold, &nest::glif_psc::get_threshold_ ); - insert_( names::threshold_spike, &nest::glif_psc::get_threshold_spike_ ); - insert_( names::threshold_voltage, &nest::glif_psc::get_threshold_voltage_ ); -} + insert_( names::V_m, &glif_psc::get_V_m_ ); + insert_( names::ASCurrents_sum, &glif_psc::get_ASCurrents_sum_ ); + insert_( names::I, &glif_psc::get_I_ ); + insert_( names::I_syn, &glif_psc::get_I_syn_ ); + insert_( names::threshold, &glif_psc::get_threshold_ ); + insert_( names::threshold_spike, &glif_psc::get_threshold_spike_ ); + insert_( names::threshold_voltage, &glif_psc::get_threshold_voltage_ ); } /* ---------------------------------------------------------------- * Default constructors defining default parameters and state * ---------------------------------------------------------------- */ -nest::glif_psc::Parameters_::Parameters_() +glif_psc::Parameters_::Parameters_() : G_( 9.43 ) // in nS , E_L_( -78.85 ) // in mV , th_inf_( -51.68 - E_L_ ) // in mv, rel to E_L_, - 51.68 - E_L_, i.e., 27.17 @@ -93,7 +92,7 @@ nest::glif_psc::Parameters_::Parameters_() { } -nest::glif_psc::State_::State_( const Parameters_& p ) +glif_psc::State_::State_( const Parameters_& p ) : U_( 0.0 ) // in mV , threshold_( p.th_inf_ ) // in mV , threshold_spike_( 0.0 ) // in mV @@ -117,7 +116,7 @@ nest::glif_psc::State_::State_( const Parameters_& p ) * ---------------------------------------------------------------- */ void -nest::glif_psc::Parameters_::get( DictionaryDatum& d ) const +glif_psc::Parameters_::get( DictionaryDatum& d ) const { def< double >( d, names::V_th, th_inf_ + E_L_ ); def< double >( d, names::g, G_ ); @@ -147,7 
+146,7 @@ nest::glif_psc::Parameters_::get( DictionaryDatum& d ) const } double -nest::glif_psc::Parameters_::set( const DictionaryDatum& d, Node* node ) +glif_psc::Parameters_::set( const DictionaryDatum& d, Node* node ) { // if E_L_ is changed, we need to adjust all variables defined relative to // E_L_ @@ -301,7 +300,7 @@ nest::glif_psc::Parameters_::set( const DictionaryDatum& d, Node* node ) } void -nest::glif_psc::State_::get( DictionaryDatum& d, const Parameters_& p ) const +glif_psc::State_::get( DictionaryDatum& d, const Parameters_& p ) const { def< double >( d, names::V_m, U_ + p.E_L_ ); def< std::vector< double > >( d, names::ASCurrents, ASCurrents_ ); @@ -310,7 +309,7 @@ nest::glif_psc::State_::get( DictionaryDatum& d, const Parameters_& p ) const } void -nest::glif_psc::State_::set( const DictionaryDatum& d, const Parameters_& p, double delta_EL, Node* node ) +glif_psc::State_::set( const DictionaryDatum& d, const Parameters_& p, double delta_EL, Node* node ) { if ( updateValueParam< double >( d, names::V_m, U_, node ) ) { @@ -349,12 +348,12 @@ nest::glif_psc::State_::set( const DictionaryDatum& d, const Parameters_& p, dou } } -nest::glif_psc::Buffers_::Buffers_( glif_psc& n ) +glif_psc::Buffers_::Buffers_( glif_psc& n ) : logger_( n ) { } -nest::glif_psc::Buffers_::Buffers_( const Buffers_&, glif_psc& n ) +glif_psc::Buffers_::Buffers_( const Buffers_&, glif_psc& n ) : logger_( n ) { } @@ -363,7 +362,7 @@ nest::glif_psc::Buffers_::Buffers_( const Buffers_&, glif_psc& n ) * Default and copy constructor for node * ---------------------------------------------------------------- */ -nest::glif_psc::glif_psc() +glif_psc::glif_psc() : ArchivingNode() , P_() , S_( P_ ) @@ -372,7 +371,7 @@ nest::glif_psc::glif_psc() recordablesMap_.create(); } -nest::glif_psc::glif_psc( const glif_psc& n ) +glif_psc::glif_psc( const glif_psc& n ) : ArchivingNode( n ) , P_( n.P_ ) , S_( n.S_ ) @@ -385,7 +384,7 @@ nest::glif_psc::glif_psc( const glif_psc& n ) * 
---------------------------------------------------------------- */ void -nest::glif_psc::init_buffers_() +glif_psc::init_buffers_() { B_.spikes_.clear(); // includes resize B_.currents_.clear(); // include resize @@ -393,7 +392,7 @@ nest::glif_psc::init_buffers_() } void -nest::glif_psc::pre_run_hook() +glif_psc::pre_run_hook() { B_.logger_.init(); @@ -467,7 +466,7 @@ nest::glif_psc::pre_run_hook() * ---------------------------------------------------------------- */ void -nest::glif_psc::update( Time const& origin, const long from, const long to ) +glif_psc::update( Time const& origin, const long from, const long to ) { double v_old = S_.U_; @@ -596,32 +595,4 @@ nest::glif_psc::update( Time const& origin, const long from, const long to ) } } -size_t -nest::glif_psc::handles_test_event( SpikeEvent&, size_t receptor_type ) -{ - if ( receptor_type <= 0 or receptor_type > P_.n_receptors_() ) - { - throw IncompatibleReceptorType( receptor_type, get_name(), "SpikeEvent" ); - } - - P_.has_connections_ = true; - return receptor_type; -} - -void -nest::glif_psc::handle( SpikeEvent& e ) -{ - assert( e.get_delay_steps() > 0 ); - - B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); -} - -void -nest::glif_psc::handle( CurrentEvent& e ) -{ - assert( e.get_delay_steps() > 0 ); - - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); -} +} // namespace nest diff --git a/models/glif_psc.h b/models/glif_psc.h index d07e3a60aa..1215624ac7 100644 --- a/models/glif_psc.h +++ b/models/glif_psc.h @@ -461,12 +461,42 @@ glif_psc::set_status( const DictionaryDatum& d ) S_ = stmp; } -void -nest::glif_psc::handle( DataLoggingRequest& e ) +inline size_t +glif_psc::handles_test_event( SpikeEvent&, size_t receptor_type ) +{ + if ( receptor_type <= 0 or receptor_type > P_.n_receptors_() ) + { + throw 
IncompatibleReceptorType( receptor_type, get_name(), "SpikeEvent" ); + } + + P_.has_connections_ = true; + return receptor_type; +} + +inline void +glif_psc::handle( DataLoggingRequest& e ) { B_.logger_.handle( e ); // the logger does this for us } +inline void +glif_psc::handle( SpikeEvent& e ) +{ + assert( e.get_delay_steps() > 0 ); + + B_.spikes_[ e.get_rport() - 1 ].add_value( + e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); +} + +inline void +glif_psc::handle( CurrentEvent& e ) +{ + assert( e.get_delay_steps() > 0 ); + + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); +} + } // namespace nest #endif diff --git a/models/music_cont_in_proxy.cpp b/models/music_cont_in_proxy.cpp index 41dbb9269a..81494075a6 100644 --- a/models/music_cont_in_proxy.cpp +++ b/models/music_cont_in_proxy.cpp @@ -29,7 +29,6 @@ #include "dict.h" #include "dictutils.h" #include "doubledatum.h" -#include "integerdatum.h" // Includes from libnestutil: #include "compose.hpp" @@ -37,7 +36,10 @@ #include "logging.h" // Includes from nestkernel: +#include "event_delivery_manager.h" #include "kernel_manager.h" +#include "music_manager.h" +#include "nest.h" void nest::register_music_cont_in_proxy( const std::string& name ) diff --git a/models/music_cont_out_proxy.cpp b/models/music_cont_out_proxy.cpp index 1eef0f015c..9398587e3b 100644 --- a/models/music_cont_out_proxy.cpp +++ b/models/music_cont_out_proxy.cpp @@ -29,7 +29,10 @@ #include // Includes from nestkernel: +#include "event_delivery_manager.h" #include "kernel_manager.h" +#include "music_manager.h" +#include "nest.h" #include "nest_datums.h" // Includes from libnestutil: @@ -39,8 +42,6 @@ // Includes from sli: #include "dict.h" #include "dictutils.h" -#include "doubledatum.h" -#include "integerdatum.h" void nest::register_music_cont_out_proxy( const std::string& name ) diff --git 
a/models/music_message_in_proxy.cpp b/models/music_message_in_proxy.cpp index fd58c5947b..ff3eb4f955 100644 --- a/models/music_message_in_proxy.cpp +++ b/models/music_message_in_proxy.cpp @@ -29,15 +29,16 @@ // Includes from sli: #include "arraydatum.h" -#include "doubledatum.h" -#include "integerdatum.h" // Includes from libnestutil: #include "compose.hpp" #include "logging.h" // Includes from nestkernel: +#include "event_delivery_manager.h" #include "kernel_manager.h" +#include "music_manager.h" +#include "nest.h" void nest::register_music_message_in_proxy( const std::string& name ) diff --git a/models/music_rate_in_proxy.cpp b/models/music_rate_in_proxy.cpp index 70028f3f9f..0aee2c2f2b 100644 --- a/models/music_rate_in_proxy.cpp +++ b/models/music_rate_in_proxy.cpp @@ -29,14 +29,16 @@ #include "dict.h" #include "dictutils.h" #include "doubledatum.h" -#include "integerdatum.h" // Includes from libnestutil: #include "compose.hpp" #include "logging.h" // Includes from nestkernel: +#include "event_delivery_manager.h" #include "kernel_manager.h" +#include "music_manager.h" +#include "nest.h" void nest::register_music_rate_in_proxy( const std::string& name ) diff --git a/models/music_rate_out_proxy.cpp b/models/music_rate_out_proxy.cpp index e12631a0ef..df5f79f344 100644 --- a/models/music_rate_out_proxy.cpp +++ b/models/music_rate_out_proxy.cpp @@ -24,22 +24,20 @@ #ifdef HAVE_MUSIC -// C++ includes: -#include - // Includes from sli: #include "arraydatum.h" #include "dict.h" #include "dictutils.h" -#include "doubledatum.h" -#include "integerdatum.h" // Includes from libnestutil: #include "compose.hpp" #include "logging.h" // Includes from nestkernel: +#include "event_delivery_manager.h" #include "kernel_manager.h" +#include "music_manager.h" +#include "nest.h" /* ---------------------------------------------------------------- * Default constructors defining default parameters and state diff --git a/nestkernel/connector_base_impl.h b/nestkernel/connector_base_impl.h 
deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/nestkernel/model_manager_impl.h b/nestkernel/model_manager_impl.h deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/nestkernel/nest_impl.h b/nestkernel/nest_impl.h index 45da1a8808..1bfefd321b 100644 --- a/nestkernel/nest_impl.h +++ b/nestkernel/nest_impl.h @@ -22,10 +22,7 @@ // Includes from nestkernel: -#include "connector_model_impl.h" -#include "genericmodel_impl.h" #include "kernel_manager.h" -#include "model_manager_impl.h" namespace nest { From a63d91cb5ce970350734791aa65704bfd29be47b Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Tue, 12 Aug 2025 18:32:30 +0200 Subject: [PATCH 07/23] Removed nest_impl.h --- nestkernel/nest_impl.h | 43 ------------------------------------------ 1 file changed, 43 deletions(-) delete mode 100644 nestkernel/nest_impl.h diff --git a/nestkernel/nest_impl.h b/nestkernel/nest_impl.h deleted file mode 100644 index 1bfefd321b..0000000000 --- a/nestkernel/nest_impl.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * nest_impl.h - * - * This file is part of NEST. - * - * Copyright (C) 2004 The NEST Initiative - * - * NEST is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * NEST is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with NEST. If not, see . 
- * - */ - - -// Includes from nestkernel: -#include "kernel_manager.h" - -namespace nest -{ - -template < template < typename > class ConnectorModelT > -void -register_connection_model( const std::string& name ) -{ - kernel().model_manager.register_connection_model< ConnectorModelT >( name ); -} - -template < typename NodeModelT > -void -register_node_model( const std::string& name, std::string deprecation_info ) -{ - kernel().model_manager.register_node_model< NodeModelT >( name, deprecation_info ); -} -} From 957b2f3687fd3963554dc6a219630bba6511c9a2 Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Tue, 12 Aug 2025 22:57:47 +0200 Subject: [PATCH 08/23] Moved from a composite singleton to inline globals --- .../guidelines/coding_guidelines_cpp.rst | 28 +-- doc/htmldoc/installation/cmake_options.rst | 2 +- libnestutil/dict_util.h | 4 +- libnestutil/logging.h | 10 +- models/ac_generator.cpp | 4 +- models/aeif_cond_alpha.cpp | 9 +- models/aeif_cond_alpha_astro.cpp | 11 +- models/aeif_cond_alpha_multisynapse.cpp | 8 +- models/aeif_cond_beta_multisynapse.cpp | 8 +- models/aeif_cond_exp.cpp | 9 +- models/aeif_psc_alpha.cpp | 9 +- models/aeif_psc_delta.cpp | 9 +- models/aeif_psc_delta_clopath.cpp | 9 +- models/aeif_psc_exp.cpp | 9 +- models/amat2_psc_exp.cpp | 9 +- models/astrocyte_lr_1994.cpp | 9 +- models/binary_neuron.h | 12 +- models/cm_default.cpp | 8 +- models/cont_delay_synapse.h | 5 +- models/correlomatrix_detector.cpp | 10 +- models/correlomatrix_detector.h | 4 +- models/correlospinmatrix_detector.cpp | 2 +- models/dc_generator.cpp | 2 +- models/eprop_iaf.cpp | 10 +- models/eprop_iaf_adapt.cpp | 10 +- models/eprop_iaf_adapt_bsshslm_2020.cpp | 18 +- models/eprop_iaf_bsshslm_2020.cpp | 18 +- models/eprop_iaf_psc_delta.cpp | 10 +- models/eprop_iaf_psc_delta_adapt.cpp | 10 +- models/eprop_readout.cpp | 12 +- models/eprop_readout.h | 2 +- models/eprop_readout_bsshslm_2020.cpp | 22 +- models/eprop_readout_bsshslm_2020.h | 2 +- models/eprop_synapse.cpp | 2 +- 
models/eprop_synapse_bsshslm_2020.cpp | 2 +- models/eprop_synapse_bsshslm_2020.h | 4 +- models/gamma_sup_generator.cpp | 2 +- models/gif_cond_exp.cpp | 9 +- models/gif_cond_exp_multisynapse.cpp | 8 +- models/gif_pop_psc_exp.cpp | 9 +- models/gif_psc_exp.cpp | 9 +- models/gif_psc_exp_multisynapse.cpp | 8 +- models/glif_cond.cpp | 9 +- models/glif_psc.cpp | 2 +- models/glif_psc.h | 7 +- models/glif_psc_double_alpha.cpp | 9 +- models/hh_cond_beta_gap_traub.cpp | 31 +-- models/hh_cond_exp_traub.cpp | 9 +- models/hh_psc_alpha.cpp | 9 +- models/hh_psc_alpha_clopath.cpp | 9 +- models/hh_psc_alpha_gap.cpp | 31 +-- models/ht_neuron.cpp | 8 +- models/iaf_bw_2001.cpp | 4 +- models/iaf_bw_2001.h | 6 +- models/iaf_bw_2001_exact.cpp | 2 +- models/iaf_bw_2001_exact.h | 6 +- models/iaf_chs_2007.cpp | 4 +- models/iaf_chxk_2008.cpp | 10 +- models/iaf_cond_alpha.cpp | 10 +- models/iaf_cond_alpha_mc.cpp | 8 +- models/iaf_cond_beta.cpp | 10 +- models/iaf_cond_exp.cpp | 9 +- models/iaf_cond_exp_sfa_rr.cpp | 9 +- models/iaf_psc_alpha.cpp | 12 +- models/iaf_psc_alpha_multisynapse.cpp | 8 +- models/iaf_psc_alpha_ps.cpp | 9 +- models/iaf_psc_delta.cpp | 9 +- models/iaf_psc_delta_ps.cpp | 9 +- models/iaf_psc_exp.cpp | 12 +- models/iaf_psc_exp_htum.cpp | 9 +- models/iaf_psc_exp_multisynapse.cpp | 8 +- models/iaf_psc_exp_ps.cpp | 9 +- models/iaf_psc_exp_ps_lossless.cpp | 9 +- models/iaf_tum_2000.cpp | 12 +- models/ignore_and_fire.cpp | 10 +- models/inhomogeneous_poisson_generator.cpp | 4 +- models/izhikevich.cpp | 9 +- models/jonke_synapse.h | 2 +- models/mat2_psc_exp.cpp | 9 +- models/mip_generator.cpp | 2 +- models/multimeter.cpp | 2 +- models/multimeter.h | 2 +- models/music_cont_in_proxy.cpp | 2 +- models/music_cont_out_proxy.cpp | 12 +- models/music_event_in_proxy.cpp | 14 +- models/music_event_out_proxy.cpp | 2 +- models/music_message_in_proxy.cpp | 2 +- models/music_rate_in_proxy.cpp | 12 +- models/music_rate_out_proxy.cpp | 2 +- models/noise_generator.cpp | 4 +- 
models/noise_generator.h | 2 +- models/parrot_neuron.cpp | 4 +- models/parrot_neuron_ps.cpp | 4 +- models/poisson_generator.cpp | 2 +- models/poisson_generator_ps.cpp | 2 +- models/pp_cond_exp_mc_urbanczik.cpp | 8 +- models/pp_psc_delta.cpp | 9 +- models/ppd_sup_generator.cpp | 2 +- models/pulsepacket_generator.cpp | 4 +- models/rate_neuron_ipn.h | 16 +- models/rate_neuron_opn.h | 16 +- models/rate_transformer_node.h | 16 +- models/siegert_neuron.cpp | 12 +- models/sinusoidal_gamma_generator.cpp | 8 +- models/sinusoidal_poisson_generator.cpp | 6 +- models/spike_dilutor.cpp | 6 +- models/spike_generator.cpp | 2 +- models/spike_generator.h | 2 +- models/spike_recorder.cpp | 2 +- models/spike_train_injector.cpp | 4 +- models/spike_train_injector.h | 2 +- models/spin_detector.cpp | 2 +- models/stdp_dopamine_synapse.cpp | 4 +- models/stdp_dopamine_synapse.h | 10 +- models/stdp_facetshw_synapse_hom.h | 2 +- models/stdp_nn_pre_centered_synapse.h | 2 +- models/stdp_nn_restr_synapse.h | 2 +- models/stdp_nn_symm_synapse.h | 2 +- models/stdp_pl_synapse_hom.h | 2 +- models/stdp_synapse.h | 2 +- models/stdp_synapse_hom.h | 2 +- models/stdp_triplet_synapse.h | 2 +- models/step_current_generator.cpp | 2 +- models/step_rate_generator.cpp | 4 +- models/vogels_sprekeler_synapse.h | 2 +- models/volume_transmitter.cpp | 17 +- models/weight_recorder.cpp | 2 +- nest/neststartup.cpp | 7 +- nestkernel/archiving_node.cpp | 17 +- nestkernel/buffer_resize_log.cpp | 2 +- nestkernel/clopath_archiving_node.cpp | 4 +- nestkernel/common_synapse_properties.cpp | 4 +- nestkernel/conn_builder.cpp | 212 +++++++++--------- nestkernel/conn_builder.h | 2 +- nestkernel/conn_builder_conngen.cpp | 10 +- nestkernel/connection.h | 2 +- nestkernel/connection_creator.cpp | 14 +- nestkernel/connection_creator.h | 65 +++--- nestkernel/connection_manager.cpp | 210 ++++++++--------- nestkernel/connector_base.cpp | 2 +- nestkernel/connector_model.cpp | 2 +- nestkernel/connector_model_impl.h | 18 +- 
nestkernel/delay_checker.cpp | 14 +- nestkernel/eprop_archiving_node.h | 2 +- nestkernel/eprop_archiving_node_recurrent.h | 2 +- nestkernel/event.cpp | 2 +- nestkernel/event_delivery_manager.cpp | 200 +++++++++-------- nestkernel/event_delivery_manager.h | 30 +-- nestkernel/free_layer.h | 12 +- nestkernel/kernel_manager.cpp | 87 ++----- nestkernel/kernel_manager.h | 88 +++----- nestkernel/layer.cpp | 4 +- nestkernel/layer_impl.h | 4 +- nestkernel/model.cpp | 4 +- nestkernel/model_manager.cpp | 37 +-- nestkernel/model_manager.h | 10 +- nestkernel/modelrange_manager.cpp | 2 +- nestkernel/module_manager.cpp | 2 +- nestkernel/mpi_manager.cpp | 15 +- nestkernel/music_event_handler.cpp | 2 +- nestkernel/music_rate_in_handler.cpp | 4 +- nestkernel/nest.cpp | 78 +++---- nestkernel/nest.h | 4 +- nestkernel/nestmodule.cpp | 83 +++---- nestkernel/node.cpp | 6 +- nestkernel/node_collection.cpp | 135 +++++------ nestkernel/node_manager.cpp | 117 +++++----- nestkernel/parameter.cpp | 16 +- nestkernel/per_thread_bool_indicator.cpp | 18 +- nestkernel/proxynode.cpp | 18 +- nestkernel/random_manager.cpp | 14 +- nestkernel/recording_backend_ascii.cpp | 14 +- nestkernel/recording_backend_memory.cpp | 4 +- nestkernel/recording_backend_mpi.cpp | 14 +- nestkernel/recording_backend_screen.cpp | 2 +- nestkernel/recording_backend_sionlib.cpp | 31 +-- nestkernel/recording_device.cpp | 19 +- nestkernel/ring_buffer.cpp | 20 +- nestkernel/ring_buffer.h | 20 +- nestkernel/secondary_event.h | 4 +- nestkernel/send_buffer_position.cpp | 10 +- nestkernel/simulation_manager.cpp | 188 ++++++++-------- nestkernel/slice_ring_buffer.cpp | 11 +- nestkernel/slice_ring_buffer.h | 2 +- nestkernel/sonata_connector.cpp | 23 +- nestkernel/source_table.cpp | 84 +++---- nestkernel/sp_manager.cpp | 86 +++---- nestkernel/sparse_node_array.cpp | 3 +- nestkernel/spatial.cpp | 24 +- nestkernel/stimulation_backend_mpi.cpp | 26 ++- nestkernel/stimulation_device.cpp | 6 +- nestkernel/stopwatch.h | 28 +-- 
nestkernel/synaptic_element.cpp | 6 +- nestkernel/target_identifier.h | 4 +- nestkernel/target_table.cpp | 10 +- nestkernel/target_table_devices.cpp | 38 ++-- nestkernel/target_table_devices.h | 8 +- nestkernel/universal_data_logger.h | 34 +-- nestkernel/vp_manager.cpp | 20 +- nestkernel/vp_manager.h | 17 +- 200 files changed, 1663 insertions(+), 1617 deletions(-) diff --git a/doc/htmldoc/developer_space/guidelines/coding_guidelines_cpp.rst b/doc/htmldoc/developer_space/guidelines/coding_guidelines_cpp.rst index e906a612b7..2308936a61 100644 --- a/doc/htmldoc/developer_space/guidelines/coding_guidelines_cpp.rst +++ b/doc/htmldoc/developer_space/guidelines/coding_guidelines_cpp.rst @@ -876,10 +876,10 @@ For example, the ``stopwatch.h`` file could look like: std::enable_if_t< use_threaded_timers and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::start() { - kernel().vp_manager.assert_thread_parallel(); + kernel::manager().assert_thread_parallel(); - walltime_timers_[ kernel().vp_manager.get_thread_id() ].start(); - cputime_timers_[ kernel().vp_manager.get_thread_id() ].start(); + walltime_timers_[ kernel::manager().get_thread_id() ].start(); + cputime_timers_[ kernel::manager().get_thread_id() ].start(); } template < StopwatchGranularity detailed_timer > @@ -889,10 +889,10 @@ For example, the ``stopwatch.h`` file could look like: std::enable_if_t< use_threaded_timers and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::stop() { - kernel().vp_manager.assert_thread_parallel(); + kernel::manager().assert_thread_parallel(); - walltime_timers_[ kernel().vp_manager.get_thread_id() ].stop(); - cputime_timers_[ kernel().vp_manager.get_thread_id() ].stop(); + walltime_timers_[ kernel::manager().get_thread_id() ].stop(); + cputime_timers_[ kernel::manager().get_thread_id() ].stop(); } template < StopwatchGranularity detailed_timer > @@ -902,9 +902,9 @@ For example, the ``stopwatch.h`` file could look like: 
std::enable_if_t< use_threaded_timers and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::is_running_() const { - kernel().vp_manager.assert_thread_parallel(); + kernel::manager().assert_thread_parallel(); - return walltime_timers_[ kernel().vp_manager.get_thread_id() ].is_running_(); + return walltime_timers_[ kernel::manager().get_thread_id() ].is_running_(); } template < StopwatchGranularity detailed_timer > @@ -915,9 +915,9 @@ For example, the ``stopwatch.h`` file could look like: and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::elapsed( timers::timeunit_t timeunit ) const { - kernel().vp_manager.assert_thread_parallel(); + kernel::manager().assert_thread_parallel(); - return walltime_timers_[ kernel().vp_manager.get_thread_id() ].elapsed( timeunit ); + return walltime_timers_[ kernel::manager().get_thread_id() ].elapsed( timeunit ); } template < StopwatchGranularity detailed_timer > @@ -929,9 +929,9 @@ For example, the ``stopwatch.h`` file could look like: timers::timeunit_t timeunit, std::ostream& os ) const { - kernel().vp_manager.assert_thread_parallel(); + kernel::manager().assert_thread_parallel(); - walltime_timers_[ kernel().vp_manager.get_thread_id() ].print( msg, timeunit, os ); + walltime_timers_[ kernel::manager().get_thread_id() ].print( msg, timeunit, os ); } template < StopwatchGranularity detailed_timer > @@ -941,9 +941,9 @@ For example, the ``stopwatch.h`` file could look like: std::enable_if_t< use_threaded_timers and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::reset() { - kernel().vp_manager.assert_single_threaded(); + kernel::manager().assert_single_threaded(); - const size_t num_threads = kernel().vp_manager.get_num_threads(); + const size_t num_threads = kernel::manager().get_num_threads(); walltime_timers_.resize( num_threads ); cputime_timers_.resize( num_threads ); for ( size_t i = 0; i < num_threads; ++i ) diff --git 
a/doc/htmldoc/installation/cmake_options.rst b/doc/htmldoc/installation/cmake_options.rst index 323d3c6127..8f6191a4c4 100644 --- a/doc/htmldoc/installation/cmake_options.rst +++ b/doc/htmldoc/installation/cmake_options.rst @@ -213,7 +213,7 @@ NEST properties +-----------------------------------------------+----------------------------------------------------------------+ | ``-Dwith-full-logging=[OFF|ON]`` | Write debug output to file ``dump__.log`` | | | [default=OFF]. Developers should wrap debugging output in | -| | macro ``FULL_LOGGING_ONLY()`` and call kernel().write_dump()` | +| | macro ``FULL_LOGGING_ONLY()`` and call kernel::manager().write_dump()` | | | from inside it. The macro can contain almost any valid code. | +-----------------------------------------------+----------------------------------------------------------------+ diff --git a/libnestutil/dict_util.h b/libnestutil/dict_util.h index 93582046fe..398e99fe6b 100644 --- a/libnestutil/dict_util.h +++ b/libnestutil/dict_util.h @@ -53,8 +53,8 @@ updateValueParam( DictionaryDatum const& d, Name const n, VT& value, nest::Node* { throw BadParameter( "Cannot use Parameter with this model." 
); } - auto vp = kernel().vp_manager.node_id_to_vp( node->get_node_id() ); - auto tid = kernel().vp_manager.vp_to_thread( vp ); + auto vp = kernel::manager< VPManager >().node_id_to_vp( node->get_node_id() ); + auto tid = kernel::manager< VPManager >().vp_to_thread( vp ); auto rng = get_vp_specific_rng( tid ); value = pd->get()->value( rng, node ); return true; diff --git a/libnestutil/logging.h b/libnestutil/logging.h index 61284b71cc..858723a24d 100644 --- a/libnestutil/logging.h +++ b/libnestutil/logging.h @@ -26,19 +26,21 @@ /** * */ -#define LOG( s, fctn, msg ) nest::kernel().logging_manager.publish_log( ( s ), ( fctn ), ( msg ), __FILE__, __LINE__ ) +#define LOG( s, fctn, msg ) \ + nest::kernel::manager< LoggingManager >().publish_log( ( s ), ( fctn ), ( msg ), __FILE__, __LINE__ ) /** * */ #define ALL_ENTRIES_ACCESSED( d, fctn, msg ) \ - nest::kernel().logging_manager.all_entries_accessed( ( d ), ( fctn ), ( msg ), __FILE__, __LINE__ ) + nest::kernel::manager< LoggingManager >().all_entries_accessed( ( d ), ( fctn ), ( msg ), __FILE__, __LINE__ ) /** * */ -#define ALL_ENTRIES_ACCESSED2( d, fctn, msg1, msg2 ) \ - nest::kernel().logging_manager.all_entries_accessed( ( d ), ( fctn ), ( msg1 ), ( msg2 ), __FILE__, __LINE__ ) +#define ALL_ENTRIES_ACCESSED2( d, fctn, msg1, msg2 ) \ + nest::kernel::manager< LoggingManager >().all_entries_accessed( \ + ( d ), ( fctn ), ( msg1 ), ( msg2 ), __FILE__, __LINE__ ) namespace nest { diff --git a/models/ac_generator.cpp b/models/ac_generator.cpp index fe9d069e01..4b4ec53cec 100644 --- a/models/ac_generator.cpp +++ b/models/ac_generator.cpp @@ -186,7 +186,7 @@ nest::ac_generator::pre_run_hook() StimulationDevice::pre_run_hook(); const double h = Time::get_resolution().get_ms(); - const double t = kernel().simulation_manager.get_time().get_ms(); + const double t = kernel::manager< SimulationManager >().get_time().get_ms(); // scale Hz to ms const double omega = 2.0 * numerics::pi * P_.freq_ / 1000.0; @@ -222,7 +222,7 @@ 
nest::ac_generator::update( Time const& origin, const long from, const long to ) { S_.I_ = S_.y_1_ + P_.offset_; ce.set_current( S_.I_ ); - kernel().event_delivery_manager.send( *this, ce, lag ); + kernel::manager< EventDeliveryManager >().send( *this, ce, lag ); } B_.logger_.record_data( origin.get_steps() + lag ); } diff --git a/models/aeif_cond_alpha.cpp b/models/aeif_cond_alpha.cpp index 94d8b19c9e..84f1e180e8 100644 --- a/models/aeif_cond_alpha.cpp +++ b/models/aeif_cond_alpha.cpp @@ -510,7 +510,7 @@ nest::aeif_cond_alpha::update( Time const& origin, const long from, const long t set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } @@ -539,12 +539,12 @@ nest::aeif_cond_alpha::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -557,7 +557,8 @@ nest::aeif_cond_alpha::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/aeif_cond_alpha_astro.cpp b/models/aeif_cond_alpha_astro.cpp index f6da7c1c6e..321a94db37 100644 --- a/models/aeif_cond_alpha_astro.cpp +++ b/models/aeif_cond_alpha_astro.cpp 
@@ -513,7 +513,7 @@ nest::aeif_cond_alpha_astro::update( Time const& origin, const long from, const set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } @@ -543,12 +543,12 @@ nest::aeif_cond_alpha_astro::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -561,14 +561,15 @@ nest::aeif_cond_alpha_astro::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void nest::aeif_cond_alpha_astro::handle( SICEvent& e ) { const double weight = e.get_weight(); - const long delay = e.get_delay_steps() - kernel().connection_manager.get_min_delay(); + const long delay = e.get_delay_steps() - kernel::manager< ConnectionManager >().get_min_delay(); size_t i = 0; std::vector< unsigned int >::iterator it = e.begin(); diff --git a/models/aeif_cond_alpha_multisynapse.cpp b/models/aeif_cond_alpha_multisynapse.cpp index 64c2707926..48f611904a 100644 --- a/models/aeif_cond_alpha_multisynapse.cpp +++ b/models/aeif_cond_alpha_multisynapse.cpp @@ -549,7 +549,7 @@ aeif_cond_alpha_multisynapse::update( Time const& origin, const long from, const 
set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } @@ -596,7 +596,8 @@ aeif_cond_alpha_multisynapse::handle( SpikeEvent& e ) assert( ( e.get_rport() > 0 ) and ( ( size_t ) e.get_rport() <= P_.n_receptors() ) ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -608,7 +609,8 @@ aeif_cond_alpha_multisynapse::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * I ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * I ); } void diff --git a/models/aeif_cond_beta_multisynapse.cpp b/models/aeif_cond_beta_multisynapse.cpp index 20646dc575..1b4efdb6b5 100644 --- a/models/aeif_cond_beta_multisynapse.cpp +++ b/models/aeif_cond_beta_multisynapse.cpp @@ -558,7 +558,7 @@ aeif_cond_beta_multisynapse::update( Time const& origin, const long from, const set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } @@ -605,7 +605,8 @@ aeif_cond_beta_multisynapse::handle( SpikeEvent& e ) assert( ( e.get_rport() > 0 ) and ( ( size_t ) e.get_rport() <= P_.n_receptors() ) ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + 
e.get_weight() * e.get_multiplicity() ); } void @@ -617,7 +618,8 @@ aeif_cond_beta_multisynapse::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * I ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * I ); } void diff --git a/models/aeif_cond_exp.cpp b/models/aeif_cond_exp.cpp index 5fdcc39845..3d925c078c 100644 --- a/models/aeif_cond_exp.cpp +++ b/models/aeif_cond_exp.cpp @@ -500,7 +500,7 @@ nest::aeif_cond_exp::update( const Time& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } @@ -529,12 +529,12 @@ nest::aeif_cond_exp::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -547,7 +547,8 @@ nest::aeif_cond_exp::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/aeif_psc_alpha.cpp b/models/aeif_psc_alpha.cpp index 
8a60a66f2d..bf0e1cf95c 100644 --- a/models/aeif_psc_alpha.cpp +++ b/models/aeif_psc_alpha.cpp @@ -500,7 +500,7 @@ nest::aeif_psc_alpha::update( Time const& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } @@ -529,12 +529,12 @@ nest::aeif_psc_alpha::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -547,7 +547,8 @@ nest::aeif_psc_alpha::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/aeif_psc_delta.cpp b/models/aeif_psc_delta.cpp index c5762b8461..906fa72bca 100644 --- a/models/aeif_psc_delta.cpp +++ b/models/aeif_psc_delta.cpp @@ -500,7 +500,7 @@ nest::aeif_psc_delta::update( const Time& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } @@ -522,8 +522,8 @@ nest::aeif_psc_delta::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - 
B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -535,7 +535,8 @@ nest::aeif_psc_delta::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/aeif_psc_delta_clopath.cpp b/models/aeif_psc_delta_clopath.cpp index 3ef0c5747c..1989def896 100644 --- a/models/aeif_psc_delta_clopath.cpp +++ b/models/aeif_psc_delta_clopath.cpp @@ -548,7 +548,7 @@ nest::aeif_psc_delta_clopath::update( const Time& origin, const long from, const set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } else if ( S_.clamp_r_ == 1 ) { @@ -601,8 +601,8 @@ nest::aeif_psc_delta_clopath::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -614,7 +614,8 @@ nest::aeif_psc_delta_clopath::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager 
>().get_slice_origin() ), w * c ); } void diff --git a/models/aeif_psc_exp.cpp b/models/aeif_psc_exp.cpp index 3ea33218ef..c158ef8b86 100644 --- a/models/aeif_psc_exp.cpp +++ b/models/aeif_psc_exp.cpp @@ -490,7 +490,7 @@ nest::aeif_psc_exp::update( const Time& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } @@ -518,12 +518,12 @@ nest::aeif_psc_exp::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -536,7 +536,8 @@ nest::aeif_psc_exp::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/amat2_psc_exp.cpp b/models/amat2_psc_exp.cpp index cda1eab17c..5c724a8baa 100644 --- a/models/amat2_psc_exp.cpp +++ b/models/amat2_psc_exp.cpp @@ -406,7 +406,7 @@ nest::amat2_psc_exp::update( Time const& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } else @@ -431,12 
+431,12 @@ nest::amat2_psc_exp::handle( SpikeEvent& e ) if ( e.get_weight() >= 0.0 ) { - B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } @@ -450,7 +450,8 @@ nest::amat2_psc_exp::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/astrocyte_lr_1994.cpp b/models/astrocyte_lr_1994.cpp index d5fb3738e1..838b10a741 100644 --- a/models/astrocyte_lr_1994.cpp +++ b/models/astrocyte_lr_1994.cpp @@ -369,7 +369,7 @@ nest::astrocyte_lr_1994::init_buffers_() B_.spike_exc_.clear(); // includes resize B_.currents_.clear(); B_.sic_values.resize( - kernel().connection_manager.get_min_delay(), 0.0 ); // set size of SIC buffer according to min_delay + kernel::manager< ConnectionManager >().get_min_delay(), 0.0 ); // set size of SIC buffer according to min_delay B_.logger_.reset(); @@ -485,7 +485,7 @@ nest::astrocyte_lr_1994::update( Time const& origin, const long from, const long // send SIC event SICEvent sic; sic.set_coeffarray( B_.sic_values ); - kernel().event_delivery_manager.send_secondary( *this, sic ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, sic ); } void @@ -495,7 +495,7 @@ nest::astrocyte_lr_1994::handle( SpikeEvent& e ) if ( e.get_weight() >= 0.0 ) { - 
B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else @@ -512,7 +512,8 @@ nest::astrocyte_lr_1994::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/binary_neuron.h b/models/binary_neuron.h index d9afac52a7..b0eabd2db4 100644 --- a/models/binary_neuron.h +++ b/models/binary_neuron.h @@ -480,7 +480,7 @@ binary_neuron< TGainfunction >::update( Time const& origin, const long from, con // use multiplicity 2 to signal transition to 1 state // use multiplicity 1 to signal transition to 0 state se.set_multiplicity( new_y ? 
2 : 1 ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); // As multiplicity is used only to signal internal information // to other binary neurons, we only set spiketime once, independent @@ -536,20 +536,21 @@ binary_neuron< TGainfunction >::handle( SpikeEvent& e ) // received twice the same node ID, so transition 0->1 // take double weight to compensate for subtracting first event B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), 2.0 * e.get_weight() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), 2.0 * e.get_weight() ); } else { // count this event negatively, assuming it comes as single event // transition 1->0 B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), -e.get_weight() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() ); } } else if ( m == 2 ) { // count this event positively, transition 0->1 - B_.spikes_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() ); + B_.spikes_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() ); } S_.last_in_node_id_ = node_id; @@ -568,7 +569,8 @@ binary_neuron< TGainfunction >::handle( CurrentEvent& e ) // we use the spike buffer to receive the binary events // but also to handle the incoming current events added // both contributions are directly added to the variable h - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } diff --git a/models/cm_default.cpp b/models/cm_default.cpp index d85159a1d4..9ecd89c654 100644 --- a/models/cm_default.cpp +++ 
b/models/cm_default.cpp @@ -331,7 +331,7 @@ nest::cm_default::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } logger_.record_data( origin.get_steps() + lag ); @@ -350,7 +350,8 @@ nest::cm_default::handle( SpikeEvent& e ) assert( e.get_rport() < syn_buffers_.size() ); syn_buffers_[ e.get_rport() ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -362,7 +363,8 @@ nest::cm_default::handle( CurrentEvent& e ) const double w = e.get_weight(); Compartment* compartment = c_tree_.get_compartment_opt( e.get_rport() ); - compartment->currents.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + compartment->currents.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/cont_delay_synapse.h b/models/cont_delay_synapse.h index 057a69dcc8..3fa7086dd1 100644 --- a/models/cont_delay_synapse.h +++ b/models/cont_delay_synapse.h @@ -288,14 +288,15 @@ cont_delay_synapse< targetidentifierT >::set_status( const DictionaryDatum& d, C if ( frac_delay == 0 ) { - kernel().connection_manager.get_delay_checker().assert_valid_delay_ms( delay ); + kernel::manager< ConnectionManager >().get_delay_checker().assert_valid_delay_ms( delay ); set_delay_steps( Time::delay_ms_to_steps( delay ) ); delay_offset_ = 0.0; } else { const long lowerbound = static_cast< long >( int_delay ); - kernel().connection_manager.get_delay_checker().assert_two_valid_delays_steps( lowerbound, lowerbound + 1 ); + kernel::manager< ConnectionManager 
>().get_delay_checker().assert_two_valid_delays_steps( + lowerbound, lowerbound + 1 ); set_delay_steps( lowerbound + 1 ); delay_offset_ = h * ( 1.0 - frac_delay ); } diff --git a/models/correlomatrix_detector.cpp b/models/correlomatrix_detector.cpp index 7fb08887c7..4ada5dd4a1 100644 --- a/models/correlomatrix_detector.cpp +++ b/models/correlomatrix_detector.cpp @@ -226,11 +226,11 @@ nest::correlomatrix_detector::State_::reset( const Parameters_& p ) count_covariance_.clear(); count_covariance_.resize( p.N_channels_ ); - for ( long i = 0; i < p.N_channels_; ++i ) + for ( size_t i = 0; i < p.N_channels_; ++i ) { covariance_[ i ].resize( p.N_channels_ ); count_covariance_[ i ].resize( p.N_channels_ ); - for ( long j = 0; j < p.N_channels_; ++j ) + for ( size_t j = 0; j < p.N_channels_; ++j ) { covariance_[ i ][ j ].resize( 1 + p.tau_max_.get_steps() / p.delta_tau_.get_steps(), 0 ); count_covariance_[ i ][ j ].resize( 1 + p.tau_max_.get_steps() / p.delta_tau_.get_steps(), 0 ); @@ -325,7 +325,7 @@ nest::correlomatrix_detector::handle( SpikeEvent& e ) // throw away all spikes which are too old to // enter the correlation window - const long min_delay = kernel().connection_manager.get_min_delay(); + const long min_delay = kernel::manager< ConnectionManager >().get_min_delay(); while ( not otherSpikes.empty() and ( spike_i - otherSpikes.front().timestep_ ) >= tau_edge + min_delay ) { otherSpikes.pop_front(); @@ -347,8 +347,8 @@ nest::correlomatrix_detector::handle( SpikeEvent& e ) for ( SpikelistType::const_iterator spike_j = otherSpikes.begin(); spike_j != otherSpikes.end(); ++spike_j ) { size_t bin; - long other = spike_j->receptor_channel_; - long sender_ind, other_ind; + size_t other = spike_j->receptor_channel_; + size_t sender_ind, other_ind; if ( spike_i < spike_j->timestep_ ) { diff --git a/models/correlomatrix_detector.h b/models/correlomatrix_detector.h index ef3fbb0506..2a6a50f7cd 100644 --- a/models/correlomatrix_detector.h +++ 
b/models/correlomatrix_detector.h @@ -211,9 +211,9 @@ class correlomatrix_detector : public Node { long timestep_; double weight_; - long receptor_channel_; + size_t receptor_channel_; - Spike_( long timestep, double weight, long receptorchannel ) + Spike_( long timestep, double weight, size_t receptorchannel ) : timestep_( timestep ) , weight_( weight ) , receptor_channel_( receptorchannel ) diff --git a/models/correlospinmatrix_detector.cpp b/models/correlospinmatrix_detector.cpp index d49d82c0c3..761171fc8b 100644 --- a/models/correlospinmatrix_detector.cpp +++ b/models/correlospinmatrix_detector.cpp @@ -402,7 +402,7 @@ nest::correlospinmatrix_detector::handle( SpikeEvent& e ) } const double tau_edge = P_.tau_max_.get_steps() + P_.delta_tau_.get_steps(); - const long min_delay = kernel().connection_manager.get_min_delay(); + const long min_delay = kernel::manager< ConnectionManager >().get_min_delay(); while ( not otherPulses.empty() and ( t_min_on - otherPulses.front().t_off_ ) >= tau_edge + min_delay ) { otherPulses.pop_front(); diff --git a/models/dc_generator.cpp b/models/dc_generator.cpp index 606a7e2355..e3bbec73da 100644 --- a/models/dc_generator.cpp +++ b/models/dc_generator.cpp @@ -175,7 +175,7 @@ nest::dc_generator::update( Time const& origin, const long from, const long to ) if ( StimulationDevice::is_active( Time::step( start + offs ) ) ) { S_.I_ = P_.amp_; - kernel().event_delivery_manager.send( *this, ce, offs ); + kernel::manager< EventDeliveryManager >().send( *this, ce, offs ); } B_.logger_.record_data( origin.get_steps() + offs ); } diff --git a/models/eprop_iaf.cpp b/models/eprop_iaf.cpp index adf74577af..fbac3c950a 100644 --- a/models/eprop_iaf.cpp +++ b/models/eprop_iaf.cpp @@ -306,7 +306,7 @@ eprop_iaf::update( Time const& origin, const long from, const long to ) if ( S_.v_m_ >= P_.V_th_ and S_.r_ == 0 ) { SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, 
lag ); S_.z_ = 1.0; S_.v_m_ -= P_.V_th_ * S_.z_; @@ -334,8 +334,8 @@ eprop_iaf::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -343,8 +343,8 @@ eprop_iaf::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } void diff --git a/models/eprop_iaf_adapt.cpp b/models/eprop_iaf_adapt.cpp index 7f52ed0554..8399b981dd 100644 --- a/models/eprop_iaf_adapt.cpp +++ b/models/eprop_iaf_adapt.cpp @@ -345,7 +345,7 @@ eprop_iaf_adapt::update( Time const& origin, const long from, const long to ) if ( S_.v_m_ >= S_.v_th_adapt_ and S_.r_ == 0 ) { SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); S_.z_ = 1.0; S_.v_m_ -= P_.V_th_ * S_.z_; @@ -373,8 +373,8 @@ eprop_iaf_adapt::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -382,8 +382,8 @@ eprop_iaf_adapt::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + 
B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } void diff --git a/models/eprop_iaf_adapt_bsshslm_2020.cpp b/models/eprop_iaf_adapt_bsshslm_2020.cpp index b665212e0c..f100867005 100644 --- a/models/eprop_iaf_adapt_bsshslm_2020.cpp +++ b/models/eprop_iaf_adapt_bsshslm_2020.cpp @@ -299,8 +299,8 @@ eprop_iaf_adapt_bsshslm_2020::pre_run_hook() void eprop_iaf_adapt_bsshslm_2020::update( Time const& origin, const long from, const long to ) { - const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps(); - const bool with_reset = kernel().simulation_manager.get_eprop_reset_neurons_on_update(); + const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); + const bool with_reset = kernel::manager< SimulationManager >().get_eprop_reset_neurons_on_update(); const long shift = get_shift(); for ( long lag = from; lag < to; ++lag ) @@ -346,7 +346,7 @@ eprop_iaf_adapt_bsshslm_2020::update( Time const& origin, const long from, const count_spike(); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); S_.z_ = 1.0; S_.r_ = V_.RefractoryCounts_; @@ -378,8 +378,8 @@ eprop_iaf_adapt_bsshslm_2020::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -387,8 +387,8 @@ eprop_iaf_adapt_bsshslm_2020::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + 
B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } void @@ -454,8 +454,8 @@ eprop_iaf_adapt_bsshslm_2020::compute_gradient( std::vector< long >& presyn_isis } presyn_isis.clear(); - const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps(); - const long learning_window = kernel().simulation_manager.get_eprop_learning_window().get_steps(); + const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); + const long learning_window = kernel::manager< SimulationManager >().get_eprop_learning_window().get_steps(); const auto firing_rate_reg = get_firing_rate_reg_history( t_previous_update + get_shift() + update_interval ); grad += firing_rate_reg * sum_e; diff --git a/models/eprop_iaf_bsshslm_2020.cpp b/models/eprop_iaf_bsshslm_2020.cpp index 2a6a4f9aac..3f6dc1d378 100644 --- a/models/eprop_iaf_bsshslm_2020.cpp +++ b/models/eprop_iaf_bsshslm_2020.cpp @@ -264,8 +264,8 @@ eprop_iaf_bsshslm_2020::pre_run_hook() void eprop_iaf_bsshslm_2020::update( Time const& origin, const long from, const long to ) { - const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps(); - const bool with_reset = kernel().simulation_manager.get_eprop_reset_neurons_on_update(); + const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); + const bool with_reset = kernel::manager< SimulationManager >().get_eprop_reset_neurons_on_update(); const long shift = get_shift(); for ( long lag = from; lag < to; ++lag ) @@ -306,7 +306,7 @@ eprop_iaf_bsshslm_2020::update( Time const& origin, const long from, const long count_spike(); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); S_.z_ = 1.0; S_.r_ = V_.RefractoryCounts_; @@ -338,8 +338,8 @@ 
eprop_iaf_bsshslm_2020::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -347,8 +347,8 @@ eprop_iaf_bsshslm_2020::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } void @@ -412,8 +412,8 @@ eprop_iaf_bsshslm_2020::compute_gradient( std::vector< long >& presyn_isis, } presyn_isis.clear(); - const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps(); - const long learning_window = kernel().simulation_manager.get_eprop_learning_window().get_steps(); + const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); + const long learning_window = kernel::manager< SimulationManager >().get_eprop_learning_window().get_steps(); const auto firing_rate_reg = get_firing_rate_reg_history( t_previous_update + get_shift() + update_interval ); grad += firing_rate_reg * sum_e; diff --git a/models/eprop_iaf_psc_delta.cpp b/models/eprop_iaf_psc_delta.cpp index bcc83788b5..e9a19b86ef 100644 --- a/models/eprop_iaf_psc_delta.cpp +++ b/models/eprop_iaf_psc_delta.cpp @@ -341,7 +341,7 @@ eprop_iaf_psc_delta::update( Time const& origin, const long from, const long to S_.v_m_ = P_.V_reset_; SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); z = 1.0; } @@ -367,8 +367,8 @@ eprop_iaf_psc_delta::handle( 
SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -376,8 +376,8 @@ eprop_iaf_psc_delta::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } void diff --git a/models/eprop_iaf_psc_delta_adapt.cpp b/models/eprop_iaf_psc_delta_adapt.cpp index f720140773..13b43acbe4 100644 --- a/models/eprop_iaf_psc_delta_adapt.cpp +++ b/models/eprop_iaf_psc_delta_adapt.cpp @@ -380,7 +380,7 @@ eprop_iaf_psc_delta_adapt::update( Time const& origin, const long from, const lo S_.v_m_ = P_.V_reset_; SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); S_.z_ = 1.0; } @@ -406,8 +406,8 @@ eprop_iaf_psc_delta_adapt::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -415,8 +415,8 @@ eprop_iaf_psc_delta_adapt::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager 
>().get_slice_origin() ), + e.get_weight() * e.get_current() ); } void diff --git a/models/eprop_readout.cpp b/models/eprop_readout.cpp index 66011a4d60..eb946125f6 100644 --- a/models/eprop_readout.cpp +++ b/models/eprop_readout.cpp @@ -215,7 +215,7 @@ eprop_readout::pre_run_hook() void eprop_readout::update( Time const& origin, const long from, const long to ) { - const size_t buffer_size = kernel().connection_manager.get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); std::vector< double > error_signal_buffer( buffer_size, 0.0 ); @@ -247,7 +247,7 @@ eprop_readout::update( Time const& origin, const long from, const long to ) LearningSignalConnectionEvent error_signal_event; error_signal_event.set_coeffarray( error_signal_buffer ); - kernel().event_delivery_manager.send_secondary( *this, error_signal_event ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, error_signal_event ); return; } @@ -283,8 +283,8 @@ eprop_readout::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -292,8 +292,8 @@ eprop_readout::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } void diff --git a/models/eprop_readout.h b/models/eprop_readout.h index edaa2ae39b..dce5951585 100644 --- a/models/eprop_readout.h +++ b/models/eprop_readout.h @@ -517,7 +517,7 @@ eprop_readout::handles_test_event( CurrentEvent&, size_t 
receptor_type ) inline size_t eprop_readout::handles_test_event( DelayedRateConnectionEvent& e, size_t receptor_type ) { - size_t step_rate_model_id = kernel().model_manager.get_node_model_id( "step_rate_generator" ); + size_t step_rate_model_id = kernel::manager< ModelManager >().get_node_model_id( "step_rate_generator" ); size_t model_id = e.get_sender().get_model_id(); if ( step_rate_model_id == model_id and receptor_type != TARGET_SIG and receptor_type != LEARNING_WINDOW_SIG ) diff --git a/models/eprop_readout_bsshslm_2020.cpp b/models/eprop_readout_bsshslm_2020.cpp index f2e903aa2b..7aa5a5994b 100644 --- a/models/eprop_readout_bsshslm_2020.cpp +++ b/models/eprop_readout_bsshslm_2020.cpp @@ -231,12 +231,12 @@ eprop_readout_bsshslm_2020::pre_run_hook() void eprop_readout_bsshslm_2020::update( Time const& origin, const long from, const long to ) { - const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps(); - const long learning_window = kernel().simulation_manager.get_eprop_learning_window().get_steps(); - const bool with_reset = kernel().simulation_manager.get_eprop_reset_neurons_on_update(); + const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); + const long learning_window = kernel::manager< SimulationManager >().get_eprop_learning_window().get_steps(); + const bool with_reset = kernel::manager< SimulationManager >().get_eprop_reset_neurons_on_update(); const long shift = get_shift(); - const size_t buffer_size = kernel().connection_manager.get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); std::vector< double > error_signal_buffer( buffer_size, 0.0 ); std::vector< double > readout_signal_unnorm_buffer( buffer_size, 0.0 ); @@ -290,7 +290,7 @@ eprop_readout_bsshslm_2020::update( Time const& origin, const long from, const l LearningSignalConnectionEvent error_signal_event; error_signal_event.set_coeffarray( 
error_signal_buffer ); - kernel().event_delivery_manager.send_secondary( *this, error_signal_event ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, error_signal_event ); if ( V_.signal_to_other_readouts_ ) { @@ -299,7 +299,7 @@ eprop_readout_bsshslm_2020::update( Time const& origin, const long from, const l // in the next times step for computing the normalized readout signal DelayedRateConnectionEvent readout_signal_unnorm_event; readout_signal_unnorm_event.set_coeffarray( readout_signal_unnorm_buffer ); - kernel().event_delivery_manager.send_secondary( *this, readout_signal_unnorm_event ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, readout_signal_unnorm_event ); } return; } @@ -356,8 +356,8 @@ eprop_readout_bsshslm_2020::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -365,8 +365,8 @@ eprop_readout_bsshslm_2020::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } void @@ -408,7 +408,7 @@ eprop_readout_bsshslm_2020::compute_gradient( std::vector< long >& presyn_isis, } presyn_isis.clear(); - const long learning_window = kernel().simulation_manager.get_eprop_learning_window().get_steps(); + const long learning_window = kernel::manager< SimulationManager >().get_eprop_learning_window().get_steps(); if ( average_gradient ) { grad /= learning_window; diff --git a/models/eprop_readout_bsshslm_2020.h 
b/models/eprop_readout_bsshslm_2020.h index 38e5ac95be..677ff85529 100644 --- a/models/eprop_readout_bsshslm_2020.h +++ b/models/eprop_readout_bsshslm_2020.h @@ -525,7 +525,7 @@ eprop_readout_bsshslm_2020::handles_test_event( CurrentEvent&, size_t receptor_t inline size_t eprop_readout_bsshslm_2020::handles_test_event( DelayedRateConnectionEvent& e, size_t receptor_type ) { - size_t step_rate_model_id = kernel().model_manager.get_node_model_id( "step_rate_generator" ); + size_t step_rate_model_id = kernel::manager< ModelManager >().get_node_model_id( "step_rate_generator" ); size_t model_id = e.get_sender().get_model_id(); if ( step_rate_model_id == model_id and receptor_type != TARGET_SIG ) diff --git a/models/eprop_synapse.cpp b/models/eprop_synapse.cpp index 59b8c40fba..8b5d87db4d 100644 --- a/models/eprop_synapse.cpp +++ b/models/eprop_synapse.cpp @@ -73,7 +73,7 @@ EpropSynapseCommonProperties::set_status( const DictionaryDatum& d, ConnectorMod const bool set_optimizer = updateValue< std::string >( optimizer_dict, names::type, new_optimizer ); if ( set_optimizer and new_optimizer != optimizer_cp_->get_name() ) { - if ( kernel().connection_manager.get_num_connections( cm.get_syn_id() ) > 0 ) + if ( kernel::manager< ConnectionManager >().get_num_connections( cm.get_syn_id() ) > 0 ) { throw BadParameter( "The optimizer cannot be changed because synapses have been created." 
); } diff --git a/models/eprop_synapse_bsshslm_2020.cpp b/models/eprop_synapse_bsshslm_2020.cpp index de41bd993f..1f7e41a4dd 100644 --- a/models/eprop_synapse_bsshslm_2020.cpp +++ b/models/eprop_synapse_bsshslm_2020.cpp @@ -78,7 +78,7 @@ EpropSynapseBSSHSLM2020CommonProperties::set_status( const DictionaryDatum& d, C const bool set_optimizer = updateValue< std::string >( optimizer_dict, names::type, new_optimizer ); if ( set_optimizer and new_optimizer != optimizer_cp_->get_name() ) { - if ( kernel().connection_manager.get_num_connections( cm.get_syn_id() ) > 0 ) + if ( kernel::manager< ConnectionManager >().get_num_connections( cm.get_syn_id() ) > 0 ) { throw BadParameter( "The optimizer cannot be changed because synapses have been created." ); } diff --git a/models/eprop_synapse_bsshslm_2020.h b/models/eprop_synapse_bsshslm_2020.h index 3ac9144827..5d404cd0f6 100644 --- a/models/eprop_synapse_bsshslm_2020.h +++ b/models/eprop_synapse_bsshslm_2020.h @@ -408,7 +408,7 @@ eprop_synapse_bsshslm_2020< targetidentifierT >::eprop_synapse_bsshslm_2020( con , weight_( es.weight_ ) , t_spike_previous_( 0 ) , t_previous_update_( 0 ) - , t_next_update_( kernel().simulation_manager.get_eprop_update_interval().get_steps() ) + , t_next_update_( kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps() ) , t_previous_trigger_spike_( 0 ) , tau_m_readout_( es.tau_m_readout_ ) , kappa_( std::exp( -Time::get_resolution().get_ms() / tau_m_readout_ ) ) @@ -524,7 +524,7 @@ eprop_synapse_bsshslm_2020< targetidentifierT >::send( Event& e, assert( target ); const long t_spike = e.get_stamp().get_steps(); - const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps(); + const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); const long shift = target->get_shift(); const long interval_step = ( t_spike - shift ) % update_interval; diff --git a/models/gamma_sup_generator.cpp 
b/models/gamma_sup_generator.cpp index 96c59098a4..fef9caf2bf 100644 --- a/models/gamma_sup_generator.cpp +++ b/models/gamma_sup_generator.cpp @@ -246,7 +246,7 @@ nest::gamma_sup_generator::update( Time const& T, const long from, const long to } DSSpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } diff --git a/models/gif_cond_exp.cpp b/models/gif_cond_exp.cpp index 9bb5ef88c4..92a29fa9bd 100644 --- a/models/gif_cond_exp.cpp +++ b/models/gif_cond_exp.cpp @@ -556,7 +556,7 @@ nest::gif_cond_exp::update( Time const& origin, const long from, const long to ) // And send the spike event set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } } @@ -585,12 +585,12 @@ nest::gif_cond_exp::handle( SpikeEvent& e ) // is clumsy and should be improved. if ( e.get_weight() >= 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } // keep conductance positive } @@ -604,7 +604,8 @@ nest::gif_cond_exp::handle( CurrentEvent& e ) const double w = e.get_weight(); // Add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git 
a/models/gif_cond_exp_multisynapse.cpp b/models/gif_cond_exp_multisynapse.cpp index d3921d7484..14bb0fba8c 100644 --- a/models/gif_cond_exp_multisynapse.cpp +++ b/models/gif_cond_exp_multisynapse.cpp @@ -563,7 +563,7 @@ nest::gif_cond_exp_multisynapse::update( Time const& origin, const long from, co // And send the spike event set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } } @@ -595,7 +595,8 @@ nest::gif_cond_exp_multisynapse::handle( SpikeEvent& e ) assert( ( e.get_rport() > 0 ) and ( ( size_t ) e.get_rport() <= P_.n_receptors() ) ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -607,7 +608,8 @@ nest::gif_cond_exp_multisynapse::handle( CurrentEvent& e ) const double w = e.get_weight(); // Add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * I ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * I ); } void diff --git a/models/gif_pop_psc_exp.cpp b/models/gif_pop_psc_exp.cpp index 32e3ffabb0..25ef1ffa81 100644 --- a/models/gif_pop_psc_exp.cpp +++ b/models/gif_pop_psc_exp.cpp @@ -640,7 +640,7 @@ nest::gif_pop_psc_exp::update( Time const& origin, const long from, const long t SpikeEvent* se; se = new SpikeEvent; se->set_multiplicity( S_.n_spikes_ ); - kernel().event_delivery_manager.send( *this, *se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, *se, lag ); } } } @@ -654,11 +654,11 @@ gif_pop_psc_exp::handle( SpikeEvent& e ) if ( s > 0.0 ) { - B_.ex_spikes_.add_value( 
e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), s ); + B_.ex_spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), s ); } else { - B_.in_spikes_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), s ); + B_.in_spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), s ); } } @@ -671,7 +671,8 @@ nest::gif_pop_psc_exp::handle( CurrentEvent& e ) const double w = e.get_weight(); // Add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/gif_psc_exp.cpp b/models/gif_psc_exp.cpp index fd6c5ca2eb..880d51733d 100644 --- a/models/gif_psc_exp.cpp +++ b/models/gif_psc_exp.cpp @@ -378,7 +378,7 @@ nest::gif_psc_exp::update( Time const& origin, const long from, const long to ) // And send the spike event set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } } @@ -408,12 +408,12 @@ nest::gif_psc_exp::handle( SpikeEvent& e ) // is clumsy and should be improved. 
if ( e.get_weight() >= 0.0 ) { - B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } @@ -427,7 +427,8 @@ nest::gif_psc_exp::handle( CurrentEvent& e ) const double w = e.get_weight(); // Add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/gif_psc_exp_multisynapse.cpp b/models/gif_psc_exp_multisynapse.cpp index 8502c0db22..0b9894ba13 100644 --- a/models/gif_psc_exp_multisynapse.cpp +++ b/models/gif_psc_exp_multisynapse.cpp @@ -405,7 +405,7 @@ nest::gif_psc_exp_multisynapse::update( Time const& origin, const long from, con // And send the spike event set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } } @@ -431,7 +431,8 @@ gif_psc_exp_multisynapse::handle( SpikeEvent& e ) assert( ( e.get_rport() > 0 ) and ( ( size_t ) e.get_rport() <= P_.n_receptors_() ) ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -443,7 +444,8 @@ 
nest::gif_psc_exp_multisynapse::handle( CurrentEvent& e ) const double w = e.get_weight(); // Add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/glif_cond.cpp b/models/glif_cond.cpp index c6ad3f0895..6532b611f6 100644 --- a/models/glif_cond.cpp +++ b/models/glif_cond.cpp @@ -737,7 +737,7 @@ nest::glif_cond::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } else @@ -788,7 +788,8 @@ nest::glif_cond::handle( SpikeEvent& e ) assert( e.get_delay_steps() > 0 ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -796,8 +797,8 @@ nest::glif_cond::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } #endif // HAVE_GSL diff --git a/models/glif_psc.cpp b/models/glif_psc.cpp index 4eb7e52ddc..4595481a2a 100644 --- a/models/glif_psc.cpp +++ b/models/glif_psc.cpp @@ -561,7 +561,7 @@ glif_psc::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, 
lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } else diff --git a/models/glif_psc.h b/models/glif_psc.h index 1215624ac7..a5ede89930 100644 --- a/models/glif_psc.h +++ b/models/glif_psc.h @@ -485,7 +485,8 @@ glif_psc::handle( SpikeEvent& e ) assert( e.get_delay_steps() > 0 ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } inline void @@ -493,8 +494,8 @@ glif_psc::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } } // namespace nest diff --git a/models/glif_psc_double_alpha.cpp b/models/glif_psc_double_alpha.cpp index abf6ff3a6d..08114d2b59 100644 --- a/models/glif_psc_double_alpha.cpp +++ b/models/glif_psc_double_alpha.cpp @@ -626,7 +626,7 @@ nest::glif_psc_double_alpha::update( Time const& origin, const long from, const set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } else @@ -684,7 +684,8 @@ nest::glif_psc_double_alpha::handle( SpikeEvent& e ) assert( e.get_delay_steps() > 0 ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -692,6 +693,6 @@ nest::glif_psc_double_alpha::handle( 
CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } diff --git a/models/hh_cond_beta_gap_traub.cpp b/models/hh_cond_beta_gap_traub.cpp index 958b3396c7..85fc85adf4 100644 --- a/models/hh_cond_beta_gap_traub.cpp +++ b/models/hh_cond_beta_gap_traub.cpp @@ -104,7 +104,7 @@ hh_cond_beta_gap_traub_dynamics( double time, const double y[], double f[], void const double t = time / node.B_.step_; - switch ( kernel().simulation_manager.get_wfr_interpolation_order() ) + switch ( kernel::manager< SimulationManager >().get_wfr_interpolation_order() ) { case 0: gap = -node.B_.sumj_g_ij_ * y[ S::V_M ] + node.B_.interpolation_coefficients[ node.B_.lag_ ]; @@ -342,7 +342,7 @@ nest::hh_cond_beta_gap_traub::hh_cond_beta_gap_traub() , B_( *this ) { recordablesMap_.create(); - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); } nest::hh_cond_beta_gap_traub::hh_cond_beta_gap_traub( const hh_cond_beta_gap_traub& n ) @@ -351,7 +351,7 @@ nest::hh_cond_beta_gap_traub::hh_cond_beta_gap_traub( const hh_cond_beta_gap_tra , S_( n.S_ ) , B_( n.B_, *this ) { - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); } nest::hh_cond_beta_gap_traub::~hh_cond_beta_gap_traub() @@ -392,12 +392,12 @@ nest::hh_cond_beta_gap_traub::init_buffers_() // per min_delay step) // resize interpolation_coefficients depending on interpolation order - const size_t buffer_size = - kernel().connection_manager.get_min_delay() * ( kernel().simulation_manager.get_wfr_interpolation_order() + 1 ); + const size_t buffer_size = kernel::manager< 
ConnectionManager >().get_min_delay() + * ( kernel::manager< SimulationManager >().get_wfr_interpolation_order() + 1 ); B_.interpolation_coefficients.resize( buffer_size, 0.0 ); - B_.last_y_values.resize( kernel().connection_manager.get_min_delay(), 0.0 ); + B_.last_y_values.resize( kernel::manager< ConnectionManager >().get_min_delay(), 0.0 ); B_.sumj_g_ij_ = 0.0; @@ -475,13 +475,13 @@ nest::hh_cond_beta_gap_traub::update_( Time const& origin, const long to, const bool called_from_wfr_update ) { - const size_t interpolation_order = kernel().simulation_manager.get_wfr_interpolation_order(); - const double wfr_tol = kernel().simulation_manager.get_wfr_tol(); + const size_t interpolation_order = kernel::manager< SimulationManager >().get_wfr_interpolation_order(); + const double wfr_tol = kernel::manager< SimulationManager >().get_wfr_tol(); bool wfr_tol_exceeded = false; // allocate memory to store the new interpolation coefficients // to be sent by gap event - const size_t buffer_size = kernel().connection_manager.get_min_delay() * ( interpolation_order + 1 ); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay() * ( interpolation_order + 1 ); std::vector< double > new_coefficients( buffer_size, 0.0 ); // parameters needed for piecewise interpolation @@ -554,7 +554,7 @@ nest::hh_cond_beta_gap_traub::update_( Time const& origin, set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // log state data @@ -614,13 +614,13 @@ nest::hh_cond_beta_gap_traub::update_( Time const& origin, new_coefficients[ temp * ( interpolation_order + 1 ) + 0 ] = S_.y_[ State_::V_M ]; } - std::vector< double >( kernel().connection_manager.get_min_delay(), 0.0 ).swap( B_.last_y_values ); + std::vector< double >( kernel::manager< ConnectionManager >().get_min_delay(), 0.0 ).swap( B_.last_y_values ); } // Send 
gap-event GapJunctionEvent ge; ge.set_coeffarray( new_coefficients ); - kernel().event_delivery_manager.send_secondary( *this, ge ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, ge ); // Reset variables B_.sumj_g_ij_ = 0.0; @@ -636,14 +636,14 @@ nest::hh_cond_beta_gap_traub::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { // add with negative weight, ie positive value, since we are changing a // conductance - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -657,7 +657,8 @@ nest::hh_cond_beta_gap_traub::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/hh_cond_exp_traub.cpp b/models/hh_cond_exp_traub.cpp index 86aae90aba..2b354b7e1b 100644 --- a/models/hh_cond_exp_traub.cpp +++ b/models/hh_cond_exp_traub.cpp @@ -428,7 +428,7 @@ nest::hh_cond_exp_traub::update( Time const& origin, const long from, const long set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } @@ -447,14 +447,14 @@ nest::hh_cond_exp_traub::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( 
kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { // add with negative weight, ie positive value, since we are changing a // conductance - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -468,7 +468,8 @@ nest::hh_cond_exp_traub::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/hh_psc_alpha.cpp b/models/hh_psc_alpha.cpp index d80b410956..21c1d1c9a1 100644 --- a/models/hh_psc_alpha.cpp +++ b/models/hh_psc_alpha.cpp @@ -437,7 +437,7 @@ nest::hh_psc_alpha::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // log state data @@ -455,12 +455,12 @@ nest::hh_psc_alpha::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager 
>().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } @@ -473,7 +473,8 @@ nest::hh_psc_alpha::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/hh_psc_alpha_clopath.cpp b/models/hh_psc_alpha_clopath.cpp index 287f8ec5c6..b0cff794c6 100644 --- a/models/hh_psc_alpha_clopath.cpp +++ b/models/hh_psc_alpha_clopath.cpp @@ -474,7 +474,7 @@ nest::hh_psc_alpha_clopath::update( Time const& origin, const long from, const l set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // log state data @@ -492,12 +492,12 @@ nest::hh_psc_alpha_clopath::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } // current input, keep negative weight } @@ -511,7 +511,8 @@ nest::hh_psc_alpha_clopath::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager 
>().get_slice_origin() ), w * c ); } void diff --git a/models/hh_psc_alpha_gap.cpp b/models/hh_psc_alpha_gap.cpp index 10076c0507..be62091213 100644 --- a/models/hh_psc_alpha_gap.cpp +++ b/models/hh_psc_alpha_gap.cpp @@ -112,7 +112,7 @@ hh_psc_alpha_gap_dynamics( double time, const double y[], double f[], void* pnod const double t = time / node.B_.step_; - switch ( kernel().simulation_manager.get_wfr_interpolation_order() ) + switch ( kernel::manager< SimulationManager >().get_wfr_interpolation_order() ) { case 0: gap = -node.B_.sumj_g_ij_ * V + node.B_.interpolation_coefficients[ node.B_.lag_ ]; @@ -333,7 +333,7 @@ nest::hh_psc_alpha_gap::hh_psc_alpha_gap() , B_( *this ) { recordablesMap_.create(); - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); } nest::hh_psc_alpha_gap::hh_psc_alpha_gap( const hh_psc_alpha_gap& n ) @@ -342,7 +342,7 @@ nest::hh_psc_alpha_gap::hh_psc_alpha_gap( const hh_psc_alpha_gap& n ) , S_( n.S_ ) , B_( n.B_, *this ) { - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); } nest::hh_psc_alpha_gap::~hh_psc_alpha_gap() @@ -383,12 +383,12 @@ nest::hh_psc_alpha_gap::init_buffers_() // per min_delay step) // resize interpolation_coefficients depending on interpolation order - const size_t buffer_size = - kernel().connection_manager.get_min_delay() * ( kernel().simulation_manager.get_wfr_interpolation_order() + 1 ); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay() + * ( kernel::manager< SimulationManager >().get_wfr_interpolation_order() + 1 ); B_.interpolation_coefficients.resize( buffer_size, 0.0 ); - B_.last_y_values.resize( kernel().connection_manager.get_min_delay(), 0.0 ); + B_.last_y_values.resize( kernel::manager< ConnectionManager >().get_min_delay(), 0.0 ); B_.sumj_g_ij_ = 0.0; @@ -454,13 +454,13 @@ 
nest::hh_psc_alpha_gap::pre_run_hook() bool nest::hh_psc_alpha_gap::update_( Time const& origin, const long from, const long to, const bool called_from_wfr_update ) { - const size_t interpolation_order = kernel().simulation_manager.get_wfr_interpolation_order(); - const double wfr_tol = kernel().simulation_manager.get_wfr_tol(); + const size_t interpolation_order = kernel::manager< SimulationManager >().get_wfr_interpolation_order(); + const double wfr_tol = kernel::manager< SimulationManager >().get_wfr_tol(); bool wfr_tol_exceeded = false; // allocate memory to store the new interpolation coefficients // to be sent by gap event - const size_t buffer_size = kernel().connection_manager.get_min_delay() * ( interpolation_order + 1 ); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay() * ( interpolation_order + 1 ); std::vector< double > new_coefficients( buffer_size, 0.0 ); // parameters needed for piecewise interpolation @@ -535,7 +535,7 @@ nest::hh_psc_alpha_gap::update_( Time const& origin, const long from, const long set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // log state data @@ -595,13 +595,13 @@ nest::hh_psc_alpha_gap::update_( Time const& origin, const long from, const long new_coefficients[ temp * ( interpolation_order + 1 ) + 0 ] = S_.y_[ State_::V_M ]; } - std::vector< double >( kernel().connection_manager.get_min_delay(), 0.0 ).swap( B_.last_y_values ); + std::vector< double >( kernel::manager< ConnectionManager >().get_min_delay(), 0.0 ).swap( B_.last_y_values ); } // Send gap-event GapJunctionEvent ge; ge.set_coeffarray( new_coefficients ); - kernel().event_delivery_manager.send_secondary( *this, ge ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, ge ); // Reset variables B_.sumj_g_ij_ = 0.0; @@ -617,12 +617,12 @@ 
nest::hh_psc_alpha_gap::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } @@ -635,7 +635,8 @@ nest::hh_psc_alpha_gap::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/ht_neuron.cpp b/models/ht_neuron.cpp index 48bf8575a6..2bc3461c6b 100644 --- a/models/ht_neuron.cpp +++ b/models/ht_neuron.cpp @@ -822,7 +822,7 @@ ht_neuron::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } @@ -855,7 +855,8 @@ nest::ht_neuron::handle( SpikeEvent& e ) assert( e.get_rport() < B_.spike_inputs_.size() ); B_.spike_inputs_[ e.get_rport() ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -867,7 +868,8 @@ nest::ht_neuron::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - 
B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * I ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * I ); } void diff --git a/models/iaf_bw_2001.cpp b/models/iaf_bw_2001.cpp index 9c57db6753..f1f97f45e8 100644 --- a/models/iaf_bw_2001.cpp +++ b/models/iaf_bw_2001.cpp @@ -407,7 +407,7 @@ nest::iaf_bw_2001::pre_run_hook() void nest::iaf_bw_2001::update( Time const& origin, const long from, const long to ) { - std::vector< double > s_vals( kernel().connection_manager.get_min_delay(), 0.0 ); + std::vector< double > s_vals( kernel::manager< ConnectionManager >().get_min_delay(), 0.0 ); for ( long lag = from; lag < to; ++lag ) { double t = 0.0; @@ -473,7 +473,7 @@ nest::iaf_bw_2001::update( Time const& origin, const long from, const long to ) SpikeEvent se; se.set_offset( s_NMDA_delta ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // set new input current diff --git a/models/iaf_bw_2001.h b/models/iaf_bw_2001.h index 450b044c07..380ce95d05 100644 --- a/models/iaf_bw_2001.h +++ b/models/iaf_bw_2001.h @@ -535,7 +535,7 @@ iaf_bw_2001::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - const double steps = e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ); + const double steps = e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ); const auto rport = e.get_rport(); @@ -554,8 +554,8 @@ iaf_bw_2001::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } } // namespace diff --git 
a/models/iaf_bw_2001_exact.cpp b/models/iaf_bw_2001_exact.cpp index 64b67e96db..5759956951 100644 --- a/models/iaf_bw_2001_exact.cpp +++ b/models/iaf_bw_2001_exact.cpp @@ -504,7 +504,7 @@ nest::iaf_bw_2001_exact::update( Time const& origin, const long from, const long set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // set new input current diff --git a/models/iaf_bw_2001_exact.h b/models/iaf_bw_2001_exact.h index ff933e19a3..a149f0be7c 100644 --- a/models/iaf_bw_2001_exact.h +++ b/models/iaf_bw_2001_exact.h @@ -553,7 +553,7 @@ nest::iaf_bw_2001_exact::handle( SpikeEvent& e ) assert( e.get_delay_steps() > 0 ); assert( e.get_rport() <= static_cast< int >( B_.spikes_.size() ) ); - const double steps = e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ); + const double steps = e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ); const auto rport = e.get_rport(); if ( rport < NMDA ) @@ -584,8 +584,8 @@ nest::iaf_bw_2001_exact::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } } // namespace diff --git a/models/iaf_chs_2007.cpp b/models/iaf_chs_2007.cpp index e947b03bef..95cd2089af 100644 --- a/models/iaf_chs_2007.cpp +++ b/models/iaf_chs_2007.cpp @@ -251,7 +251,7 @@ nest::iaf_chs_2007::update( const Time& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // log state 
data @@ -266,7 +266,7 @@ nest::iaf_chs_2007::handle( SpikeEvent& e ) if ( e.get_weight() >= 0.0 ) { - B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } diff --git a/models/iaf_chxk_2008.cpp b/models/iaf_chxk_2008.cpp index fb847e3d8e..e671d7e8d2 100644 --- a/models/iaf_chxk_2008.cpp +++ b/models/iaf_chxk_2008.cpp @@ -424,7 +424,7 @@ nest::iaf_chxk_2008::update( Time const& origin, const long from, const long to SpikeEvent se; se.set_offset( dt ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // add incoming spikes @@ -446,12 +446,12 @@ nest::iaf_chxk_2008::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -461,8 +461,8 @@ nest::iaf_chxk_2008::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } void diff --git a/models/iaf_cond_alpha.cpp b/models/iaf_cond_alpha.cpp index ff30457d89..5ed389f3a4 100644 --- a/models/iaf_cond_alpha.cpp +++ 
b/models/iaf_cond_alpha.cpp @@ -424,7 +424,7 @@ nest::iaf_cond_alpha::update( Time const& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // add incoming spikes @@ -446,12 +446,12 @@ nest::iaf_cond_alpha::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -461,8 +461,8 @@ nest::iaf_cond_alpha::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } void diff --git a/models/iaf_cond_alpha_mc.cpp b/models/iaf_cond_alpha_mc.cpp index 711bf921f2..be8debf186 100644 --- a/models/iaf_cond_alpha_mc.cpp +++ b/models/iaf_cond_alpha_mc.cpp @@ -638,7 +638,7 @@ nest::iaf_cond_alpha_mc::update( Time const& origin, const long from, const long set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // set new input currents @@ -659,7 +659,8 @@ nest::iaf_cond_alpha_mc::handle( SpikeEvent& e ) assert( e.get_rport() < 2 * NCOMP 
); B_.spikes_[ e.get_rport() ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -671,7 +672,8 @@ nest::iaf_cond_alpha_mc::handle( CurrentEvent& e ) // add weighted current; HEP 2002-10-04 B_.currents_[ e.get_rport() ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } void diff --git a/models/iaf_cond_beta.cpp b/models/iaf_cond_beta.cpp index 8a1a537b69..0e266b6e34 100644 --- a/models/iaf_cond_beta.cpp +++ b/models/iaf_cond_beta.cpp @@ -437,7 +437,7 @@ nest::iaf_cond_beta::update( Time const& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // add incoming spikes @@ -459,12 +459,12 @@ nest::iaf_cond_beta::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } // ensure conductance is positive } @@ -475,8 +475,8 @@ nest::iaf_cond_beta::handle( CurrentEvent& e ) assert( e.get_delay_steps() > 0 ); // add weighted current; HEP 2002-10-04 - 
B_.currents_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } void diff --git a/models/iaf_cond_exp.cpp b/models/iaf_cond_exp.cpp index 860853d149..5035cd23ab 100644 --- a/models/iaf_cond_exp.cpp +++ b/models/iaf_cond_exp.cpp @@ -401,7 +401,7 @@ nest::iaf_cond_exp::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // set new input current @@ -419,12 +419,12 @@ nest::iaf_cond_exp::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -437,7 +437,8 @@ nest::iaf_cond_exp::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/iaf_cond_exp_sfa_rr.cpp b/models/iaf_cond_exp_sfa_rr.cpp index 96c3d66f58..ad2f91f68a 100644 --- a/models/iaf_cond_exp_sfa_rr.cpp +++ b/models/iaf_cond_exp_sfa_rr.cpp @@ -440,7 +440,7 @@ 
nest::iaf_cond_exp_sfa_rr::update( Time const& origin, const long from, const lo S_.y_[ State_::G_RR ] += P_.q_rr; SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // set new input current @@ -458,12 +458,12 @@ nest::iaf_cond_exp_sfa_rr::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -476,7 +476,8 @@ nest::iaf_cond_exp_sfa_rr::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/iaf_psc_alpha.cpp b/models/iaf_psc_alpha.cpp index 1ea6ce344e..6a5df69b85 100644 --- a/models/iaf_psc_alpha.cpp +++ b/models/iaf_psc_alpha.cpp @@ -325,7 +325,7 @@ iaf_psc_alpha::update( Time const& origin, const long from, const long to ) S_.dI_ex_ *= V_.P11_ex_; // get read access to the correct input-buffer slot - const size_t input_buffer_slot = kernel().event_delivery_manager.get_modulo( lag ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( lag ); auto& input = B_.input_buffer_.get_values_all_channels( input_buffer_slot ); // Apply spikes delivered in this step; spikes arriving at T+1 have @@ -354,7 +354,7 @@ 
iaf_psc_alpha::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // set new input current @@ -373,8 +373,8 @@ iaf_psc_alpha::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - const size_t input_buffer_slot = kernel().event_delivery_manager.get_modulo( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); const double s = e.get_weight() * e.get_multiplicity(); @@ -387,8 +387,8 @@ iaf_psc_alpha::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - const size_t input_buffer_slot = kernel().event_delivery_manager.get_modulo( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); const double I = e.get_current(); const double w = e.get_weight(); diff --git a/models/iaf_psc_alpha_multisynapse.cpp b/models/iaf_psc_alpha_multisynapse.cpp index 5a55757292..8d258ff3b8 100644 --- a/models/iaf_psc_alpha_multisynapse.cpp +++ b/models/iaf_psc_alpha_multisynapse.cpp @@ -369,7 +369,7 @@ iaf_psc_alpha_multisynapse::update( Time const& origin, const long from, const l set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // set new input current @@ -398,7 +398,8 @@ iaf_psc_alpha_multisynapse::handle( SpikeEvent& e ) assert( e.get_delay_steps() > 0 ); B_.spikes_[ e.get_rport() - 1 ].add_value( - 
e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -410,7 +411,8 @@ iaf_psc_alpha_multisynapse::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * I ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * I ); } void diff --git a/models/iaf_psc_alpha_ps.cpp b/models/iaf_psc_alpha_ps.cpp index cf040eb540..2a203ebd53 100644 --- a/models/iaf_psc_alpha_ps.cpp +++ b/models/iaf_psc_alpha_ps.cpp @@ -472,7 +472,7 @@ nest::iaf_psc_alpha_ps::handle( SpikeEvent& e ) */ const long Tdeliver = e.get_stamp().get_steps() + e.get_delay_steps() - 1; - B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel().simulation_manager.get_slice_origin() ), + B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), Tdeliver, e.get_offset(), e.get_weight() * e.get_multiplicity() ); @@ -487,7 +487,8 @@ nest::iaf_psc_alpha_ps::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( nest::kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void @@ -552,7 +553,7 @@ nest::iaf_psc_alpha_ps::emit_spike_( Time const& origin, const long lag, const d set_spiketime( Time::step( S_.last_spike_step_ ), S_.last_spike_offset_ ); SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, 
se, lag ); return; } @@ -574,7 +575,7 @@ nest::iaf_psc_alpha_ps::emit_instant_spike_( Time const& origin, const long lag, set_spiketime( Time::step( S_.last_spike_step_ ), S_.last_spike_offset_ ); SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); return; } diff --git a/models/iaf_psc_delta.cpp b/models/iaf_psc_delta.cpp index 4d405c18e2..2e2aa63ba3 100644 --- a/models/iaf_psc_delta.cpp +++ b/models/iaf_psc_delta.cpp @@ -318,7 +318,7 @@ nest::iaf_psc_delta::update( Time const& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // set new input current @@ -338,8 +338,8 @@ nest::iaf_psc_delta::handle( SpikeEvent& e ) // explicity, since it depends on delay and offset within // the update cycle. The way it is done here works, but // is clumsy and should be improved. 
- B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -351,7 +351,8 @@ nest::iaf_psc_delta::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/iaf_psc_delta_ps.cpp b/models/iaf_psc_delta_ps.cpp index 513715967c..f9de090d64 100644 --- a/models/iaf_psc_delta_ps.cpp +++ b/models/iaf_psc_delta_ps.cpp @@ -479,7 +479,7 @@ nest::iaf_psc_delta_ps::emit_spike_( Time const& origin, const long lag, const d set_spiketime( Time::step( S_.last_spike_step_ ), S_.last_spike_offset_ ); SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } void @@ -499,7 +499,7 @@ nest::iaf_psc_delta_ps::emit_instant_spike_( Time const& origin, const long lag, set_spiketime( Time::step( S_.last_spike_step_ ), S_.last_spike_offset_ ); SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } void @@ -512,7 +512,7 @@ iaf_psc_delta_ps::handle( SpikeEvent& e ) in the queue. The time is computed according to Time Memo, Rule 3. 
*/ const long Tdeliver = e.get_stamp().get_steps() + e.get_delay_steps() - 1; - B_.events_.add_spike( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.events_.add_spike( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), Tdeliver, e.get_offset(), e.get_weight() * e.get_multiplicity() ); @@ -527,7 +527,8 @@ iaf_psc_delta_ps::handle( CurrentEvent& e ) const double w = e.get_weight(); // add stepwise constant current; MH 2009-10-14 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } diff --git a/models/iaf_psc_exp.cpp b/models/iaf_psc_exp.cpp index e6b3a61e55..73b9c998fa 100644 --- a/models/iaf_psc_exp.cpp +++ b/models/iaf_psc_exp.cpp @@ -311,7 +311,7 @@ nest::iaf_psc_exp::update( const Time& origin, const long from, const long to ) S_.i_syn_ex_ += ( 1. 
- V_.P11ex_ ) * S_.i_1_; // get read access to the correct input-buffer slot - const size_t input_buffer_slot = kernel().event_delivery_manager.get_modulo( lag ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( lag ); auto& input = B_.input_buffer_.get_values_all_channels( input_buffer_slot ); // the spikes arriving at T+1 have an immediate effect on the state of the @@ -332,7 +332,7 @@ nest::iaf_psc_exp::update( const Time& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // set new input current @@ -352,8 +352,8 @@ nest::iaf_psc_exp::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - const size_t input_buffer_slot = kernel().event_delivery_manager.get_modulo( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); const double s = e.get_weight() * e.get_multiplicity(); @@ -369,8 +369,8 @@ nest::iaf_psc_exp::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - const size_t input_buffer_slot = kernel().event_delivery_manager.get_modulo( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); if ( 0 == e.get_rport() ) { diff --git a/models/iaf_psc_exp_htum.cpp b/models/iaf_psc_exp_htum.cpp index 85d430b38f..106262c382 100644 --- a/models/iaf_psc_exp_htum.cpp +++ b/models/iaf_psc_exp_htum.cpp @@ -330,7 +330,7 @@ nest::iaf_psc_exp_htum::update( Time const& origin, const long from, 
const long set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } else @@ -355,12 +355,12 @@ nest::iaf_psc_exp_htum::handle( SpikeEvent& e ) if ( e.get_weight() >= 0.0 ) { - B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } @@ -374,7 +374,8 @@ nest::iaf_psc_exp_htum::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/iaf_psc_exp_multisynapse.cpp b/models/iaf_psc_exp_multisynapse.cpp index 3d39df7889..166c230b97 100644 --- a/models/iaf_psc_exp_multisynapse.cpp +++ b/models/iaf_psc_exp_multisynapse.cpp @@ -335,7 +335,7 @@ iaf_psc_exp_multisynapse::update( const Time& origin, const long from, const lon set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // set new input current @@ -364,7 +364,8 @@ iaf_psc_exp_multisynapse::handle( SpikeEvent& e ) assert( e.get_delay_steps() > 0 ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), 
e.get_weight() * e.get_multiplicity() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -376,7 +377,8 @@ iaf_psc_exp_multisynapse::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * I ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * I ); } void diff --git a/models/iaf_psc_exp_ps.cpp b/models/iaf_psc_exp_ps.cpp index 109861db27..b1e662f858 100644 --- a/models/iaf_psc_exp_ps.cpp +++ b/models/iaf_psc_exp_ps.cpp @@ -441,7 +441,7 @@ nest::iaf_psc_exp_ps::handle( SpikeEvent& e ) */ const long Tdeliver = e.get_stamp().get_steps() + e.get_delay_steps() - 1; - B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel().simulation_manager.get_slice_origin() ), + B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), Tdeliver, e.get_offset(), e.get_weight() * e.get_multiplicity() ); @@ -456,7 +456,8 @@ nest::iaf_psc_exp_ps::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( nest::kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void @@ -515,7 +516,7 @@ nest::iaf_psc_exp_ps::emit_spike_( const Time& origin, const long lag, const dou SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } void @@ -536,7 +537,7 @@ nest::iaf_psc_exp_ps::emit_instant_spike_( const Time& origin, const long lag, c SpikeEvent se; se.set_offset( 
S_.last_spike_offset_ ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } double diff --git a/models/iaf_psc_exp_ps_lossless.cpp b/models/iaf_psc_exp_ps_lossless.cpp index 9561747875..d928bea16b 100644 --- a/models/iaf_psc_exp_ps_lossless.cpp +++ b/models/iaf_psc_exp_ps_lossless.cpp @@ -485,7 +485,7 @@ nest::iaf_psc_exp_ps_lossless::handle( SpikeEvent& e ) */ const long Tdeliver = e.get_stamp().get_steps() + e.get_delay_steps() - 1; - B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel().simulation_manager.get_slice_origin() ), + B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), Tdeliver, e.get_offset(), e.get_weight() * e.get_multiplicity() ); @@ -500,7 +500,8 @@ nest::iaf_psc_exp_ps_lossless::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( nest::kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void @@ -558,7 +559,7 @@ nest::iaf_psc_exp_ps_lossless::emit_spike_( const Time& origin, const long lag, SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } void @@ -579,7 +580,7 @@ nest::iaf_psc_exp_ps_lossless::emit_instant_spike_( const Time& origin, const lo SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } double diff --git a/models/iaf_tum_2000.cpp b/models/iaf_tum_2000.cpp index b1510bf0fe..c9491a7b23 100644 --- a/models/iaf_tum_2000.cpp +++ b/models/iaf_tum_2000.cpp @@ -364,7 +364,7 @@ nest::iaf_tum_2000::update( 
const Time& origin, const long from, const long to ) S_.i_syn_ex_ += ( 1. - V_.P11ex_ ) * S_.i_1_; // get read access to the correct input-buffer slot - const size_t input_buffer_slot = kernel().event_delivery_manager.get_modulo( lag ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( lag ); auto& input = B_.input_buffer_.get_values_all_channels( input_buffer_slot ); // the spikes arriving at T+1 have an immediate effect on the state of the @@ -427,7 +427,7 @@ nest::iaf_tum_2000::update( const Time& origin, const long from, const long to ) // send spike with datafield SpikeEvent se; se.set_offset( delta_y_tsp ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // set new input current @@ -447,8 +447,8 @@ nest::iaf_tum_2000::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - const size_t input_buffer_slot = kernel().event_delivery_manager.get_modulo( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); // Multiply with datafield from SpikeEvent to apply depression/facilitation computed by presynaptic neuron double s = e.get_weight() * e.get_multiplicity(); @@ -470,8 +470,8 @@ nest::iaf_tum_2000::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - const size_t input_buffer_slot = kernel().event_delivery_manager.get_modulo( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); if ( 0 == e.get_rport() ) { diff --git a/models/ignore_and_fire.cpp b/models/ignore_and_fire.cpp index 
5e6368bebf..2ba28827d7 100644 --- a/models/ignore_and_fire.cpp +++ b/models/ignore_and_fire.cpp @@ -181,7 +181,7 @@ ignore_and_fire::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } else { @@ -198,8 +198,8 @@ ignore_and_fire::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - const size_t input_buffer_slot = kernel().event_delivery_manager.get_modulo( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); const double s = e.get_weight() * e.get_multiplicity(); // separate buffer channels for excitatory and inhibitory inputs @@ -211,8 +211,8 @@ ignore_and_fire::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - const size_t input_buffer_slot = kernel().event_delivery_manager.get_modulo( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); const double I = e.get_current(); const double w = e.get_weight(); diff --git a/models/inhomogeneous_poisson_generator.cpp b/models/inhomogeneous_poisson_generator.cpp index cb5459ea3c..4f272d3e46 100644 --- a/models/inhomogeneous_poisson_generator.cpp +++ b/models/inhomogeneous_poisson_generator.cpp @@ -82,7 +82,7 @@ nest::inhomogeneous_poisson_generator::Parameters_::assert_valid_rate_time_and_i { Time t_rate; - if ( t <= kernel().simulation_manager.get_time().get_ms() ) + if ( t <= kernel::manager< SimulationManager >().get_time().get_ms() ) { throw BadProperty( "Time points must lie 
strictly in the future." ); } @@ -269,7 +269,7 @@ nest::inhomogeneous_poisson_generator::update( Time const& origin, const long fr if ( B_.rate_ > 0 and StimulationDevice::is_active( Time::step( curr_time ) ) ) { DSSpikeEvent se; - kernel().event_delivery_manager.send( *this, se, offs ); + kernel::manager< EventDeliveryManager >().send( *this, se, offs ); } } } diff --git a/models/izhikevich.cpp b/models/izhikevich.cpp index 71558f17e3..9bebc04240 100644 --- a/models/izhikevich.cpp +++ b/models/izhikevich.cpp @@ -229,7 +229,7 @@ nest::izhikevich::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } // set new input current @@ -244,8 +244,8 @@ void nest::izhikevich::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -255,7 +255,8 @@ nest::izhikevich::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/jonke_synapse.h b/models/jonke_synapse.h index e8bb578566..3c5573bb55 100644 --- a/models/jonke_synapse.h +++ b/models/jonke_synapse.h @@ -329,7 +329,7 @@ jonke_synapse< targetidentifierT >::send( Event& e, size_t t, const JonkeCommonP ++start; // get_history() should make sure that // start->t_ > t_lastspike - dendritic_delay, i.e. 
minus_dt < 0 - assert( minus_dt < -1.0 * kernel().connection_manager.get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt / cp.tau_plus_ ), cp ); } diff --git a/models/mat2_psc_exp.cpp b/models/mat2_psc_exp.cpp index 2c42c5a458..277d007d9f 100644 --- a/models/mat2_psc_exp.cpp +++ b/models/mat2_psc_exp.cpp @@ -339,7 +339,7 @@ nest::mat2_psc_exp::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } else @@ -364,12 +364,12 @@ nest::mat2_psc_exp::handle( SpikeEvent& e ) if ( e.get_weight() >= 0.0 ) { - B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } @@ -383,7 +383,8 @@ nest::mat2_psc_exp::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/mip_generator.cpp b/models/mip_generator.cpp index 03ce07134e..4fe8944df6 100644 --- a/models/mip_generator.cpp +++ b/models/mip_generator.cpp @@ -141,7 +141,7 @@ nest::mip_generator::update( Time const& T, const long from, const long to ) 
DSSpikeEvent se; se.set_multiplicity( n_parent_spikes ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } } diff --git a/models/multimeter.cpp b/models/multimeter.cpp index 1318dad50a..1f4e01f111 100644 --- a/models/multimeter.cpp +++ b/models/multimeter.cpp @@ -206,7 +206,7 @@ multimeter::update( Time const& origin, const long from, const long ) // // Note that not all nodes receiving the request will necessarily answer. DataLoggingRequest req; - kernel().event_delivery_manager.send( *this, req ); + kernel::manager< EventDeliveryManager >().send( *this, req ); } void diff --git a/models/multimeter.h b/models/multimeter.h index 80a3a5991a..6fab67cd03 100644 --- a/models/multimeter.h +++ b/models/multimeter.h @@ -247,7 +247,7 @@ nest::multimeter::get_status( DictionaryDatum& d ) const // siblings on other threads if ( get_thread() == 0 ) { - const std::vector< Node* > siblings = kernel().node_manager.get_thread_siblings( get_node_id() ); + const std::vector< Node* > siblings = kernel::manager< NodeManager >().get_thread_siblings( get_node_id() ); std::vector< Node* >::const_iterator s; for ( s = siblings.begin() + 1; s != siblings.end(); ++s ) { diff --git a/models/music_cont_in_proxy.cpp b/models/music_cont_in_proxy.cpp index 81494075a6..91c9b4a8b2 100644 --- a/models/music_cont_in_proxy.cpp +++ b/models/music_cont_in_proxy.cpp @@ -134,7 +134,7 @@ nest::music_cont_in_proxy::pre_run_hook() // only publish the port once if ( not S_.published_ ) { - MUSIC::Setup* s = kernel().music_manager.get_music_setup(); + MUSIC::Setup* s = kernel::manager< MUSICManager >().get_music_setup(); if ( s == 0 ) { throw MUSICSimulationHasRun( get_name() ); diff --git a/models/music_cont_out_proxy.cpp b/models/music_cont_out_proxy.cpp index 9398587e3b..ef2f42afc7 100644 --- a/models/music_cont_out_proxy.cpp +++ b/models/music_cont_out_proxy.cpp @@ -242,16 +242,16 @@ nest::music_cont_out_proxy::pre_run_hook() 
// only publish the output port once, if ( S_.published_ == false ) { - const size_t synmodel_id = kernel().model_manager.get_synapse_model_id( "static_synapse" ); + const size_t synmodel_id = kernel::manager< ModelManager >().get_synapse_model_id( "static_synapse" ); std::vector< MUSIC::GlobalIndex > music_index_map; DictionaryDatum dummy_params = new Dictionary(); for ( size_t i = 0; i < P_.targets_->size(); ++i ) { const size_t tnode_id = ( *P_.targets_ )[ i ]; - if ( kernel().node_manager.is_local_node_id( tnode_id ) ) + if ( kernel::manager< NodeManager >().is_local_node_id( tnode_id ) ) { - kernel().connection_manager.connect( get_node_id(), tnode_id, dummy_params, synmodel_id ); + kernel::manager< ConnectionManager >().connect( get_node_id(), tnode_id, dummy_params, synmodel_id ); for ( size_t j = 0; j < P_.record_from_.size(); ++j ) { @@ -260,7 +260,7 @@ nest::music_cont_out_proxy::pre_run_hook() } } - MUSIC::Setup* s = kernel().music_manager.get_music_setup(); + MUSIC::Setup* s = kernel::manager< MUSICManager >().get_music_setup(); if ( s == 0 ) { throw MUSICSimulationHasRun( get_name() ); @@ -323,7 +323,7 @@ nest::music_cont_out_proxy::get_status( DictionaryDatum& d ) const // siblings on other threads if ( get_thread() == 0 ) { - const std::vector< Node* > siblings = kernel().node_manager.get_thread_siblings( get_node_id() ); + const std::vector< Node* > siblings = kernel::manager< NodeManager >().get_thread_siblings( get_node_id() ); std::vector< Node* >::const_iterator s; for ( s = siblings.begin() + 1; s != siblings.end(); ++s ) { @@ -359,7 +359,7 @@ nest::music_cont_out_proxy::update( Time const& origin, const long from, const l // // Note that not all nodes receiving the request will necessarily answer. 
DataLoggingRequest req; - kernel().event_delivery_manager.send( *this, req ); + kernel::manager< EventDeliveryManager >().send( *this, req ); } void diff --git a/models/music_event_in_proxy.cpp b/models/music_event_in_proxy.cpp index 7e182c0dcc..04977d053e 100644 --- a/models/music_event_in_proxy.cpp +++ b/models/music_event_in_proxy.cpp @@ -102,7 +102,7 @@ nest::music_event_in_proxy::music_event_in_proxy() , S_() { // Register port for the model so it is available as default - kernel().music_manager.register_music_in_port( P_.port_name_ ); + kernel::manager< MUSICManager >().register_music_in_port( P_.port_name_ ); } nest::music_event_in_proxy::music_event_in_proxy( const music_event_in_proxy& n ) @@ -111,7 +111,7 @@ nest::music_event_in_proxy::music_event_in_proxy( const music_event_in_proxy& n , S_( n.S_ ) { // Register port for node instance because MusicManager manages ports via reference count - kernel().music_manager.register_music_in_port( P_.port_name_ ); + kernel::manager< MUSICManager >().register_music_in_port( P_.port_name_ ); } @@ -130,7 +130,7 @@ nest::music_event_in_proxy::pre_run_hook() // register my port and my channel at the scheduler if ( not S_.registered_ ) { - kernel().music_manager.register_music_event_in_proxy( P_.port_name_, P_.channel_, this ); + kernel::manager< MUSICManager >().register_music_event_in_proxy( P_.port_name_, P_.channel_, this ); S_.registered_ = true; } } @@ -152,8 +152,8 @@ nest::music_event_in_proxy::set_status( const DictionaryDatum& d ) stmp.set( d, P_ ); // throws if BadProperty // if we get here, temporaries contain consistent set of properties - kernel().music_manager.unregister_music_in_port( P_.port_name_ ); - kernel().music_manager.register_music_in_port( ptmp.port_name_ ); + kernel::manager< MUSICManager >().unregister_music_in_port( P_.port_name_ ); + kernel::manager< MUSICManager >().register_music_in_port( ptmp.port_name_ ); P_ = ptmp; S_ = stmp; @@ -164,9 +164,9 @@ nest::music_event_in_proxy::handle( 
SpikeEvent& e ) { e.set_sender( *this ); - for ( size_t t = 0; t < kernel().vp_manager.get_num_threads(); ++t ) + for ( size_t t = 0; t < kernel::manager< VPManager >().get_num_threads(); ++t ) { - kernel().connection_manager.send_from_device( t, local_device_id_, e ); + kernel::manager< ConnectionManager >().send_from_device( t, local_device_id_, e ); } } diff --git a/models/music_event_out_proxy.cpp b/models/music_event_out_proxy.cpp index 34e6c5094c..3cb24ed72a 100644 --- a/models/music_event_out_proxy.cpp +++ b/models/music_event_out_proxy.cpp @@ -137,7 +137,7 @@ nest::music_event_out_proxy::pre_run_hook() // only publish the output port once, if ( not S_.published_ ) { - MUSIC::Setup* s = kernel().music_manager.get_music_setup(); + MUSIC::Setup* s = kernel::manager< MUSICManager >().get_music_setup(); if ( s == 0 ) { throw MUSICSimulationHasRun( get_name() ); diff --git a/models/music_message_in_proxy.cpp b/models/music_message_in_proxy.cpp index ff3eb4f955..05ed6b3daf 100644 --- a/models/music_message_in_proxy.cpp +++ b/models/music_message_in_proxy.cpp @@ -131,7 +131,7 @@ nest::music_message_in_proxy::pre_run_hook() // only publish the port once, if ( not S_.published_ ) { - MUSIC::Setup* s = kernel().music_manager.get_music_setup(); + MUSIC::Setup* s = kernel::manager< MUSICManager >().get_music_setup(); if ( s == 0 ) { throw MUSICSimulationHasRun( get_name() ); diff --git a/models/music_rate_in_proxy.cpp b/models/music_rate_in_proxy.cpp index 0aee2c2f2b..aa955c542b 100644 --- a/models/music_rate_in_proxy.cpp +++ b/models/music_rate_in_proxy.cpp @@ -108,7 +108,7 @@ nest::music_rate_in_proxy::music_rate_in_proxy() , S_() { // Register port for the model so it is available as default - kernel().music_manager.register_music_in_port( P_.port_name_ ); + kernel::manager< MUSICManager >().register_music_in_port( P_.port_name_ ); } nest::music_rate_in_proxy::music_rate_in_proxy( const music_rate_in_proxy& n ) @@ -117,7 +117,7 @@ 
nest::music_rate_in_proxy::music_rate_in_proxy( const music_rate_in_proxy& n ) , S_( n.S_ ) { // Register port for node instance because MusicManager manages ports via reference count - kernel().music_manager.register_music_in_port( P_.port_name_ ); + kernel::manager< MUSICManager >().register_music_in_port( P_.port_name_ ); } @@ -136,7 +136,7 @@ nest::music_rate_in_proxy::pre_run_hook() // only publish the port once if ( not S_.registered_ ) { - kernel().music_manager.register_music_rate_in_proxy( P_.port_name_, P_.channel_, this ); + kernel::manager< MUSICManager >().register_music_rate_in_proxy( P_.port_name_, P_.channel_, this ); S_.registered_ = true; } } @@ -160,8 +160,8 @@ nest::music_rate_in_proxy::set_status( const DictionaryDatum& d ) stmp.set( d, P_ ); // throws if BadProperty // if we get here, temporaries contain consistent set of properties - kernel().music_manager.unregister_music_in_port( P_.port_name_ ); - kernel().music_manager.register_music_in_port( ptmp.port_name_ ); + kernel::manager< MUSICManager >().unregister_music_in_port( P_.port_name_ ); + kernel::manager< MUSICManager >().register_music_in_port( ptmp.port_name_ ); P_ = ptmp; S_ = stmp; } @@ -174,7 +174,7 @@ nest::music_rate_in_proxy::update( Time const&, const long, const long ) void nest::music_rate_in_proxy::handle( InstantaneousRateConnectionEvent& e ) { - kernel().event_delivery_manager.send_secondary( *this, e ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, e ); } diff --git a/models/music_rate_out_proxy.cpp b/models/music_rate_out_proxy.cpp index df5f79f344..116752330a 100644 --- a/models/music_rate_out_proxy.cpp +++ b/models/music_rate_out_proxy.cpp @@ -143,7 +143,7 @@ nest::music_rate_out_proxy::pre_run_hook() // only publish the output port once, if ( not S_.published_ ) { - MUSIC::Setup* s = kernel().music_manager.get_music_setup(); + MUSIC::Setup* s = kernel::manager< MUSICManager >().get_music_setup(); if ( s == 0 ) { diff --git 
a/models/noise_generator.cpp b/models/noise_generator.cpp index 6f9fa3795c..f6b9fdce29 100644 --- a/models/noise_generator.cpp +++ b/models/noise_generator.cpp @@ -241,7 +241,7 @@ nest::noise_generator::pre_run_hook() V_.dt_steps_ = P_.dt_.get_steps(); const double h = Time::get_resolution().get_ms(); - const double t = kernel().simulation_manager.get_time().get_ms(); + const double t = kernel::manager< SimulationManager >().get_time().get_ms(); // scale Hz to ms const double omega = 2.0 * numerics::pi * P_.freq_ / 1000.0; @@ -337,7 +337,7 @@ nest::noise_generator::update( Time const& origin, const long from, const long t B_.logger_.record_data( origin.get_steps() + offs ); DSCurrentEvent ce; - kernel().event_delivery_manager.send( *this, ce, offs ); + kernel::manager< EventDeliveryManager >().send( *this, ce, offs ); } } diff --git a/models/noise_generator.h b/models/noise_generator.h index e928af53ff..16aa3d1db8 100644 --- a/models/noise_generator.h +++ b/models/noise_generator.h @@ -298,7 +298,7 @@ class noise_generator : public StimulationDevice inline size_t noise_generator::handles_test_event( DataLoggingRequest& dlr, size_t receptor_type ) { - if ( kernel().vp_manager.get_num_threads() > 1 ) + if ( kernel::manager< VPManager >().get_num_threads() > 1 ) { throw KernelException( "Recording from a noise_generator is only possible in single-threaded mode." 
); } diff --git a/models/parrot_neuron.cpp b/models/parrot_neuron.cpp index 961685dd72..0d06f4ba6e 100644 --- a/models/parrot_neuron.cpp +++ b/models/parrot_neuron.cpp @@ -58,7 +58,7 @@ parrot_neuron::update( Time const& origin, const long from, const long to ) // create a new SpikeEvent, set its multiplicity and send it SpikeEvent se; se.set_multiplicity( current_spikes_n ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); // set the spike times, respecting the multiplicity for ( unsigned long i = 0; i < current_spikes_n; i++ ) @@ -87,7 +87,7 @@ parrot_neuron::handle( SpikeEvent& e ) // Repeat only spikes incoming on port 0, port 1 will be ignored if ( 0 == e.get_rport() ) { - B_.n_spikes_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.n_spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), static_cast< double >( e.get_multiplicity() ) ); } } diff --git a/models/parrot_neuron_ps.cpp b/models/parrot_neuron_ps.cpp index 79eddaae4c..8a7cb8aec0 100644 --- a/models/parrot_neuron_ps.cpp +++ b/models/parrot_neuron_ps.cpp @@ -80,7 +80,7 @@ parrot_neuron_ps::update( Time const& origin, long const from, long const to ) SpikeEvent se; se.set_multiplicity( multiplicity ); se.set_offset( ev_offset ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); for ( unsigned long i = 0; i < multiplicity; ++i ) { @@ -117,7 +117,7 @@ parrot_neuron_ps::handle( SpikeEvent& e ) const long Tdeliver = e.get_stamp().get_steps() + e.get_delay_steps() - 1; // parrot ignores weight of incoming connection, store multiplicity - B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel().simulation_manager.get_slice_origin() ), + B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), Tdeliver, 
e.get_offset(), static_cast< double >( e.get_multiplicity() ) ); diff --git a/models/poisson_generator.cpp b/models/poisson_generator.cpp index e1de8bfe10..e6edc49814 100644 --- a/models/poisson_generator.cpp +++ b/models/poisson_generator.cpp @@ -137,7 +137,7 @@ nest::poisson_generator::update( Time const& T, const long from, const long to ) } DSSpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } diff --git a/models/poisson_generator_ps.cpp b/models/poisson_generator_ps.cpp index 830d558c28..a0ea31ae37 100644 --- a/models/poisson_generator_ps.cpp +++ b/models/poisson_generator_ps.cpp @@ -204,7 +204,7 @@ nest::poisson_generator_ps::update( Time const& T, const long from, const long t // the event hook then sends out the real spikes with offgrid timing // We pretend to send at T+from DSSpikeEvent se; - kernel().event_delivery_manager.send( *this, se, from ); + kernel::manager< EventDeliveryManager >().send( *this, se, from ); } } diff --git a/models/pp_cond_exp_mc_urbanczik.cpp b/models/pp_cond_exp_mc_urbanczik.cpp index fc6c477902..2d7195df54 100644 --- a/models/pp_cond_exp_mc_urbanczik.cpp +++ b/models/pp_cond_exp_mc_urbanczik.cpp @@ -662,7 +662,7 @@ nest::pp_cond_exp_mc_urbanczik::update( Time const& origin, const long from, con // And send the spike event SpikeEvent se; se.set_multiplicity( n_spikes ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); // Set spike time in order to make plasticity rules work for ( unsigned int i = 0; i < n_spikes; i++ ) @@ -699,7 +699,8 @@ nest::pp_cond_exp_mc_urbanczik::handle( SpikeEvent& e ) assert( e.get_rport() < 2 * NCOMP ); B_.spikes_[ e.get_rport() ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager 
>().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -711,7 +712,8 @@ nest::pp_cond_exp_mc_urbanczik::handle( CurrentEvent& e ) // add weighted current; HEP 2002-10-04 B_.currents_[ e.get_rport() ].add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_current() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_current() ); } void diff --git a/models/pp_psc_delta.cpp b/models/pp_psc_delta.cpp index 96161ecb40..3dc68dabc5 100644 --- a/models/pp_psc_delta.cpp +++ b/models/pp_psc_delta.cpp @@ -430,7 +430,7 @@ nest::pp_psc_delta::update( Time const& origin, const long from, const long to ) // And send the spike event SpikeEvent se; se.set_multiplicity( n_spikes ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); // set spike time for STDP to work, // see https://github.com/nest/nest-simulator/issues/77 @@ -469,8 +469,8 @@ nest::pp_psc_delta::handle( SpikeEvent& e ) // explicitly, since it depends on delay and offset within // the update cycle. The way it is done here works, but // is clumsy and should be improved. 
- B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_weight() * e.get_multiplicity() ); } void @@ -482,7 +482,8 @@ nest::pp_psc_delta::handle( CurrentEvent& e ) const double w = e.get_weight(); // Add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), w * c ); + B_.currents_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); } void diff --git a/models/ppd_sup_generator.cpp b/models/ppd_sup_generator.cpp index 5dddb142fb..e74851b98e 100644 --- a/models/ppd_sup_generator.cpp +++ b/models/ppd_sup_generator.cpp @@ -262,7 +262,7 @@ nest::ppd_sup_generator::update( Time const& T, const long from, const long to ) } DSSpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } diff --git a/models/pulsepacket_generator.cpp b/models/pulsepacket_generator.cpp index dfe2391dee..b3d55b9fd5 100644 --- a/models/pulsepacket_generator.cpp +++ b/models/pulsepacket_generator.cpp @@ -147,7 +147,7 @@ nest::pulsepacket_generator::pre_run_hook() V_.tolerance = 1.0; } - const double now = ( kernel().simulation_manager.get_time() ).get_ms(); + const double now = ( kernel::manager< SimulationManager >().get_time() ).get_ms(); V_.start_center_idx_ = 0; V_.stop_center_idx_ = 0; @@ -225,7 +225,7 @@ nest::pulsepacket_generator::update( Time const& T, const long, const long to ) { SpikeEvent se; se.set_multiplicity( n_spikes ); - kernel().event_delivery_manager.send( *this, se, prev_spike - T.get_steps() ); + kernel::manager< EventDeliveryManager >().send( *this, se, prev_spike - T.get_steps() ); n_spikes = 0; } } diff --git a/models/rate_neuron_ipn.h 
b/models/rate_neuron_ipn.h index 5140deb72a..bf53e5dad9 100644 --- a/models/rate_neuron_ipn.h +++ b/models/rate_neuron_ipn.h @@ -537,7 +537,7 @@ rate_neuron_ipn< TNonlinearities >::rate_neuron_ipn() , B_( *this ) { recordablesMap_.create(); - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); } template < class TNonlinearities > @@ -548,7 +548,7 @@ rate_neuron_ipn< TNonlinearities >::rate_neuron_ipn( const rate_neuron_ipn& n ) , S_( n.S_ ) , B_( n.B_, *this ) { - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); } /* ---------------------------------------------------------------- @@ -563,7 +563,7 @@ rate_neuron_ipn< TNonlinearities >::init_buffers_() B_.delayed_rates_in_.clear(); // includes resize // resize buffers - const size_t buffer_size = kernel().connection_manager.get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); B_.instant_rates_ex_.resize( buffer_size, 0.0 ); B_.instant_rates_in_.resize( buffer_size, 0.0 ); B_.last_y_values.resize( buffer_size, 0.0 ); @@ -614,8 +614,8 @@ rate_neuron_ipn< TNonlinearities >::update_( Time const& origin, const long to, const bool called_from_wfr_update ) { - const size_t buffer_size = kernel().connection_manager.get_min_delay(); - const double wfr_tol = kernel().simulation_manager.get_wfr_tol(); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); + const double wfr_tol = kernel::manager< SimulationManager >().get_wfr_tol(); bool wfr_tol_exceeded = false; // allocate memory to store rates to be sent by rate events @@ -704,7 +704,7 @@ rate_neuron_ipn< TNonlinearities >::update_( Time const& origin, // to avoid accumulation in the buffers of the receiving neurons. 
DelayedRateConnectionEvent drve; drve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, drve ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, drve ); // clear last_y_values std::vector< double >( buffer_size, 0.0 ).swap( B_.last_y_values ); @@ -726,7 +726,7 @@ rate_neuron_ipn< TNonlinearities >::update_( Time const& origin, // Send rate-neuron-event InstantaneousRateConnectionEvent rve; rve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, rve ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, rve ); // Reset variables std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_ex_ ); @@ -778,7 +778,7 @@ void rate_neuron_ipn< TNonlinearities >::handle( DelayedRateConnectionEvent& e ) { const double weight = e.get_weight(); - const long delay = e.get_delay_steps() - kernel().connection_manager.get_min_delay(); + const long delay = e.get_delay_steps() - kernel::manager< ConnectionManager >().get_min_delay(); size_t i = 0; std::vector< unsigned int >::iterator it = e.begin(); diff --git a/models/rate_neuron_opn.h b/models/rate_neuron_opn.h index e08448ced6..44dc22538b 100644 --- a/models/rate_neuron_opn.h +++ b/models/rate_neuron_opn.h @@ -517,7 +517,7 @@ nest::rate_neuron_opn< TNonlinearities >::rate_neuron_opn() , B_( *this ) { recordablesMap_.create(); - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); } template < class TNonlinearities > @@ -527,7 +527,7 @@ nest::rate_neuron_opn< TNonlinearities >::rate_neuron_opn( const rate_neuron_opn , S_( n.S_ ) , B_( n.B_, *this ) { - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); } /* ---------------------------------------------------------------- @@ -542,7 +542,7 @@ nest::rate_neuron_opn< TNonlinearities 
>::init_buffers_() B_.delayed_rates_in_.clear(); // includes resize // resize buffers - const size_t buffer_size = kernel().connection_manager.get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); B_.instant_rates_ex_.resize( buffer_size, 0.0 ); B_.instant_rates_in_.resize( buffer_size, 0.0 ); B_.last_y_values.resize( buffer_size, 0.0 ); @@ -585,8 +585,8 @@ nest::rate_neuron_opn< TNonlinearities >::update_( Time const& origin, const long to, const bool called_from_wfr_update ) { - const size_t buffer_size = kernel().connection_manager.get_min_delay(); - const double wfr_tol = kernel().simulation_manager.get_wfr_tol(); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); + const double wfr_tol = kernel::manager< SimulationManager >().get_wfr_tol(); bool wfr_tol_exceeded = false; // allocate memory to store rates to be sent by rate events @@ -672,7 +672,7 @@ nest::rate_neuron_opn< TNonlinearities >::update_( Time const& origin, // to avoid accumulation in the buffers of the receiving neurons. 
DelayedRateConnectionEvent drve; drve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, drve ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, drve ); // clear last_y_values std::vector< double >( buffer_size, 0.0 ).swap( B_.last_y_values ); @@ -694,7 +694,7 @@ nest::rate_neuron_opn< TNonlinearities >::update_( Time const& origin, // Send rate-neuron-event InstantaneousRateConnectionEvent rve; rve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, rve ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, rve ); // Reset variables std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_ex_ ); @@ -746,7 +746,7 @@ void nest::rate_neuron_opn< TNonlinearities >::handle( DelayedRateConnectionEvent& e ) { const double weight = e.get_weight(); - const long delay = e.get_delay_steps() - kernel().connection_manager.get_min_delay(); + const long delay = e.get_delay_steps() - kernel::manager< ConnectionManager >().get_min_delay(); size_t i = 0; std::vector< unsigned int >::iterator it = e.begin(); diff --git a/models/rate_transformer_node.h b/models/rate_transformer_node.h index 7f3390c9a9..61a65fc32d 100644 --- a/models/rate_transformer_node.h +++ b/models/rate_transformer_node.h @@ -406,7 +406,7 @@ nest::rate_transformer_node< TNonlinearities >::rate_transformer_node() , B_( *this ) { recordablesMap_.create(); - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); } template < class TNonlinearities > @@ -416,7 +416,7 @@ nest::rate_transformer_node< TNonlinearities >::rate_transformer_node( const rat , S_( n.S_ ) , B_( n.B_, *this ) { - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); } /* ---------------------------------------------------------------- @@ -430,7 +430,7 @@ 
nest::rate_transformer_node< TNonlinearities >::init_buffers_() B_.delayed_rates_.clear(); // includes resize // resize buffers - const size_t buffer_size = kernel().connection_manager.get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); B_.instant_rates_.resize( buffer_size, 0.0 ); B_.last_y_values.resize( buffer_size, 0.0 ); @@ -456,8 +456,8 @@ nest::rate_transformer_node< TNonlinearities >::update_( Time const& origin, const long to, const bool called_from_wfr_update ) { - const size_t buffer_size = kernel().connection_manager.get_min_delay(); - const double wfr_tol = kernel().simulation_manager.get_wfr_tol(); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); + const double wfr_tol = kernel::manager< SimulationManager >().get_wfr_tol(); bool wfr_tol_exceeded = false; // allocate memory to store rates to be sent by rate events @@ -511,7 +511,7 @@ nest::rate_transformer_node< TNonlinearities >::update_( Time const& origin, // to avoid accumulation in the buffers of the receiving neurons. 
DelayedRateConnectionEvent drve; drve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, drve ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, drve ); // clear last_y_values std::vector< double >( buffer_size, 0.0 ).swap( B_.last_y_values ); @@ -526,7 +526,7 @@ nest::rate_transformer_node< TNonlinearities >::update_( Time const& origin, // Send rate-neuron-event InstantaneousRateConnectionEvent rve; rve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, rve ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, rve ); // Reset variables std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_ ); @@ -563,7 +563,7 @@ void nest::rate_transformer_node< TNonlinearities >::handle( DelayedRateConnectionEvent& e ) { const double weight = e.get_weight(); - const long delay = e.get_delay_steps() - kernel().connection_manager.get_min_delay(); + const long delay = e.get_delay_steps() - kernel::manager< ConnectionManager >().get_min_delay(); size_t i = 0; std::vector< unsigned int >::iterator it = e.begin(); diff --git a/models/siegert_neuron.cpp b/models/siegert_neuron.cpp index 1dd35eb371..5a1f0c3095 100644 --- a/models/siegert_neuron.cpp +++ b/models/siegert_neuron.cpp @@ -187,7 +187,7 @@ nest::siegert_neuron::siegert_neuron() , B_( *this ) { recordablesMap_.create(); - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); gsl_w_ = gsl_integration_workspace_alloc( 1000 ); } @@ -197,7 +197,7 @@ nest::siegert_neuron::siegert_neuron( const siegert_neuron& n ) , S_( n.S_ ) , B_( n.B_, *this ) { - Node::set_node_uses_wfr( kernel().simulation_manager.use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); gsl_w_ = gsl_integration_workspace_alloc( 1000 ); } @@ -280,7 +280,7 @@ void nest::siegert_neuron::init_buffers_() { // resize buffers - 
const size_t buffer_size = kernel().connection_manager.get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); B_.drift_input_.resize( buffer_size, 0.0 ); B_.diffusion_input_.resize( buffer_size, 0.0 ); B_.last_y_values.resize( buffer_size, 0.0 ); @@ -308,8 +308,8 @@ nest::siegert_neuron::pre_run_hook() bool nest::siegert_neuron::update_( Time const& origin, const long from, const long to, const bool called_from_wfr_update ) { - const size_t buffer_size = kernel().connection_manager.get_min_delay(); - const double wfr_tol = kernel().simulation_manager.get_wfr_tol(); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); + const double wfr_tol = kernel::manager< SimulationManager >().get_wfr_tol(); bool wfr_tol_exceeded = false; // allocate memory to store rates to be sent by rate events @@ -353,7 +353,7 @@ nest::siegert_neuron::update_( Time const& origin, const long from, const long t // Send diffusion-event DiffusionConnectionEvent rve; rve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, rve ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, rve ); // Reset variables std::vector< double >( buffer_size, 0.0 ).swap( B_.drift_input_ ); diff --git a/models/sinusoidal_gamma_generator.cpp b/models/sinusoidal_gamma_generator.cpp index 18c7321c32..db5625c85c 100644 --- a/models/sinusoidal_gamma_generator.cpp +++ b/models/sinusoidal_gamma_generator.cpp @@ -248,7 +248,7 @@ nest::sinusoidal_gamma_generator::init_buffers_() StimulationDevice::init_buffers(); B_.logger_.reset(); - std::vector< double >( P_.num_trains_, kernel().simulation_manager.get_time().get_ms() ).swap( B_.t0_ms_ ); + std::vector< double >( P_.num_trains_, kernel::manager< SimulationManager >().get_time().get_ms() ).swap( B_.t0_ms_ ); std::vector< double >( P_.num_trains_, 0.0 ).swap( B_.Lambda_t0_ ); B_.P_prev_ = P_; } @@ -284,7 +284,7 @@ 
nest::sinusoidal_gamma_generator::pre_run_hook() V_.h_ = Time::get_resolution().get_ms(); V_.rng_ = get_vp_specific_rng( get_thread() ); - const double t_ms = kernel().simulation_manager.get_time().get_ms(); + const double t_ms = kernel::manager< SimulationManager >().get_time().get_ms(); // if new connections were created during simulation break, resize accordingly // this is a no-op if no new connections were created @@ -328,14 +328,14 @@ nest::sinusoidal_gamma_generator::update( Time const& origin, const long from, c if ( P_.individual_spike_trains_ ) { DSSpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } else { if ( V_.rng_->drand() < hazard_( 0 ) ) { SpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); B_.t0_ms_[ 0 ] = V_.t_ms_; B_.Lambda_t0_[ 0 ] = 0; } diff --git a/models/sinusoidal_poisson_generator.cpp b/models/sinusoidal_poisson_generator.cpp index 85248c4cb4..7ee08d3785 100644 --- a/models/sinusoidal_poisson_generator.cpp +++ b/models/sinusoidal_poisson_generator.cpp @@ -221,7 +221,7 @@ nest::sinusoidal_poisson_generator::pre_run_hook() // time resolution V_.h_ = Time::get_resolution().get_ms(); - const double t = kernel().simulation_manager.get_time().get_ms(); + const double t = kernel::manager< SimulationManager >().get_time().get_ms(); // initial state S_.y_0_ = P_.amplitude_ * std::cos( P_.om_ * t + P_.phi_ ); @@ -268,7 +268,7 @@ nest::sinusoidal_poisson_generator::update( Time const& origin, const long from, if ( P_.individual_spike_trains_ ) { DSSpikeEvent se; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } else { @@ -276,7 +276,7 @@ nest::sinusoidal_poisson_generator::update( Time const& origin, const long from, long n_spikes = V_.poisson_dist_( rng, param ); SpikeEvent se; 
se.set_multiplicity( n_spikes ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } // store rate in spks/s diff --git a/models/spike_dilutor.cpp b/models/spike_dilutor.cpp index 906aeba557..f37264a6b8 100644 --- a/models/spike_dilutor.cpp +++ b/models/spike_dilutor.cpp @@ -97,7 +97,7 @@ nest::spike_dilutor::init_state_() // This check cannot be done in the copy constructor because that is also used to // create model prototypes. Since spike_dilutor is deprecated anyways, we put this // brute-force solution here. - if ( kernel().vp_manager.get_num_threads() > 1 ) + if ( kernel::manager< VPManager >().get_num_threads() > 1 ) { throw KernelException( "The network contains a spike_dilutor which cannot be used with multiple threads." ); } @@ -140,7 +140,7 @@ nest::spike_dilutor::update( Time const& T, const long from, const long to ) DSSpikeEvent se; se.set_multiplicity( n_mother_spikes ); - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } } } @@ -181,6 +181,6 @@ nest::spike_dilutor::event_hook( DSSpikeEvent& e ) void nest::spike_dilutor::handle( SpikeEvent& e ) { - B_.n_spikes_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.n_spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), static_cast< double >( e.get_multiplicity() ) ); } diff --git a/models/spike_generator.cpp b/models/spike_generator.cpp index 40032bded7..182d55ff71 100644 --- a/models/spike_generator.cpp +++ b/models/spike_generator.cpp @@ -387,7 +387,7 @@ nest::spike_generator::update( Time const& sliceT0, const long from, const long long lag = Time( tnext_stamp - sliceT0 ).get_steps() - 1; // all spikes are sent locally, so offset information is always preserved - kernel().event_delivery_manager.send( *this, *se, lag ); + kernel::manager< 
EventDeliveryManager >().send( *this, *se, lag ); delete se; } diff --git a/models/spike_generator.h b/models/spike_generator.h index a7f5ce41c8..38316a503d 100644 --- a/models/spike_generator.h +++ b/models/spike_generator.h @@ -379,7 +379,7 @@ nest::spike_generator::set_status( const DictionaryDatum& d ) } // throws if BadProperty - ptmp.set( d, S_, origin, kernel().simulation_manager.get_time(), this ); + ptmp.set( d, S_, origin, kernel::manager< SimulationManager >().get_time(), this ); // We now know that ptmp is consistent. We do not write it back // to P_ before we are also sure that the properties to be set diff --git a/models/spike_recorder.cpp b/models/spike_recorder.cpp index 760002abb7..042dd99e7c 100644 --- a/models/spike_recorder.cpp +++ b/models/spike_recorder.cpp @@ -83,7 +83,7 @@ nest::spike_recorder::get_status( DictionaryDatum& d ) const // if we are the device on thread 0, also get the data from the siblings on other threads if ( get_thread() == 0 ) { - const std::vector< Node* > siblings = kernel().node_manager.get_thread_siblings( get_node_id() ); + const std::vector< Node* > siblings = kernel::manager< NodeManager >().get_thread_siblings( get_node_id() ); std::vector< Node* >::const_iterator s; for ( s = siblings.begin() + 1; s != siblings.end(); ++s ) { diff --git a/models/spike_train_injector.cpp b/models/spike_train_injector.cpp index fc9b320d6e..ac5bec800c 100644 --- a/models/spike_train_injector.cpp +++ b/models/spike_train_injector.cpp @@ -303,7 +303,7 @@ spike_train_injector::pre_run_hook() // is not an exclusive precise spiking model if ( is_off_grid() ) { - kernel().event_delivery_manager.set_off_grid_communication( true ); + kernel::manager< EventDeliveryManager >().set_off_grid_communication( true ); LOG( M_INFO, "spike_train_injector::pre_run_hook", "Spike train injector has been configured to emit precisely timed " @@ -377,7 +377,7 @@ spike_train_injector::update( Time const& sliceT0, const long from, const long t // we need to 
subtract one from stamp which is added again in send() long lag = Time( tnext_stamp - sliceT0 ).get_steps() - 1; - kernel().event_delivery_manager.send( *this, se, lag ); + kernel::manager< EventDeliveryManager >().send( *this, se, lag ); } ++S_.position_; diff --git a/models/spike_train_injector.h b/models/spike_train_injector.h index 8d82529c1a..43aec86a9a 100644 --- a/models/spike_train_injector.h +++ b/models/spike_train_injector.h @@ -366,7 +366,7 @@ spike_train_injector::set_status( const DictionaryDatum& d ) } // throws if BadProperty - ptmp.set( d, S_, origin, kernel().simulation_manager.get_time(), this ); + ptmp.set( d, S_, origin, kernel::manager< SimulationManager >().get_time(), this ); // We now know that ptmp is consistent. We do not write it back // to P_ before we are also sure that the properties to be set diff --git a/models/spin_detector.cpp b/models/spin_detector.cpp index d080eed688..bc24f896c5 100644 --- a/models/spin_detector.cpp +++ b/models/spin_detector.cpp @@ -89,7 +89,7 @@ nest::spin_detector::get_status( DictionaryDatum& d ) const // siblings on other threads if ( get_thread() == 0 ) { - const std::vector< Node* > siblings = kernel().node_manager.get_thread_siblings( get_node_id() ); + const std::vector< Node* > siblings = kernel::manager< NodeManager >().get_thread_siblings( get_node_id() ); std::vector< Node* >::const_iterator s; for ( s = siblings.begin() + 1; s != siblings.end(); ++s ) { diff --git a/models/stdp_dopamine_synapse.cpp b/models/stdp_dopamine_synapse.cpp index 29cf8171f9..8768643573 100644 --- a/models/stdp_dopamine_synapse.cpp +++ b/models/stdp_dopamine_synapse.cpp @@ -86,8 +86,8 @@ STDPDopaCommonProperties::set_status( const DictionaryDatum& d, ConnectorModel& throw BadProperty( "Property volume_transmitter must be a single element NodeCollection" ); } - const size_t tid = kernel().vp_manager.get_thread_id(); - Node* vt_node = kernel().node_manager.get_node_or_proxy( ( *vt_datum )[ 0 ], tid ); + const size_t tid = 
kernel::manager< VPManager >().get_thread_id(); + Node* vt_node = kernel::manager< NodeManager >().get_node_or_proxy( ( *vt_datum )[ 0 ], tid ); volume_transmitter* vt = dynamic_cast< volume_transmitter* >( vt_node ); if ( not vt ) { diff --git a/models/stdp_dopamine_synapse.h b/models/stdp_dopamine_synapse.h index 118c0054ff..2cc220888c 100644 --- a/models/stdp_dopamine_synapse.h +++ b/models/stdp_dopamine_synapse.h @@ -394,7 +394,7 @@ void stdp_dopamine_synapse< targetidentifierT >::check_synapse_params( const DictionaryDatum& syn_spec ) const { // Setting of parameter c and n not thread safe. - if ( kernel().vp_manager.get_num_threads() > 1 ) + if ( kernel::manager< VPManager >().get_num_threads() > 1 ) { if ( syn_spec->known( names::c ) ) { @@ -456,7 +456,8 @@ stdp_dopamine_synapse< targetidentifierT >::process_dopa_spikes_( const std::vec // process dopa spikes in (t0, t1] // propagate weight from t0 to t1 if ( ( dopa_spikes.size() > dopa_spikes_idx_ + 1 ) - and ( t1 - dopa_spikes[ dopa_spikes_idx_ + 1 ].spike_time_ > -1.0 * kernel().connection_manager.get_stdp_eps() ) ) + and ( t1 - dopa_spikes[ dopa_spikes_idx_ + 1 ].spike_time_ + > -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ) ) { // there is at least 1 dopa spike in (t0, t1] // propagate weight up to first dopa spike and update dopamine trace @@ -470,7 +471,8 @@ stdp_dopamine_synapse< targetidentifierT >::process_dopa_spikes_( const std::vec // process remaining dopa spikes in (t0, t1] double cd; while ( ( dopa_spikes.size() > dopa_spikes_idx_ + 1 ) - and ( t1 - dopa_spikes[ dopa_spikes_idx_ + 1 ].spike_time_ > -1.0 * kernel().connection_manager.get_stdp_eps() ) ) + and ( t1 - dopa_spikes[ dopa_spikes_idx_ + 1 ].spike_time_ + > -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ) ) { // propagate weight up to next dopa spike and update dopamine trace // weight and dopamine trace n are at time of last dopa spike td but @@ -552,7 +554,7 @@ stdp_dopamine_synapse< targetidentifierT 
>::send( Event& e, size_t t, const STDP minus_dt = t_last_update_ - t0; // facilitate only in case of post- after presyn. spike // skip facilitation if pre- and postsyn. spike occur at the same time - if ( t_spike - start->t_ > kernel().connection_manager.get_stdp_eps() ) + if ( t_spike - start->t_ > kernel::manager< ConnectionManager >().get_stdp_eps() ) { facilitate_( Kplus_ * std::exp( minus_dt / cp.tau_plus_ ), cp ); } diff --git a/models/stdp_facetshw_synapse_hom.h b/models/stdp_facetshw_synapse_hom.h index 10287f5188..5916a51407 100644 --- a/models/stdp_facetshw_synapse_hom.h +++ b/models/stdp_facetshw_synapse_hom.h @@ -513,7 +513,7 @@ stdp_facetshw_synapse_hom< targetidentifierT >::send( Event& e, // get_history() should make sure that // start->t_ > t_lastspike_ - dendritic_delay, i.e. minus_dt < 0 - assert( minus_dt_causal < -1.0 * kernel().connection_manager.get_stdp_eps() ); + assert( minus_dt_causal < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); a_causal_ += std::exp( minus_dt_causal / cp.tau_plus_ ); diff --git a/models/stdp_nn_pre_centered_synapse.h b/models/stdp_nn_pre_centered_synapse.h index ec2791efbf..48478112cd 100644 --- a/models/stdp_nn_pre_centered_synapse.h +++ b/models/stdp_nn_pre_centered_synapse.h @@ -285,7 +285,7 @@ stdp_nn_pre_centered_synapse< targetidentifierT >::send( Event& e, size_t t, con // get_history() should make sure that // start->t_ > t_lastspike_ - dendritic_delay, i.e. 
minus_dt < 0 - assert( minus_dt < -1.0 * kernel().connection_manager.get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt / tau_plus_ ) ); diff --git a/models/stdp_nn_restr_synapse.h b/models/stdp_nn_restr_synapse.h index 7562266dc3..2760da388f 100644 --- a/models/stdp_nn_restr_synapse.h +++ b/models/stdp_nn_restr_synapse.h @@ -280,7 +280,7 @@ stdp_nn_restr_synapse< targetidentifierT >::send( Event& e, size_t t, const Comm // get_history() should make sure that // start->t_ > t_lastspike_ - dendritic_delay, i.e. minus_dt < 0 - assert( minus_dt < -1.0 * kernel().connection_manager.get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); weight_ = facilitate_( weight_, std::exp( minus_dt / tau_plus_ ) ); } diff --git a/models/stdp_nn_symm_synapse.h b/models/stdp_nn_symm_synapse.h index 92970ec8ec..065567568d 100644 --- a/models/stdp_nn_symm_synapse.h +++ b/models/stdp_nn_symm_synapse.h @@ -278,7 +278,7 @@ stdp_nn_symm_synapse< targetidentifierT >::send( Event& e, size_t t, const Commo // get_history() should make sure that // start->t_ > t_lastspike_ - dendritic_delay, i.e. minus_dt < 0 - assert( minus_dt < -1.0 * kernel().connection_manager.get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); weight_ = facilitate_( weight_, std::exp( minus_dt / tau_plus_ ) ); } diff --git a/models/stdp_pl_synapse_hom.h b/models/stdp_pl_synapse_hom.h index b01a79c79d..d791d719b2 100644 --- a/models/stdp_pl_synapse_hom.h +++ b/models/stdp_pl_synapse_hom.h @@ -281,7 +281,7 @@ stdp_pl_synapse_hom< targetidentifierT >::send( Event& e, size_t t, const STDPPL start++; // get_history() should make sure that // start->t_ > t_lastspike - dendritic_delay, i.e. 
minus_dt < 0 - assert( minus_dt < -1.0 * kernel().connection_manager.get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt * cp.tau_plus_inv_ ), cp ); } diff --git a/models/stdp_synapse.h b/models/stdp_synapse.h index 61b3cbcafa..5e63a1a76d 100644 --- a/models/stdp_synapse.h +++ b/models/stdp_synapse.h @@ -268,7 +268,7 @@ stdp_synapse< targetidentifierT >::send( Event& e, size_t t, const CommonSynapse ++start; // get_history() should make sure that // start->t_ > t_lastspike - dendritic_delay, i.e. minus_dt < 0 - assert( minus_dt < -1.0 * kernel().connection_manager.get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt / tau_plus_ ) ); } diff --git a/models/stdp_synapse_hom.h b/models/stdp_synapse_hom.h index 803f513585..f37486b4d1 100644 --- a/models/stdp_synapse_hom.h +++ b/models/stdp_synapse_hom.h @@ -316,7 +316,7 @@ stdp_synapse_hom< targetidentifierT >::send( Event& e, size_t t, const STDPHomCo ++start; // get_history() should make sure that // start->t_ > t_lastspike - dendritic_delay, i.e. minus_dt < 0 - assert( minus_dt < -1.0 * kernel().connection_manager.get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt / cp.tau_plus_ ), cp ); } diff --git a/models/stdp_triplet_synapse.h b/models/stdp_triplet_synapse.h index f2a8f8ced3..6db113e7a5 100644 --- a/models/stdp_triplet_synapse.h +++ b/models/stdp_triplet_synapse.h @@ -286,7 +286,7 @@ stdp_triplet_synapse< targetidentifierT >::send( Event& e, size_t t, const Commo ++start; // get_history() should make sure that // start->t_ > t_lastspike - dendritic_delay, i.e. 
minus_dt < 0 - assert( minus_dt < -1.0 * kernel().connection_manager.get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt / tau_plus_ ), ky ); } diff --git a/models/step_current_generator.cpp b/models/step_current_generator.cpp index ee7c852b4e..66d82cb2cb 100644 --- a/models/step_current_generator.cpp +++ b/models/step_current_generator.cpp @@ -309,7 +309,7 @@ nest::step_current_generator::update( Time const& origin, const long from, const CurrentEvent ce; ce.set_current( B_.amp_ ); S_.I_ = B_.amp_; - kernel().event_delivery_manager.send( *this, ce, offs ); + kernel::manager< EventDeliveryManager >().send( *this, ce, offs ); } B_.logger_.record_data( origin.get_steps() + offs ); } diff --git a/models/step_rate_generator.cpp b/models/step_rate_generator.cpp index f6ba8c432e..7d6c13360c 100644 --- a/models/step_rate_generator.cpp +++ b/models/step_rate_generator.cpp @@ -282,7 +282,7 @@ nest::step_rate_generator::update( Time const& origin, const long from, const lo const long t0 = origin.get_steps(); // allocate memory to store rates to be sent by rate events - const size_t buffer_size = kernel().connection_manager.get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); std::vector< double > new_rates( buffer_size, 0.0 ); // Skip any times in the past. 
Since we must send events proactively, @@ -324,7 +324,7 @@ nest::step_rate_generator::update( Time const& origin, const long from, const lo { DelayedRateConnectionEvent drve; drve.set_coeffarray( new_rates ); - kernel().event_delivery_manager.send_secondary( *this, drve ); + kernel::manager< EventDeliveryManager >().send_secondary( *this, drve ); } } diff --git a/models/vogels_sprekeler_synapse.h b/models/vogels_sprekeler_synapse.h index cf30380486..c12fe6cbdd 100644 --- a/models/vogels_sprekeler_synapse.h +++ b/models/vogels_sprekeler_synapse.h @@ -240,7 +240,7 @@ vogels_sprekeler_synapse< targetidentifierT >::send( Event& e, size_t t, const C ++start; // get_history() should make sure that // start->t_ > t_lastspike - dendritic_delay, i.e. minus_dt < 0 - assert( minus_dt < -1.0 * kernel().connection_manager.get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt / tau_ ) ); } diff --git a/models/volume_transmitter.cpp b/models/volume_transmitter.cpp index 69f048a03e..2762291530 100644 --- a/models/volume_transmitter.cpp +++ b/models/volume_transmitter.cpp @@ -94,7 +94,7 @@ void nest::volume_transmitter::pre_run_hook() { // +1 as pseudo dopa spike at t_trig is inserted after trigger_update_weight - B_.spikecounter_.reserve( kernel().connection_manager.get_min_delay() * P_.deliver_interval_ + 1 ); + B_.spikecounter_.reserve( kernel::manager< ConnectionManager >().get_min_delay() * P_.deliver_interval_ + 1 ); } void @@ -108,21 +108,23 @@ nest::volume_transmitter::update( const Time&, const long from, const long to ) multiplicity = B_.neuromodulatory_spikes_.get_value( lag ); if ( multiplicity > 0 ) { - t_spike = Time( Time::step( kernel().simulation_manager.get_slice_origin().get_steps() + lag + 1 ) ).get_ms(); + t_spike = + Time( Time::step( kernel::manager< SimulationManager >().get_slice_origin().get_steps() + lag + 1 ) ).get_ms(); 
B_.spikecounter_.push_back( spikecounter( t_spike, multiplicity ) ); } } // all spikes stored in spikecounter_ are delivered to the target synapses - if ( ( kernel().simulation_manager.get_slice_origin().get_steps() + to ) - % ( P_.deliver_interval_ * kernel().connection_manager.get_min_delay() ) + if ( ( kernel::manager< SimulationManager >().get_slice_origin().get_steps() + to ) + % ( P_.deliver_interval_ * kernel::manager< ConnectionManager >().get_min_delay() ) == 0 ) { - double t_trig = Time( Time::step( kernel().simulation_manager.get_slice_origin().get_steps() + to ) ).get_ms(); + double t_trig = + Time( Time::step( kernel::manager< SimulationManager >().get_slice_origin().get_steps() + to ) ).get_ms(); if ( not B_.spikecounter_.empty() ) { - kernel().connection_manager.trigger_update_weight( get_node_id(), B_.spikecounter_, t_trig ); + kernel::manager< ConnectionManager >().trigger_update_weight( get_node_id(), B_.spikecounter_, t_trig ); } // clear spikecounter @@ -137,6 +139,7 @@ nest::volume_transmitter::update( const Time&, const long from, const long to ) void nest::volume_transmitter::handle( SpikeEvent& e ) { - B_.neuromodulatory_spikes_.add_value( e.get_rel_delivery_steps( kernel().simulation_manager.get_slice_origin() ), + B_.neuromodulatory_spikes_.add_value( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), static_cast< double >( e.get_multiplicity() ) ); } diff --git a/models/weight_recorder.cpp b/models/weight_recorder.cpp index cd1f9f434d..82fe6edfca 100644 --- a/models/weight_recorder.cpp +++ b/models/weight_recorder.cpp @@ -168,7 +168,7 @@ nest::weight_recorder::get_status( DictionaryDatum& d ) const // siblings on other threads if ( get_thread() == 0 ) { - const std::vector< Node* > siblings = kernel().node_manager.get_thread_siblings( get_node_id() ); + const std::vector< Node* > siblings = kernel::manager< NodeManager >().get_thread_siblings( get_node_id() ); std::vector< Node* >::const_iterator s; 
for ( s = siblings.begin() + 1; s != siblings.end(); ++s ) { diff --git a/nest/neststartup.cpp b/nest/neststartup.cpp index b27ba0abc5..d6aaee614d 100644 --- a/nest/neststartup.cpp +++ b/nest/neststartup.cpp @@ -123,9 +123,8 @@ neststartup( int* argc, char*** argv, SLIInterpreter& engine, std::string module void nestshutdown( int exitcode ) { - nest::kernel().finalize(); - nest::kernel().mpi_manager.mpi_finalize( exitcode ); - nest::KernelManager::destroy_kernel_manager(); + nest::kernel::manager< nest::KernelManager >().finalize(); + nest::kernel::manager< nest::MPIManager >().mpi_finalize( exitcode ); } #if defined( HAVE_LIBNEUROSIM ) && defined( _IS_PYNEST ) @@ -161,7 +160,7 @@ set_communicator( PyObject* pyobj ) throw nest::KernelException( "set_communicator: argument is not a mpi4py communicator" ); } - nest::kernel().mpi_manager.set_communicator( *PyMPIComm_Get( pyobj ) ); + nest::kernel::manager< nest::MPIManager >().set_communicator( *PyMPIComm_Get( pyobj ) ); } #else // ! HAVE_MPI4PY diff --git a/nestkernel/archiving_node.cpp b/nestkernel/archiving_node.cpp index ba35130aa1..41f6af71fa 100644 --- a/nestkernel/archiving_node.cpp +++ b/nestkernel/archiving_node.cpp @@ -71,8 +71,8 @@ ArchivingNode::register_stdp_connection( double t_first_read, double delay ) // connections afterwards without leaving spikes in the history. // For details see bug #218. 
MH 08-04-22 - for ( std::deque< histentry >::iterator runner = history_.begin(); - runner != history_.end() and ( t_first_read - runner->t_ > -1.0 * kernel().connection_manager.get_stdp_eps() ); + for ( std::deque< histentry >::iterator runner = history_.begin(); runner != history_.end() + and ( t_first_read - runner->t_ > -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); ++runner ) { ( runner->access_counter_ )++; @@ -98,7 +98,7 @@ nest::ArchivingNode::get_K_value( double t ) int i = history_.size() - 1; while ( i >= 0 ) { - if ( t - history_[ i ].t_ > kernel().connection_manager.get_stdp_eps() ) + if ( t - history_[ i ].t_ > kernel::manager< ConnectionManager >().get_stdp_eps() ) { trace_ = ( history_[ i ].Kminus_ * std::exp( ( history_[ i ].t_ - t ) * tau_minus_inv_ ) ); return trace_; @@ -132,7 +132,7 @@ nest::ArchivingNode::get_K_values( double t, int i = history_.size() - 1; while ( i >= 0 ) { - if ( t - history_[ i ].t_ > kernel().connection_manager.get_stdp_eps() ) + if ( t - history_[ i ].t_ > kernel::manager< ConnectionManager >().get_stdp_eps() ) { K_triplet_value = ( history_[ i ].Kminus_triplet_ * std::exp( ( history_[ i ].t_ - t ) * tau_minus_triplet_inv_ ) ); @@ -163,8 +163,8 @@ nest::ArchivingNode::get_history( double t1, return; } std::deque< histentry >::reverse_iterator runner = history_.rbegin(); - const double t2_lim = t2 + kernel().connection_manager.get_stdp_eps(); - const double t1_lim = t1 + kernel().connection_manager.get_stdp_eps(); + const double t2_lim = t2 + kernel::manager< ConnectionManager >().get_stdp_eps(); + const double t1_lim = t1 + kernel::manager< ConnectionManager >().get_stdp_eps(); while ( runner != history_.rend() and runner->t_ >= t2_lim ) { ++runner; @@ -197,8 +197,9 @@ nest::ArchivingNode::set_spiketime( Time const& t_sp, double offset ) { const double next_t_sp = history_[ 1 ].t_; if ( history_.front().access_counter_ >= n_incoming_ - and t_sp_ms - next_t_sp > max_delay_ + Time::delay_steps_to_ms( 
kernel().connection_manager.get_min_delay() ) - + kernel().connection_manager.get_stdp_eps() ) + and t_sp_ms - next_t_sp > max_delay_ + + Time::delay_steps_to_ms( kernel::manager< ConnectionManager >().get_min_delay() ) + + kernel::manager< ConnectionManager >().get_stdp_eps() ) { history_.pop_front(); } diff --git a/nestkernel/buffer_resize_log.cpp b/nestkernel/buffer_resize_log.cpp index 948766245b..47df4c882d 100644 --- a/nestkernel/buffer_resize_log.cpp +++ b/nestkernel/buffer_resize_log.cpp @@ -49,7 +49,7 @@ BufferResizeLog::clear() void BufferResizeLog::add_entry( size_t global_max_spikes_sent, size_t new_buffer_size ) { - time_steps_.emplace_back( kernel().simulation_manager.get_clock().get_steps() ); + time_steps_.emplace_back( kernel::manager< SimulationManager >().get_clock().get_steps() ); global_max_spikes_sent_.emplace_back( global_max_spikes_sent ); new_buffer_size_.emplace_back( new_buffer_size ); } diff --git a/nestkernel/clopath_archiving_node.cpp b/nestkernel/clopath_archiving_node.cpp index 07f3e30038..2cc56412e2 100644 --- a/nestkernel/clopath_archiving_node.cpp +++ b/nestkernel/clopath_archiving_node.cpp @@ -72,7 +72,7 @@ nest::ClopathArchivingNode::init_clopath_buffers() // initialize the ltp-history ltd_hist_current_ = 0; - ltd_hist_len_ = kernel().connection_manager.get_max_delay() + 1; + ltd_hist_len_ = kernel::manager< ConnectionManager >().get_max_delay() + 1; ltd_history_.resize( ltd_hist_len_, histentry_extended( 0.0, 0.0, 0 ) ); } @@ -138,7 +138,7 @@ nest::ClopathArchivingNode::get_LTD_value( double t ) runner = ltd_history_.begin(); while ( runner != ltd_history_.end() ) { - if ( fabs( t - runner->t_ ) < kernel().connection_manager.get_stdp_eps() ) + if ( fabs( t - runner->t_ ) < kernel::manager< ConnectionManager >().get_stdp_eps() ) { return runner->dw_; } diff --git a/nestkernel/common_synapse_properties.cpp b/nestkernel/common_synapse_properties.cpp index 6bb03a4f6b..6e96d773a4 100644 --- a/nestkernel/common_synapse_properties.cpp 
+++ b/nestkernel/common_synapse_properties.cpp @@ -63,8 +63,8 @@ CommonSynapseProperties::set_status( const DictionaryDatum& d, ConnectorModel& ) throw BadProperty( "Property weight_recorder must be a single element NodeCollection" ); } - const size_t tid = kernel().vp_manager.get_thread_id(); - Node* wr_node = kernel().node_manager.get_node_or_proxy( ( *wr_datum )[ 0 ], tid ); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); + Node* wr_node = kernel::manager< NodeManager >().get_node_or_proxy( ( *wr_datum )[ 0 ], tid ); weight_recorder* wr = dynamic_cast< weight_recorder* >( wr_node ); if ( not wr ) { diff --git a/nestkernel/conn_builder.cpp b/nestkernel/conn_builder.cpp index 1169651b38..c847de9616 100644 --- a/nestkernel/conn_builder.cpp +++ b/nestkernel/conn_builder.cpp @@ -54,12 +54,8 @@ nest::ConnBuilder::ConnBuilder( const std::string& primary_rule, const std::vector< DictionaryDatum >& syn_specs ) : third_in_builder_( nullptr ) , third_out_builder_( nullptr ) - , primary_builder_( kernel().connection_manager.get_conn_builder( primary_rule, - sources, - targets, - third_out_builder_, - conn_spec, - syn_specs ) ) + , primary_builder_( kernel::manager< ConnectionManager >() + .get_conn_builder( primary_rule, sources, targets, third_out_builder_, conn_spec, syn_specs ) ) { } @@ -75,14 +71,14 @@ nest::ConnBuilder::ConnBuilder( const std::string& primary_rule, third, third_conn_spec, const_cast< std::map< Name, std::vector< DictionaryDatum > >& >( syn_specs )[ names::third_in ] ) ) - , third_out_builder_( kernel().connection_manager.get_third_conn_builder( third_rule, + , third_out_builder_( kernel::manager< ConnectionManager >().get_third_conn_builder( third_rule, third, targets, third_in_builder_, third_conn_spec, // const_cast here seems required, clang complains otherwise; try to clean up when Datums disappear const_cast< std::map< Name, std::vector< DictionaryDatum > >& >( syn_specs )[ names::third_out ] ) ) - , primary_builder_( 
kernel().connection_manager.get_conn_builder( primary_rule, + , primary_builder_( kernel::manager< ConnectionManager >().get_conn_builder( primary_rule, sources, targets, third_out_builder_, @@ -131,7 +127,7 @@ nest::BipartiteConnBuilder::BipartiteConnBuilder( NodeCollectionPTR sources, , allow_multapses_( true ) , make_symmetric_( false ) , creates_symmetric_connections_( false ) - , exceptions_raised_( kernel().vp_manager.get_num_threads() ) + , exceptions_raised_( kernel::manager< VPManager >().get_num_threads() ) , use_structural_plasticity_( false ) , parameters_requiring_skipping_() , param_dicts_() @@ -160,7 +156,7 @@ nest::BipartiteConnBuilder::BipartiteConnBuilder( NodeCollectionPTR sources, delays_.resize( syn_specs.size() ); synapse_params_.resize( syn_specs.size() ); synapse_model_id_.resize( syn_specs.size() ); - synapse_model_id_[ 0 ] = kernel().model_manager.get_synapse_model_id( "static_synapse" ); + synapse_model_id_[ 0 ] = kernel::manager< ModelManager >().get_synapse_model_id( "static_synapse" ); param_dicts_.resize( syn_specs.size() ); // loop through vector of synapse dictionaries, and set synapse parameters @@ -171,7 +167,8 @@ nest::BipartiteConnBuilder::BipartiteConnBuilder( NodeCollectionPTR sources, set_synapse_model_( syn_params, synapse_indx ); set_default_weight_or_delay_( syn_params, synapse_indx ); - DictionaryDatum syn_defaults = kernel().model_manager.get_connector_defaults( synapse_model_id_[ synapse_indx ] ); + DictionaryDatum syn_defaults = + kernel::manager< ModelManager >().get_connector_defaults( synapse_model_id_[ synapse_indx ] ); #ifdef HAVE_MUSIC // We allow music_channel as alias for receptor_type during connection setup @@ -235,9 +232,9 @@ nest::BipartiteConnBuilder::change_connected_synaptic_elements( size_t snode_id, int local = true; // check whether the source is on this mpi machine - if ( kernel().node_manager.is_local_node_id( snode_id ) ) + if ( kernel::manager< NodeManager >().is_local_node_id( snode_id ) ) { - 
Node* const source = kernel().node_manager.get_node_or_proxy( snode_id, tid ); + Node* const source = kernel::manager< NodeManager >().get_node_or_proxy( snode_id, tid ); const size_t source_thread = source->get_thread(); // check whether the source is on our thread @@ -249,13 +246,13 @@ nest::BipartiteConnBuilder::change_connected_synaptic_elements( size_t snode_id, } // check whether the target is on this mpi machine - if ( not kernel().node_manager.is_local_node_id( tnode_id ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( tnode_id ) ) { local = false; } else { - Node* const target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); // check whether the target is on our thread if ( tid != target_thread ) @@ -280,7 +277,7 @@ nest::BipartiteConnBuilder::connect() for ( auto synapse_model_id : synapse_model_id_ ) { const ConnectorModel& synapse_model = - kernel().model_manager.get_connection_model( synapse_model_id, /* thread */ 0 ); + kernel::manager< ModelManager >().get_connection_model( synapse_model_id, /* thread */ 0 ); const bool requires_symmetric = synapse_model.has_property( ConnectionModelProperties::REQUIRES_SYMMETRIC ); if ( requires_symmetric and not( is_symmetric() or make_symmetric_ ) ) @@ -329,7 +326,7 @@ nest::BipartiteConnBuilder::connect() } } // check if any exceptions have been raised - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { if ( exceptions_raised_.at( tid ).get() ) { @@ -351,7 +348,7 @@ nest::BipartiteConnBuilder::disconnect() } // check if any exceptions have been raised - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { if ( 
exceptions_raised_.at( tid ).get() ) { @@ -367,7 +364,8 @@ nest::BipartiteConnBuilder::update_param_dict_( size_t snode_id, RngPtr rng, size_t synapse_indx ) { - assert( kernel().vp_manager.get_num_threads() == static_cast< size_t >( param_dicts_[ synapse_indx ].size() ) ); + assert( + kernel::manager< VPManager >().get_num_threads() == static_cast< size_t >( param_dicts_[ synapse_indx ].size() ) ); for ( auto synapse_parameter : synapse_params_[ synapse_indx ] ) { @@ -402,7 +400,7 @@ nest::BipartiteConnBuilder::single_connect_( size_t snode_id, Node& target, size if ( default_weight_and_delay_[ synapse_indx ] ) { - kernel().connection_manager.connect( snode_id, + kernel::manager< ConnectionManager >().connect( snode_id, &target, target_thread, synapse_model_id_[ synapse_indx ], @@ -410,7 +408,7 @@ nest::BipartiteConnBuilder::single_connect_( size_t snode_id, Node& target, size } else if ( default_weight_[ synapse_indx ] ) { - kernel().connection_manager.connect( snode_id, + kernel::manager< ConnectionManager >().connect( snode_id, &target, target_thread, synapse_model_id_[ synapse_indx ], @@ -419,7 +417,7 @@ nest::BipartiteConnBuilder::single_connect_( size_t snode_id, Node& target, size } else if ( default_delay_[ synapse_indx ] ) { - kernel().connection_manager.connect( snode_id, + kernel::manager< ConnectionManager >().connect( snode_id, &target, target_thread, synapse_model_id_[ synapse_indx ], @@ -431,7 +429,7 @@ nest::BipartiteConnBuilder::single_connect_( size_t snode_id, Node& target, size { const double delay = delays_[ synapse_indx ]->value_double( target_thread, rng, snode_id, &target ); const double weight = weights_[ synapse_indx ]->value_double( target_thread, rng, snode_id, &target ); - kernel().connection_manager.connect( snode_id, + kernel::manager< ConnectionManager >().connect( snode_id, &target, target_thread, synapse_model_id_[ synapse_indx ], @@ -497,7 +495,7 @@ nest::BipartiteConnBuilder::all_parameters_scalar_() const bool 
nest::BipartiteConnBuilder::loop_over_targets_() const { - return targets_->size() < kernel().node_manager.size() or not targets_->is_range() + return targets_->size() < kernel::manager< NodeManager >().size() or not targets_->is_range() or parameters_requiring_skipping_.size() > 0; } @@ -511,18 +509,20 @@ nest::BipartiteConnBuilder::set_synapse_model_( DictionaryDatum syn_params, size const std::string syn_name = ( *syn_params )[ names::synapse_model ]; // The following call will throw "UnknownSynapseType" if syn_name is not naming a known model - const size_t synapse_model_id = kernel().model_manager.get_synapse_model_id( syn_name ); + const size_t synapse_model_id = kernel::manager< ModelManager >().get_synapse_model_id( syn_name ); synapse_model_id_[ synapse_indx ] = synapse_model_id; // We need to make sure that Connect can process all synapse parameters specified. - const ConnectorModel& synapse_model = kernel().model_manager.get_connection_model( synapse_model_id, /* thread */ 0 ); + const ConnectorModel& synapse_model = + kernel::manager< ModelManager >().get_connection_model( synapse_model_id, /* thread */ 0 ); synapse_model.check_synapse_params( syn_params ); } void nest::BipartiteConnBuilder::set_default_weight_or_delay_( DictionaryDatum syn_params, size_t synapse_indx ) { - DictionaryDatum syn_defaults = kernel().model_manager.get_connector_defaults( synapse_model_id_[ synapse_indx ] ); + DictionaryDatum syn_defaults = + kernel::manager< ModelManager >().get_connector_defaults( synapse_model_id_[ synapse_indx ] ); // All synapse models have the possibility to set the delay (see SynIdDelay), but some have // homogeneous weights, hence it should be possible to set the delay without the weight. @@ -538,19 +538,19 @@ nest::BipartiteConnBuilder::set_default_weight_or_delay_( DictionaryDatum syn_pa if ( not default_weight_and_delay_[ synapse_indx ] ) { weights_[ synapse_indx ] = syn_params->known( names::weight ) - ? 
ConnParameter::create( ( *syn_params )[ names::weight ], kernel().vp_manager.get_num_threads() ) - : ConnParameter::create( ( *syn_defaults )[ names::weight ], kernel().vp_manager.get_num_threads() ); + ? ConnParameter::create( ( *syn_params )[ names::weight ], kernel::manager< VPManager >().get_num_threads() ) + : ConnParameter::create( ( *syn_defaults )[ names::weight ], kernel::manager< VPManager >().get_num_threads() ); register_parameters_requiring_skipping_( *weights_[ synapse_indx ] ); delays_[ synapse_indx ] = syn_params->known( names::delay ) - ? ConnParameter::create( ( *syn_params )[ names::delay ], kernel().vp_manager.get_num_threads() ) - : ConnParameter::create( ( *syn_defaults )[ names::delay ], kernel().vp_manager.get_num_threads() ); + ? ConnParameter::create( ( *syn_params )[ names::delay ], kernel::manager< VPManager >().get_num_threads() ) + : ConnParameter::create( ( *syn_defaults )[ names::delay ], kernel::manager< VPManager >().get_num_threads() ); } else if ( default_weight_[ synapse_indx ] ) { delays_[ synapse_indx ] = syn_params->known( names::delay ) - ? ConnParameter::create( ( *syn_params )[ names::delay ], kernel().vp_manager.get_num_threads() ) - : ConnParameter::create( ( *syn_defaults )[ names::delay ], kernel().vp_manager.get_num_threads() ); + ? 
ConnParameter::create( ( *syn_params )[ names::delay ], kernel::manager< VPManager >().get_num_threads() ) + : ConnParameter::create( ( *syn_defaults )[ names::delay ], kernel::manager< VPManager >().get_num_threads() ); } register_parameters_requiring_skipping_( *delays_[ synapse_indx ] ); } @@ -571,14 +571,14 @@ nest::BipartiteConnBuilder::set_synapse_params( DictionaryDatum syn_defaults, if ( syn_params->known( param_name ) ) { synapse_params_[ synapse_indx ][ param_name ] = - ConnParameter::create( ( *syn_params )[ param_name ], kernel().vp_manager.get_num_threads() ); + ConnParameter::create( ( *syn_params )[ param_name ], kernel::manager< VPManager >().get_num_threads() ); register_parameters_requiring_skipping_( *synapse_params_[ synapse_indx ][ param_name ] ); } } // Now create dictionary with dummy values that we will use to pass settings to the synapses created. We // create it here once to avoid re-creating the object over and over again. - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { param_dicts_[ synapse_indx ].push_back( new Dictionary() ); @@ -658,14 +658,14 @@ nest::ThirdInBuilder::ThirdInBuilder( NodeCollectionPTR sources, const DictionaryDatum& third_conn_spec, const std::vector< DictionaryDatum >& syn_specs ) : BipartiteConnBuilder( sources, third, nullptr, third_conn_spec, syn_specs ) - , source_third_gids_( kernel().vp_manager.get_num_threads(), nullptr ) - , source_third_counts_( kernel().vp_manager.get_num_threads(), nullptr ) + , source_third_gids_( kernel::manager< VPManager >().get_num_threads(), nullptr ) + , source_third_counts_( kernel::manager< VPManager >().get_num_threads(), nullptr ) { #pragma omp parallel { - const size_t thrd = kernel().vp_manager.get_thread_id(); + const size_t thrd = kernel::manager< VPManager >().get_thread_id(); source_third_gids_[ thrd ] = new BlockVector< SourceThirdInfo_ >(); - 
source_third_counts_[ thrd ] = new std::vector< size_t >( kernel().mpi_manager.get_num_processes(), 0 ); + source_third_counts_[ thrd ] = new std::vector< size_t >( kernel::manager< MPIManager >().get_num_processes(), 0 ); } } @@ -673,7 +673,7 @@ nest::ThirdInBuilder::~ThirdInBuilder() { #pragma omp parallel { - const size_t thrd = kernel().vp_manager.get_thread_id(); + const size_t thrd = kernel::manager< VPManager >().get_thread_id(); delete source_third_gids_[ thrd ]; delete source_third_counts_[ thrd ]; } @@ -682,9 +682,9 @@ nest::ThirdInBuilder::~ThirdInBuilder() void nest::ThirdInBuilder::register_connection( size_t primary_source_id, size_t third_node_id ) { - const size_t tid = kernel().vp_manager.get_thread_id(); - const auto third_node_rank = - kernel().mpi_manager.get_process_id_of_vp( kernel().vp_manager.node_id_to_vp( third_node_id ) ); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const auto third_node_rank = kernel::manager< MPIManager >().get_process_id_of_vp( + kernel::manager< VPManager >().node_id_to_vp( third_node_id ) ); source_third_gids_[ tid ]->push_back( { primary_source_id, third_node_id, third_node_rank } ); ++( ( *source_third_counts_[ tid ] )[ third_node_rank ] ); } @@ -692,10 +692,10 @@ nest::ThirdInBuilder::register_connection( size_t primary_source_id, size_t thir void nest::ThirdInBuilder::connect_() { - kernel().vp_manager.assert_single_threaded(); + kernel::manager< VPManager >().assert_single_threaded(); // count up how many source-third pairs we need to send to each rank - const size_t num_ranks = kernel().mpi_manager.get_num_processes(); + const size_t num_ranks = kernel::manager< MPIManager >().get_num_processes(); std::vector< size_t > source_third_per_rank( num_ranks, 0 ); for ( auto stcp : source_third_counts_ ) { @@ -708,9 +708,9 @@ nest::ThirdInBuilder::connect_() // now find global maximum; for simplicity, we will use this to configure buffers std::vector< long > max_stc( num_ranks ); // 
MPIManager does not support size_t - max_stc[ kernel().mpi_manager.get_rank() ] = + max_stc[ kernel::manager< MPIManager >().get_rank() ] = *std::max_element( source_third_per_rank.begin(), source_third_per_rank.end() ); - kernel().mpi_manager.communicate( max_stc ); + kernel::manager< MPIManager >().communicate( max_stc ); const size_t global_max_stc = *std::max_element( max_stc.begin(), max_stc.end() ); if ( global_max_stc == 0 ) @@ -749,7 +749,7 @@ nest::ThirdInBuilder::connect_() // force to master thread for compatibility with MPI standard #pragma omp master { - kernel().mpi_manager.communicate_Alltoall( send_stg, recv_stg, send_recv_count ); + kernel::manager< MPIManager >().communicate_Alltoall( send_stg, recv_stg, send_recv_count ); } // Now recv_stg contains all source-third pairs where third is on current rank @@ -757,8 +757,8 @@ nest::ThirdInBuilder::connect_() #pragma omp parallel { - const size_t tid = kernel().vp_manager.get_thread_id(); - RngPtr rng = kernel().random_manager.get_vp_specific_rng( tid ); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); + RngPtr rng = kernel::manager< RandomManager >().get_vp_specific_rng( tid ); for ( size_t idx = 0; idx < recv_stg.size(); idx += 2 ) { @@ -772,11 +772,11 @@ nest::ThirdInBuilder::connect_() continue; } - if ( kernel().vp_manager.is_node_id_vp_local( third_gid ) ) + if ( kernel::manager< VPManager >().is_node_id_vp_local( third_gid ) ) { const auto source_gid = recv_stg[ idx + 1 ]; assert( source_gid > 0 ); - single_connect_( source_gid, *kernel().node_manager.get_node_or_proxy( third_gid, tid ), tid, rng ); + single_connect_( source_gid, *kernel::manager< NodeManager >().get_node_or_proxy( third_gid, tid ), tid, rng ); } } } @@ -802,7 +802,7 @@ nest::ThirdBernoulliWithPoolBuilder::ThirdBernoulliWithPoolBuilder( const NodeCo , random_pool_( true ) , pool_size_( third->size() ) , targets_per_third_( targets->size() / third->size() ) - , pools_( kernel().vp_manager.get_num_threads(), 
nullptr ) + , pools_( kernel::manager< VPManager >().get_num_threads(), nullptr ) { updateValue< double >( conn_spec, names::p, p_ ); updateValue< long >( conn_spec, names::pool_size, pool_size_ ); @@ -845,7 +845,7 @@ nest::ThirdBernoulliWithPoolBuilder::ThirdBernoulliWithPoolBuilder( const NodeCo #pragma omp parallel { - const size_t thrd = kernel().vp_manager.get_thread_id(); + const size_t thrd = kernel::manager< VPManager >().get_thread_id(); pools_[ thrd ] = new TgtPoolMap_(); } @@ -860,7 +860,7 @@ nest::ThirdBernoulliWithPoolBuilder::ThirdBernoulliWithPoolBuilder( const NodeCo size_t idx = 0; for ( auto tgt_it = targets_->begin(); tgt_it != targets_->end(); ++tgt_it ) { - Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id ); + Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id ); if ( not tgt->is_proxy() ) { tgt->set_tmp_nc_index( idx++ ); // must be postfix @@ -873,7 +873,7 @@ nest::ThirdBernoulliWithPoolBuilder::~ThirdBernoulliWithPoolBuilder() { #pragma omp parallel { - const size_t thrd = kernel().vp_manager.get_thread_id(); + const size_t thrd = kernel::manager< VPManager >().get_thread_id(); delete pools_[ thrd ]; if ( not random_pool_ ) @@ -884,7 +884,7 @@ nest::ThirdBernoulliWithPoolBuilder::~ThirdBernoulliWithPoolBuilder() // Here we can work in parallel since we just reset to invalid_index for ( auto tgt_it = targets_->thread_local_begin(); tgt_it != targets_->end(); ++tgt_it ) { - Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id, thrd ); + Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id, thrd ); assert( not tgt->is_proxy() ); tgt->set_tmp_nc_index( invalid_index ); } @@ -896,7 +896,7 @@ void nest::ThirdBernoulliWithPoolBuilder::third_connect( size_t primary_source_id, Node& primary_target ) { // We assume target is on this thread - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = 
kernel::manager< VPManager >().get_thread_id(); RngPtr rng = get_vp_specific_rng( tid ); // conditionally connect third factor @@ -969,7 +969,7 @@ nest::OneToOneBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { @@ -994,7 +994,7 @@ nest::OneToOneBuilder::connect_() continue; } - Node* const target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); if ( target->is_proxy() ) { // skip array parameters handled in other virtual processes @@ -1007,7 +1007,7 @@ nest::OneToOneBuilder::connect_() } else { - const SparseNodeArray& local_nodes = kernel().node_manager.get_local_nodes( tid ); + const SparseNodeArray& local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); SparseNodeArray::const_iterator n; for ( n = local_nodes.begin(); n != local_nodes.end(); ++n ) { @@ -1048,7 +1048,7 @@ nest::OneToOneBuilder::disconnect_() #pragma omp parallel { // get thread id - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { @@ -1062,13 +1062,13 @@ nest::OneToOneBuilder::disconnect_() const size_t snode_id = ( *source_it ).node_id; // check whether the target is on this mpi machine - if ( not kernel().node_manager.is_local_node_id( tnode_id ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( tnode_id ) ) { // Disconnecting: no parameter skipping required continue; } - Node* const target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); // check whether the target is a proxy @@ -1096,7 +1096,7 @@ nest::OneToOneBuilder::sp_connect_() #pragma omp parallel { // get thread id - const size_t tid 
= kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { @@ -1121,7 +1121,7 @@ nest::OneToOneBuilder::sp_connect_() skip_conn_parameter_( tid ); continue; } - Node* const target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); single_connect_( snode_id, *target, target_thread, rng ); @@ -1143,7 +1143,7 @@ nest::OneToOneBuilder::sp_disconnect_() #pragma omp parallel { // get thread id - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { @@ -1161,7 +1161,7 @@ nest::OneToOneBuilder::sp_disconnect_() continue; } - Node* const target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); single_disconnect_( snode_id, *target, target_thread ); @@ -1183,7 +1183,7 @@ nest::AllToAllBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { @@ -1195,7 +1195,7 @@ nest::AllToAllBuilder::connect_() for ( ; target_it < targets_->end(); ++target_it ) { const size_t tnode_id = ( *target_it ).node_id; - Node* const target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); if ( target->is_proxy() ) { skip_conn_parameter_( tid, sources_->size() ); @@ -1207,7 +1207,7 @@ nest::AllToAllBuilder::connect_() } else { - const SparseNodeArray& local_nodes = kernel().node_manager.get_local_nodes( tid ); + const SparseNodeArray& local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); SparseNodeArray::const_iterator n; 
for ( n = local_nodes.begin(); n != local_nodes.end(); ++n ) { @@ -1271,7 +1271,7 @@ nest::AllToAllBuilder::sp_connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { RngPtr rng = get_vp_specific_rng( tid ); @@ -1296,7 +1296,7 @@ nest::AllToAllBuilder::sp_connect_() skip_conn_parameter_( tid, sources_->size() ); continue; } - Node* const target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); single_connect_( snode_id, *target, target_thread, rng ); } @@ -1318,7 +1318,7 @@ nest::AllToAllBuilder::disconnect_() #pragma omp parallel { // get thread id - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { @@ -1328,13 +1328,13 @@ nest::AllToAllBuilder::disconnect_() const size_t tnode_id = ( *target_it ).node_id; // check whether the target is on this mpi machine - if ( not kernel().node_manager.is_local_node_id( tnode_id ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( tnode_id ) ) { // Disconnecting: no parameter skipping required continue; } - Node* const target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); // check whether the target is a proxy @@ -1367,7 +1367,7 @@ nest::AllToAllBuilder::sp_disconnect_() #pragma omp parallel { // get thread id - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { @@ -1386,7 +1386,7 @@ nest::AllToAllBuilder::sp_disconnect_() // Disconnecting: no parameter skipping required continue; } - Node* const target = 
kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); single_disconnect_( snode_id, *target, target_thread ); } @@ -1464,7 +1464,7 @@ nest::FixedInDegreeBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { @@ -1476,7 +1476,7 @@ nest::FixedInDegreeBuilder::connect_() for ( ; target_it < targets_->end(); ++target_it ) { const size_t tnode_id = ( *target_it ).node_id; - Node* const target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); const long indegree_value = std::round( indegree_->value( rng, target ) ); if ( target->is_proxy() ) @@ -1491,7 +1491,7 @@ nest::FixedInDegreeBuilder::connect_() } else { - const SparseNodeArray& local_nodes = kernel().node_manager.get_local_nodes( tid ); + const SparseNodeArray& local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); SparseNodeArray::const_iterator n; for ( n = local_nodes.begin(); n != local_nodes.end(); ++n ) { @@ -1639,7 +1639,7 @@ nest::FixedOutDegreeBuilder::connect_() std::vector< size_t > tgt_ids_; const long n_rnd = targets_->size(); - Node* source_node = kernel().node_manager.get_node_or_proxy( snode_id ); + Node* source_node = kernel::manager< NodeManager >().get_node_or_proxy( snode_id ); const long outdegree_value = std::round( outdegree_->value( grng, source_node ) ); for ( long j = 0; j < outdegree_value; ++j ) { @@ -1667,7 +1667,7 @@ nest::FixedOutDegreeBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { @@ -1676,7 +1676,7 @@ nest::FixedOutDegreeBuilder::connect_() 
std::vector< size_t >::const_iterator tnode_id_it = tgt_ids_.begin(); for ( ; tnode_id_it != tgt_ids_.end(); ++tnode_id_it ) { - Node* const target = kernel().node_manager.get_node_or_proxy( *tnode_id_it, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( *tnode_id_it, tid ); if ( target->is_proxy() ) { // skip array parameters handled in other virtual processes @@ -1736,7 +1736,7 @@ nest::FixedTotalNumberBuilder::FixedTotalNumberBuilder( NodeCollectionPTR source void nest::FixedTotalNumberBuilder::connect_() { - const int M = kernel().vp_manager.get_num_virtual_processes(); + const int M = kernel::manager< VPManager >().get_num_virtual_processes(); const long size_sources = sources_->size(); const long size_targets = targets_->size(); @@ -1746,12 +1746,12 @@ nest::FixedTotalNumberBuilder::connect_() // function std::vector< size_t > number_of_targets_on_vp( M, 0 ); std::vector< size_t > local_targets; - local_targets.reserve( size_targets / kernel().mpi_manager.get_num_processes() ); + local_targets.reserve( size_targets / kernel::manager< MPIManager >().get_num_processes() ); for ( size_t t = 0; t < targets_->size(); t++ ) { - int vp = kernel().vp_manager.node_id_to_vp( ( *targets_ )[ t ] ); + int vp = kernel::manager< VPManager >().node_id_to_vp( ( *targets_ )[ t ] ); ++number_of_targets_on_vp[ vp ]; - if ( kernel().vp_manager.is_local_vp( vp ) ) + if ( kernel::manager< VPManager >().is_local_vp( vp ) ) { local_targets.push_back( ( *targets_ )[ t ] ); } @@ -1807,13 +1807,13 @@ nest::FixedTotalNumberBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { - const size_t vp_id = kernel().vp_manager.thread_to_vp( tid ); + const size_t vp_id = kernel::manager< VPManager >().thread_to_vp( tid ); - if ( kernel().vp_manager.is_local_vp( vp_id ) ) + if ( kernel::manager< VPManager >().is_local_vp( vp_id ) 
) { RngPtr rng = get_vp_specific_rng( tid ); @@ -1824,7 +1824,7 @@ nest::FixedTotalNumberBuilder::connect_() std::vector< size_t >::const_iterator tnode_id_it = local_targets.begin(); for ( ; tnode_id_it != local_targets.end(); ++tnode_id_it ) { - if ( kernel().vp_manager.node_id_to_vp( *tnode_id_it ) == vp_id ) + if ( kernel::manager< VPManager >().node_id_to_vp( *tnode_id_it ) == vp_id ) { thread_local_targets.push_back( *tnode_id_it ); } @@ -1847,7 +1847,7 @@ nest::FixedTotalNumberBuilder::connect_() // targets_on_vp vector const long tnode_id = thread_local_targets[ t_index ]; - Node* const target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); if ( allow_autapses_ or snode_id != tnode_id ) @@ -1900,7 +1900,7 @@ nest::BernoulliBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { @@ -1912,7 +1912,7 @@ nest::BernoulliBuilder::connect_() for ( ; target_it < targets_->end(); ++target_it ) { const size_t tnode_id = ( *target_it ).node_id; - Node* const target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); if ( target->is_proxy() ) { // skip array parameters handled in other virtual processes @@ -1926,7 +1926,7 @@ nest::BernoulliBuilder::connect_() else { - const SparseNodeArray& local_nodes = kernel().node_manager.get_local_nodes( tid ); + const SparseNodeArray& local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); SparseNodeArray::const_iterator n; for ( n = local_nodes.begin(); n != local_nodes.end(); ++n ) { @@ -2018,7 +2018,7 @@ nest::PoissonBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = 
kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { @@ -2030,7 +2030,7 @@ nest::PoissonBuilder::connect_() for ( ; target_it < targets_->end(); ++target_it ) { const size_t tnode_id = ( *target_it ).node_id; - Node* const target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); if ( target->is_proxy() ) { // skip parameters handled in other virtual processes @@ -2043,7 +2043,7 @@ nest::PoissonBuilder::connect_() } else { - const SparseNodeArray& local_nodes = kernel().node_manager.get_local_nodes( tid ); + const SparseNodeArray& local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); SparseNodeArray::const_iterator n; for ( n = local_nodes.begin(); n != local_nodes.end(); ++n ) { @@ -2140,7 +2140,7 @@ nest::SymmetricBernoulliBuilder::connect_() { #pragma omp parallel { - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); // Use RNG generating same number sequence on all threads RngPtr synced_rng = get_vp_synced_rng( tid ); @@ -2168,7 +2168,7 @@ nest::SymmetricBernoulliBuilder::connect_() } assert( indegree < sources_->size() ); - target = kernel().node_manager.get_node_or_proxy( ( *tnode_id ).node_id, tid ); + target = kernel::manager< NodeManager >().get_node_or_proxy( ( *tnode_id ).node_id, tid ); target_thread = tid; // check whether the target is on this thread @@ -2194,7 +2194,7 @@ nest::SymmetricBernoulliBuilder::connect_() } previous_snode_ids.insert( snode_id ); - source = kernel().node_manager.get_node_or_proxy( snode_id, tid ); + source = kernel::manager< NodeManager >().get_node_or_proxy( snode_id, tid ); source_thread = tid; if ( source->is_proxy() ) @@ -2249,7 +2249,7 @@ nest::SPBuilder::update_delay( long& d ) const { if ( get_default_delay() ) { - DictionaryDatum syn_defaults = 
kernel().model_manager.get_connector_defaults( get_synapse_model() ); + DictionaryDatum syn_defaults = kernel::manager< ModelManager >().get_connector_defaults( get_synapse_model() ); const double delay = getValue< double >( syn_defaults, "delay" ); d = Time( Time::ms( delay ) ).get_steps(); } @@ -2261,7 +2261,7 @@ nest::SPBuilder::sp_connect( const std::vector< size_t >& sources, const std::ve connect_( sources, targets ); // check if any exceptions have been raised - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { if ( exceptions_raised_.at( tid ).get() ) { @@ -2298,7 +2298,7 @@ nest::SPBuilder::connect_( const std::vector< size_t >& sources, const std::vect #pragma omp parallel { // get thread id - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); try { @@ -2320,7 +2320,7 @@ nest::SPBuilder::connect_( const std::vector< size_t >& sources, const std::vect skip_conn_parameter_( tid ); continue; } - Node* const target = kernel().node_manager.get_node_or_proxy( *tnode_id_it, tid ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( *tnode_id_it, tid ); single_connect_( *snode_id_it, *target, tid, rng ); } diff --git a/nestkernel/conn_builder.h b/nestkernel/conn_builder.h index 4e24c7a60b..a4884d6675 100644 --- a/nestkernel/conn_builder.h +++ b/nestkernel/conn_builder.h @@ -872,7 +872,7 @@ BipartiteConnBuilder::single_disconnect_( size_t snode_id, Node& target, size_t { throw KernelException( "Can only disconnect when single element syn_spec has been used." 
); } - kernel().sp_manager.disconnect( snode_id, &target, target_thread, synapse_model_id_[ 0 ] ); + kernel::manager< SPManager >().disconnect( snode_id, &target, target_thread, synapse_model_id_[ 0 ] ); } } // namespace nest diff --git a/nestkernel/conn_builder_conngen.cpp b/nestkernel/conn_builder_conngen.cpp index 7c0547ab34..cbe075029e 100644 --- a/nestkernel/conn_builder_conngen.cpp +++ b/nestkernel/conn_builder_conngen.cpp @@ -89,7 +89,7 @@ ConnectionGeneratorBuilder::connect_() { // No need to check for locality of the target, as the mask // created by cg_set_masks() only contains local nodes. - Node* const target_node = kernel().node_manager.get_node_or_proxy( ( *targets_ )[ target ] ); + Node* const target_node = kernel::manager< NodeManager >().get_node_or_proxy( ( *targets_ )[ target ] ); const size_t target_thread = target_node->get_thread(); single_connect_( ( *sources_ )[ source ], *target_node, target_thread, rng ); } @@ -118,13 +118,13 @@ ConnectionGeneratorBuilder::connect_() { // No need to check for locality of the target node, as the mask // created by cg_set_masks() only contains local nodes. 
- Node* target_node = kernel().node_manager.get_node_or_proxy( ( *targets_ )[ target ] ); + Node* target_node = kernel::manager< NodeManager >().get_node_or_proxy( ( *targets_ )[ target ] ); const size_t target_thread = target_node->get_thread(); update_param_dict_( ( *sources_ )[ source ], *target_node, target_thread, rng, 0 ); // Use the low-level connect() here, as we need to pass a custom weight and delay - kernel().connection_manager.connect( ( *sources_ )[ source ], + kernel::manager< ConnectionManager >().connect( ( *sources_ )[ source ], target_node, target_thread, synapse_model_id_[ 0 ], @@ -143,7 +143,7 @@ ConnectionGeneratorBuilder::connect_() void ConnectionGeneratorBuilder::cg_set_masks() { - const size_t np = kernel().mpi_manager.get_num_processes(); + const size_t np = kernel::manager< MPIManager >().get_num_processes(); std::vector< ConnectionGenerator::Mask > masks( np, ConnectionGenerator::Mask( 1, np ) ); // The index of the left border of the currently looked at range @@ -203,7 +203,7 @@ ConnectionGeneratorBuilder::cg_set_masks() cg_idx_left += num_elements; } - cg_->setMask( masks, kernel().mpi_manager.get_rank() ); + cg_->setMask( masks, kernel::manager< MPIManager >().get_rank() ); } diff --git a/nestkernel/connection.h b/nestkernel/connection.h index ddec70f6bb..36b8678da3 100644 --- a/nestkernel/connection.h +++ b/nestkernel/connection.h @@ -362,7 +362,7 @@ Connection< targetidentifierT >::set_status( const DictionaryDatum& d, Connector double delay; if ( updateValue< double >( d, names::delay, delay ) ) { - kernel().connection_manager.get_delay_checker().assert_valid_delay_ms( delay ); + kernel::manager< ConnectionManager >().get_delay_checker().assert_valid_delay_ms( delay ); syn_id_delay_.set_delay_ms( delay ); } // no call to target_.set_status() because target and rport cannot be changed diff --git a/nestkernel/connection_creator.cpp b/nestkernel/connection_creator.cpp index 21b94e3153..157c22ec10 100644 --- 
a/nestkernel/connection_creator.cpp +++ b/nestkernel/connection_creator.cpp @@ -95,7 +95,7 @@ ConnectionCreator::ConnectionCreator( DictionaryDatum dict ) { // If not, we have single synapses. param_dicts_.resize( 1 ); - param_dicts_[ 0 ].resize( kernel().vp_manager.get_num_threads() ); + param_dicts_[ 0 ].resize( kernel::manager< VPManager >().get_num_threads() ); extract_params_( dict, param_dicts_[ 0 ] ); } @@ -104,9 +104,9 @@ ConnectionCreator::ConnectionCreator( DictionaryDatum dict ) // Set default synapse_model, weight and delay if not given explicitly if ( synapse_model_.empty() ) { - synapse_model_ = { kernel().model_manager.get_synapse_model_id( "static_synapse" ) }; + synapse_model_ = { kernel::manager< ModelManager >().get_synapse_model_id( "static_synapse" ) }; } - DictionaryDatum syn_defaults = kernel().model_manager.get_connector_defaults( synapse_model_[ 0 ] ); + DictionaryDatum syn_defaults = kernel::manager< ModelManager >().get_connector_defaults( synapse_model_[ 0 ] ); if ( weight_.empty() ) { weight_ = { NestModule::create_parameter( ( *syn_defaults )[ names::weight ] ) }; @@ -167,10 +167,10 @@ ConnectionCreator::extract_params_( const DictionaryDatum& dict_datum, std::vect std::string syn_name = ( *dict_datum )[ names::synapse_model ]; // The following call will throw "UnknownSynapseType" if syn_name is not naming a known model - const size_t synapse_model_id = kernel().model_manager.get_synapse_model_id( syn_name ); + const size_t synapse_model_id = kernel::manager< ModelManager >().get_synapse_model_id( syn_name ); synapse_model_.push_back( synapse_model_id ); - DictionaryDatum syn_defaults = kernel().model_manager.get_connector_defaults( synapse_model_id ); + DictionaryDatum syn_defaults = kernel::manager< ModelManager >().get_connector_defaults( synapse_model_id ); if ( dict_datum->known( names::weight ) ) { weight_.push_back( NestModule::create_parameter( ( *dict_datum )[ names::weight ] ) ); @@ -209,10 +209,10 @@ 
ConnectionCreator::extract_params_( const DictionaryDatum& dict_datum, std::vect copy_long_if_known( names::synapse_label ); copy_long_if_known( names::receptor_type ); - params.resize( kernel().vp_manager.get_num_threads() ); + params.resize( kernel::manager< VPManager >().get_num_threads() ); #pragma omp parallel { - params.at( kernel().vp_manager.get_thread_id() ) = syn_dict; + params.at( kernel::manager< VPManager >().get_thread_id() ) = syn_dict; } } diff --git a/nestkernel/connection_creator.h b/nestkernel/connection_creator.h index 087e5f43a8..0ff9838216 100644 --- a/nestkernel/connection_creator.h +++ b/nestkernel/connection_creator.h @@ -259,7 +259,7 @@ ConnectionCreator::connect_to_target_( Iterator from, { for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) { - kernel().connection_manager.connect( iter->second, + kernel::manager< ConnectionManager >().connect( iter->second, tgt_ptr, tgt_thread, synapse_model_[ indx ], @@ -304,7 +304,7 @@ ConnectionCreator::connect_to_target_poisson_( Iterator from, { for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) { - kernel().connection_manager.connect( iter->second, + kernel::manager< ConnectionManager >().connect( iter->second, tgt_ptr, tgt_thread, synapse_model_[ indx ], @@ -405,11 +405,12 @@ ConnectionCreator::pairwise_bernoulli_on_source_( Layer< D >& source, pool.define( source.get_global_positions_vector( source_nc ) ); } - std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( kernel().vp_manager.get_num_threads() ); + std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( + kernel::manager< VPManager >().get_num_threads() ); #pragma omp parallel { - const int thread_id = kernel().vp_manager.get_thread_id(); + const int thread_id = kernel::manager< VPManager >().get_thread_id(); try { NodeCollection::const_iterator target_begin = target_nc->begin(); @@ -417,7 +418,7 @@ ConnectionCreator::pairwise_bernoulli_on_source_( Layer< D >& source, for ( 
NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) { - Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); + Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); if ( not tgt->is_proxy() ) { @@ -444,7 +445,7 @@ ConnectionCreator::pairwise_bernoulli_on_source_( Layer< D >& source, } } // omp parallel // check if any exceptions have been raised - for ( size_t thr = 0; thr < kernel().vp_manager.get_num_threads(); ++thr ) + for ( size_t thr = 0; thr < kernel::manager< VPManager >().get_num_threads(); ++thr ) { if ( exceptions_raised_.at( thr ).get() ) { @@ -482,10 +483,11 @@ ConnectionCreator::pairwise_bernoulli_on_target_( Layer< D >& source, pool.define( source.get_global_positions_vector( source_nc ) ); } - std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( kernel().vp_manager.get_num_threads() ); + std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( + kernel::manager< VPManager >().get_num_threads() ); // We only need to check the first in the NodeCollection - Node* const first_in_tgt = kernel().node_manager.get_node_or_proxy( target_nc->operator[]( 0 ) ); + Node* const first_in_tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_nc->operator[]( 0 ) ); if ( not first_in_tgt->has_proxies() ) { throw IllegalConnection( "Spatial Connect with pairwise_bernoulli to devices is not possible." 
); @@ -493,7 +495,7 @@ ConnectionCreator::pairwise_bernoulli_on_target_( Layer< D >& source, #pragma omp parallel { - const int thread_id = kernel().vp_manager.get_thread_id(); + const int thread_id = kernel::manager< VPManager >().get_thread_id(); try { NodeCollection::const_iterator target_begin = target_nc->thread_local_begin(); @@ -501,7 +503,7 @@ ConnectionCreator::pairwise_bernoulli_on_target_( Layer< D >& source, for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) { - Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); + Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); assert( not tgt->is_proxy() ); @@ -530,7 +532,7 @@ ConnectionCreator::pairwise_bernoulli_on_target_( Layer< D >& source, } } // omp parallel // check if any exceptions have been raised - for ( size_t thr = 0; thr < kernel().vp_manager.get_num_threads(); ++thr ) + for ( size_t thr = 0; thr < kernel::manager< VPManager >().get_num_threads(); ++thr ) { if ( exceptions_raised_.at( thr ).get() ) { @@ -564,11 +566,12 @@ ConnectionCreator::pairwise_poisson_( Layer< D >& source, pool.define( source.get_global_positions_vector( source_nc ) ); } - std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( kernel().vp_manager.get_num_threads() ); + std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( + kernel::manager< VPManager >().get_num_threads() ); #pragma omp parallel { - const int thread_id = kernel().vp_manager.get_thread_id(); + const int thread_id = kernel::manager< VPManager >().get_thread_id(); try { NodeCollection::const_iterator target_begin = target_nc->begin(); @@ -576,7 +579,7 @@ ConnectionCreator::pairwise_poisson_( Layer< D >& source, for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) { - Node* const tgt = kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id, 
thread_id ); + Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); if ( not tgt->is_proxy() ) { @@ -603,7 +606,7 @@ ConnectionCreator::pairwise_poisson_( Layer< D >& source, } } // omp parallel // check if any exceptions have been raised - for ( size_t thr = 0; thr < kernel().vp_manager.get_num_threads(); ++thr ) + for ( size_t thr = 0; thr < kernel::manager< VPManager >().get_num_threads(); ++thr ) { if ( exceptions_raised_.at( thr ).get() ) { @@ -628,7 +631,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, // 3. Draw source nodes and make connections // We only need to check the first in the NodeCollection - Node* const first_in_tgt = kernel().node_manager.get_node_or_proxy( target_nc->operator[]( 0 ) ); + Node* const first_in_tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_nc->operator[]( 0 ) ); if ( not first_in_tgt->has_proxies() ) { throw IllegalConnection( "Spatial Connect with fixed_indegree to devices is not possible." 
); @@ -642,7 +645,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, // the network untouched if any target does not have proxies for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) { - assert( not kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id )->is_proxy() ); + assert( not kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id )->is_proxy() ); } if ( mask_.get() ) @@ -655,7 +658,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) { size_t target_id = ( *tgt_it ).node_id; - Node* const tgt = kernel().node_manager.get_node_or_proxy( target_id ); + Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_id ); size_t target_thread = tgt->get_thread(); RngPtr rng = get_vp_specific_rng( target_thread ); @@ -730,7 +733,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, { const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - kernel().connection_manager.connect( + kernel::manager< ConnectionManager >().connect( source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); } @@ -769,7 +772,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, { const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - kernel().connection_manager.connect( + kernel::manager< ConnectionManager >().connect( source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); } @@ -789,7 +792,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < 
target_end; ++tgt_it ) { size_t target_id = ( *tgt_it ).node_id; - Node* const tgt = kernel().node_manager.get_node_or_proxy( target_id ); + Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_id ); size_t target_thread = tgt->get_thread(); RngPtr rng = get_vp_specific_rng( target_thread ); Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); @@ -856,7 +859,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, { const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - kernel().connection_manager.connect( + kernel::manager< ConnectionManager >().connect( source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); } @@ -893,7 +896,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, { const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - kernel().connection_manager.connect( + kernel::manager< ConnectionManager >().connect( source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); } @@ -918,7 +921,7 @@ ConnectionCreator::fixed_outdegree_( Layer< D >& source, // the network untouched if any target does not have proxies // We only need to check the first in the NodeCollection - Node* const first_in_tgt = kernel().node_manager.get_node_or_proxy( target_nc->operator[]( 0 ) ); + Node* const first_in_tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_nc->operator[]( 0 ) ); if ( not first_in_tgt->has_proxies() ) { throw IllegalConnection( "Spatial Connect with fixed_outdegree to devices is not possible." 
); @@ -929,7 +932,7 @@ ConnectionCreator::fixed_outdegree_( Layer< D >& source, for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) { - assert( not kernel().node_manager.get_node_or_proxy( ( *tgt_it ).node_id )->is_proxy() ); + assert( not kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id )->is_proxy() ); } // Fixed_outdegree connections (fixed fan out) @@ -953,7 +956,7 @@ ConnectionCreator::fixed_outdegree_( Layer< D >& source, { const Position< D > source_pos = source_pos_node_id_pair.first; const size_t source_id = source_pos_node_id_pair.second; - const auto src = kernel().node_manager.get_node_or_proxy( source_id ); + const auto src = kernel::manager< NodeManager >().get_node_or_proxy( source_id ); const std::vector< double > source_pos_vector = source_pos.get_vector(); // We create a target pos vector here that can be updated with the @@ -974,7 +977,7 @@ ConnectionCreator::fixed_outdegree_( Layer< D >& source, { // TODO: Why is probability calculated in source layer, but weight and delay in target layer? 
target_pos_node_id_pair.first.get_vector( target_pos_vector ); - const auto tgt = kernel().node_manager.get_node_or_proxy( target_pos_node_id_pair.second ); + const auto tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_pos_node_id_pair.second ); probabilities.push_back( kernel_->value( grng, source_pos_vector, target_pos_vector, source, tgt ) ); } } @@ -1024,7 +1027,7 @@ ConnectionCreator::fixed_outdegree_( Layer< D >& source, std::vector< double > rng_delay_vec; for ( size_t indx = 0; indx < weight_.size(); ++indx ) { - const auto tgt = kernel().node_manager.get_node_or_proxy( target_pos_node_id_pairs[ indx ].second ); + const auto tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_pos_node_id_pairs[ indx ].second ); rng_weight_vec.push_back( weight_[ indx ]->value( grng, source_pos_vector, target_pos_vector, target, tgt ) ); rng_delay_vec.push_back( delay_[ indx ]->value( grng, source_pos_vector, target_pos_vector, target, tgt ) ); } @@ -1033,17 +1036,17 @@ ConnectionCreator::fixed_outdegree_( Layer< D >& source, // required for it. Each VP thus counts the connection as created, but only the VP hosting the // target neuron actually creates the connection. 
--number_of_connections; - if ( not kernel().node_manager.is_local_node_id( target_id ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( target_id ) ) { continue; } - Node* target_ptr = kernel().node_manager.get_node_or_proxy( target_id ); + Node* target_ptr = kernel::manager< NodeManager >().get_node_or_proxy( target_id ); const size_t target_thread = target_ptr->get_thread(); for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) { - kernel().connection_manager.connect( source_id, + kernel::manager< ConnectionManager >().connect( source_id, target_ptr, target_thread, synapse_model_[ indx ], diff --git a/nestkernel/connection_manager.cpp b/nestkernel/connection_manager.cpp index aa08aac7bd..4e2051eea2 100644 --- a/nestkernel/connection_manager.cpp +++ b/nestkernel/connection_manager.cpp @@ -128,7 +128,7 @@ ConnectionManager::initialize( const bool adjust_number_of_threads_or_rng_only ) sw_construction_connect.reset(); } - const size_t num_threads = kernel().vp_manager.get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); connections_.resize( num_threads ); secondary_recv_buffer_pos_.resize( num_threads ); compressed_spike_data_.resize( 0 ); @@ -140,11 +140,11 @@ ConnectionManager::initialize( const bool adjust_number_of_threads_or_rng_only ) // We need to obtain this while in serial context to avoid problems when // increasing the number of threads. 
- const size_t num_conn_models = kernel().model_manager.get_num_connection_models(); + const size_t num_conn_models = kernel::manager< ModelManager >().get_num_connection_models(); #pragma omp parallel { - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); connections_.at( tid ) = std::vector< ConnectorBase* >( num_conn_models ); secondary_recv_buffer_pos_.at( tid ) = std::vector< std::vector< size_t > >(); } // of omp parallel @@ -153,10 +153,11 @@ ConnectionManager::initialize( const bool adjust_number_of_threads_or_rng_only ) target_table_.initialize(); target_table_devices_.initialize(); - std::vector< DelayChecker > tmp( kernel().vp_manager.get_num_threads() ); + std::vector< DelayChecker > tmp( kernel::manager< VPManager >().get_num_threads() ); delay_checkers_.swap( tmp ); - std::vector< std::vector< size_t > > tmp2( kernel().vp_manager.get_num_threads(), std::vector< size_t >() ); + std::vector< std::vector< size_t > > tmp2( + kernel::manager< VPManager >().get_num_threads(), std::vector< size_t >() ); num_connections_.swap( tmp2 ); } @@ -198,7 +199,7 @@ ConnectionManager::set_status( const DictionaryDatum& d ) } updateValue< bool >( d, names::keep_source_table, keep_source_table_ ); - if ( not keep_source_table_ and kernel().sp_manager.is_structural_plasticity_enabled() ) + if ( not keep_source_table_ and kernel::manager< SPManager >().is_structural_plasticity_enabled() ) { throw KernelException( "If structural plasticity is enabled, keep_source_table can not be set " @@ -217,7 +218,7 @@ ConnectionManager::set_status( const DictionaryDatum& d ) DelayChecker& ConnectionManager::get_delay_checker() { - return delay_checkers_[ kernel().vp_manager.get_thread_id() ]; + return delay_checkers_[ kernel::manager< VPManager >().get_thread_id() ]; } void @@ -249,18 +250,18 @@ ConnectionManager::get_synapse_status( const size_t source_node_id, const synindex syn_id, const size_t lcid ) const { - 
kernel().model_manager.assert_valid_syn_id( syn_id, kernel().vp_manager.get_thread_id() ); + kernel::manager< ModelManager >().assert_valid_syn_id( syn_id, kernel::manager< VPManager >().get_thread_id() ); DictionaryDatum dict( new Dictionary ); ( *dict )[ names::source ] = source_node_id; ( *dict )[ names::synapse_model ] = - LiteralDatum( kernel().model_manager.get_connection_model( syn_id, /* thread */ 0 ).get_name() ); + LiteralDatum( kernel::manager< ModelManager >().get_connection_model( syn_id, /* thread */ 0 ).get_name() ); ( *dict )[ names::target_thread ] = tid; ( *dict )[ names::synapse_id ] = syn_id; ( *dict )[ names::port ] = lcid; - const Node* source = kernel().node_manager.get_node_or_proxy( source_node_id, tid ); - const Node* target = kernel().node_manager.get_node_or_proxy( target_node_id, tid ); + const Node* source = kernel::manager< NodeManager >().get_node_or_proxy( source_node_id, tid ); + const Node* target = kernel::manager< NodeManager >().get_node_or_proxy( target_node_id, tid ); // synapses from neurons to neurons and from neurons to globally // receiving devices @@ -295,14 +296,14 @@ ConnectionManager::set_synapse_status( const size_t source_node_id, const size_t lcid, const DictionaryDatum& dict ) { - kernel().model_manager.assert_valid_syn_id( syn_id, kernel().vp_manager.get_thread_id() ); + kernel::manager< ModelManager >().assert_valid_syn_id( syn_id, kernel::manager< VPManager >().get_thread_id() ); - const Node* source = kernel().node_manager.get_node_or_proxy( source_node_id, tid ); - const Node* target = kernel().node_manager.get_node_or_proxy( target_node_id, tid ); + const Node* source = kernel::manager< NodeManager >().get_node_or_proxy( source_node_id, tid ); + const Node* target = kernel::manager< NodeManager >().get_node_or_proxy( target_node_id, tid ); try { - ConnectorModel& cm = kernel().model_manager.get_connection_model( syn_id, tid ); + ConnectorModel& cm = kernel::manager< ModelManager >().get_connection_model( 
syn_id, tid ); // synapses from neurons to neurons and from neurons to globally // receiving devices if ( ( source->has_proxies() and target->has_proxies() and connections_[ tid ][ syn_id ] ) @@ -329,7 +330,7 @@ ConnectionManager::set_synapse_status( const size_t source_node_id, { throw BadProperty( String::compose( "Setting status of '%1' connecting from node ID %2 to node ID %3 via port %4: %5", - kernel().model_manager.get_connection_model( syn_id, tid ).get_name(), + kernel::manager< ModelManager >().get_connection_model( syn_id, tid ).get_name(), source_node_id, target_node_id, lcid, @@ -434,7 +435,7 @@ ConnectionManager::get_third_conn_builder( const std::string& name, void ConnectionManager::calibrate( const TimeConverter& tc ) { - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { delay_checkers_[ tid ].calibrate( tc ); } @@ -499,15 +500,15 @@ ConnectionManager::connect( TokenArray sources, TokenArray targets, const Dictio { const std::string synmodel_name = getValue< std::string >( synmodel ); // The following throws UnknownSynapseType for invalid synmodel_name - syn_id = kernel().model_manager.get_synapse_model_id( synmodel_name ); + syn_id = kernel::manager< ModelManager >().get_synapse_model_id( synmodel_name ); } // Connect all sources to all targets for ( auto&& source : sources ) { - auto source_node = kernel().node_manager.get_node_or_proxy( source ); + auto source_node = kernel::manager< NodeManager >().get_node_or_proxy( source ); for ( auto&& target : targets ) { - auto target_node = kernel().node_manager.get_node_or_proxy( target ); + auto target_node = kernel::manager< NodeManager >().get_node_or_proxy( target ); auto target_thread = target_node->get_thread(); connect_( *source_node, *target_node, source, target_thread, syn_id, syn_spec ); } @@ -518,7 +519,7 @@ ConnectionManager::connect( TokenArray sources, TokenArray targets, 
const Dictio void ConnectionManager::update_delay_extrema_() { - if ( kernel().simulation_manager.has_been_simulated() ) + if ( kernel::manager< SimulationManager >().has_been_simulated() ) { // Once simulation has started, min/max_delay can no longer change, // so there is nothing to update. @@ -532,8 +533,8 @@ ConnectionManager::update_delay_extrema_() { // If no min/max_delay is set explicitly, then the default delay used by the // SPBuilders have to be respected for min/max_delay. - min_delay_ = std::min( min_delay_, kernel().sp_manager.builder_min_delay() ); - max_delay_ = std::max( max_delay_, kernel().sp_manager.builder_max_delay() ); + min_delay_ = std::min( min_delay_, kernel::manager< SPManager >().builder_min_delay() ); + max_delay_ = std::max( max_delay_, kernel::manager< SPManager >().builder_max_delay() ); } // If the user explicitly set min/max_delay, this happend on all MPI ranks, @@ -541,17 +542,18 @@ ConnectionManager::update_delay_extrema_() // explicitly, Connect() cannot induce new extrema. Thuse, we only need to communicate // with other ranks if the user has not set the extrema and connections may have // been created. 
- if ( not kernel().connection_manager.get_user_set_delay_extrema() - and kernel().connection_manager.connections_have_changed() and kernel().mpi_manager.get_num_processes() > 1 ) + if ( not kernel::manager< ConnectionManager >().get_user_set_delay_extrema() + and kernel::manager< ConnectionManager >().connections_have_changed() + and kernel::manager< MPIManager >().get_num_processes() > 1 ) { - std::vector< long > min_delays( kernel().mpi_manager.get_num_processes() ); - min_delays[ kernel().mpi_manager.get_rank() ] = min_delay_; - kernel().mpi_manager.communicate( min_delays ); + std::vector< long > min_delays( kernel::manager< MPIManager >().get_num_processes() ); + min_delays[ kernel::manager< MPIManager >().get_rank() ] = min_delay_; + kernel::manager< MPIManager >().communicate( min_delays ); min_delay_ = *std::min_element( min_delays.begin(), min_delays.end() ); - std::vector< long > max_delays( kernel().mpi_manager.get_num_processes() ); - max_delays[ kernel().mpi_manager.get_rank() ] = max_delay_; - kernel().mpi_manager.communicate( max_delays ); + std::vector< long > max_delays( kernel::manager< MPIManager >().get_num_processes() ); + max_delays[ kernel::manager< MPIManager >().get_rank() ] = max_delay_; + kernel::manager< MPIManager >().communicate( max_delays ); max_delay_ = *std::max_element( max_delays.begin(), max_delays.end() ); } @@ -571,9 +573,9 @@ ConnectionManager::connect( const size_t snode_id, const double delay, const double weight ) { - kernel().model_manager.assert_valid_syn_id( syn_id, kernel().vp_manager.get_thread_id() ); + kernel::manager< ModelManager >().assert_valid_syn_id( syn_id, kernel::manager< VPManager >().get_thread_id() ); - Node* source = kernel().node_manager.get_node_or_proxy( snode_id, target_thread ); + Node* source = kernel::manager< NodeManager >().get_node_or_proxy( snode_id, target_thread ); ConnectionType connection_type = connection_required( source, target, target_thread ); @@ -600,18 +602,18 @@ 
ConnectionManager::connect( const size_t snode_id, const DictionaryDatum& params, const synindex syn_id ) { - kernel().model_manager.assert_valid_syn_id( syn_id, kernel().vp_manager.get_thread_id() ); + kernel::manager< ModelManager >().assert_valid_syn_id( syn_id, kernel::manager< VPManager >().get_thread_id() ); - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); - if ( not kernel().node_manager.is_local_node_id( tnode_id ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( tnode_id ) ) { return false; } - Node* target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); - Node* source = kernel().node_manager.get_node_or_proxy( snode_id, target_thread ); + Node* source = kernel::manager< NodeManager >().get_node_or_proxy( snode_id, target_thread ); ConnectionType connection_type = connection_required( source, target, target_thread ); bool connected = true; @@ -662,13 +664,13 @@ ConnectionManager::connect_arrays( long* sources, } } - const auto synapse_model_id = kernel().model_manager.get_synapse_model_id( syn_model ); - const auto syn_model_defaults = kernel().model_manager.get_connector_defaults( synapse_model_id ); + const auto synapse_model_id = kernel::manager< ModelManager >().get_synapse_model_id( syn_model ); + const auto syn_model_defaults = kernel::manager< ModelManager >().get_connector_defaults( synapse_model_id ); // Dictionary holding additional synapse parameters, passed to the connect call. 
std::vector< DictionaryDatum > param_dicts; - param_dicts.reserve( kernel().vp_manager.get_num_threads() ); - for ( size_t i = 0; i < kernel().vp_manager.get_num_threads(); ++i ) + param_dicts.reserve( kernel::manager< VPManager >().get_num_threads() ); + for ( size_t i = 0; i < kernel::manager< VPManager >().get_num_threads(); ++i ) { param_dicts.emplace_back( new Dictionary ); for ( auto& param_key : p_keys ) @@ -711,11 +713,12 @@ ConnectionManager::connect_arrays( long* sources, set_connections_have_changed(); // Vector for storing exceptions raised by threads. - std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised( kernel().vp_manager.get_num_threads() ); + std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised( + kernel::manager< VPManager >().get_num_threads() ); #pragma omp parallel { - const auto tid = kernel().vp_manager.get_thread_id(); + const auto tid = kernel::manager< VPManager >().get_thread_id(); try { auto s = sources; @@ -728,15 +731,15 @@ ConnectionManager::connect_arrays( long* sources, for ( ; s != sources + n; ++s, ++t, ++index_counter ) { - if ( 0 >= *s or static_cast< size_t >( *s ) > kernel().node_manager.size() ) + if ( 0 >= *s or static_cast< size_t >( *s ) > kernel::manager< NodeManager >().size() ) { throw UnknownNode( *s ); } - if ( 0 >= *t or static_cast< size_t >( *t ) > kernel().node_manager.size() ) + if ( 0 >= *t or static_cast< size_t >( *t ) > kernel::manager< NodeManager >().size() ) { throw UnknownNode( *t ); } - auto target_node = kernel().node_manager.get_node_or_proxy( *t, tid ); + auto target_node = kernel::manager< NodeManager >().get_node_or_proxy( *t, tid ); if ( target_node->is_proxy() ) { increment_wd( w, d ); @@ -799,7 +802,7 @@ ConnectionManager::connect_arrays( long* sources, } } // check if any exceptions have been raised - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager 
>().get_num_threads(); ++tid ) { if ( exceptions_raised.at( tid ).get() ) { @@ -896,7 +899,7 @@ ConnectionManager::connect_( Node& source, const double delay, const double weight ) { - ConnectorModel& conn_model = kernel().model_manager.get_connection_model( syn_id, tid ); + ConnectorModel& conn_model = kernel::manager< ModelManager >().get_connection_model( syn_id, tid ); const bool clopath_archiving = conn_model.has_property( ConnectionModelProperties::REQUIRES_CLOPATH_ARCHIVING ); if ( clopath_archiving and not dynamic_cast< ClopathArchivingNode* >( &target ) ) @@ -1040,7 +1043,7 @@ ConnectionManager::trigger_update_weight( const long vt_id, const std::vector< spikecounter >& dopa_spikes, const double t_trig ) { - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); for ( std::vector< ConnectorBase* >::iterator it = connections_[ tid ].begin(); it != connections_[ tid ].end(); ++it ) @@ -1048,7 +1051,7 @@ ConnectionManager::trigger_update_weight( const long vt_id, if ( *it ) { ( *it )->trigger_update_weight( - vt_id, tid, dopa_spikes, t_trig, kernel().model_manager.get_connection_models( tid ) ); + vt_id, tid, dopa_spikes, t_trig, kernel::manager< ModelManager >().get_connection_models( tid ) ); } } } @@ -1138,12 +1141,12 @@ ConnectionManager::get_connections( const DictionaryDatum& params ) // Check whether waveform relaxation is used on any MPI process; // needs to be called before update_connection_infrastructure since // it resizes coefficient arrays for secondary events - kernel().node_manager.check_wfr_use(); + kernel::manager< NodeManager >().check_wfr_use(); #pragma omp parallel { - const size_t tid = kernel().vp_manager.get_thread_id(); - kernel().simulation_manager.update_connection_infrastructure( tid ); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); + kernel::manager< SimulationManager >().update_connection_infrastructure( tid ); } } @@ -1153,12 +1156,12 @@ 
ConnectionManager::get_connections( const DictionaryDatum& params ) { const std::string synmodel_name = getValue< std::string >( syn_model_t ); // The following throws UnknownSynapseType for invalid synmodel_name - syn_id = kernel().model_manager.get_synapse_model_id( synmodel_name ); + syn_id = kernel::manager< ModelManager >().get_synapse_model_id( synmodel_name ); get_connections( connectome, source_a, target_a, syn_id, synapse_label ); } else { - for ( syn_id = 0; syn_id < kernel().model_manager.get_num_connection_models(); ++syn_id ) + for ( syn_id = 0; syn_id < kernel::manager< ModelManager >().get_num_connection_models(); ++syn_id ) { get_connections( connectome, source_a, target_a, syn_id, synapse_label ); } @@ -1202,7 +1205,7 @@ ConnectionManager::split_to_neuron_device_vectors_( const size_t tid, for ( ; t_id < nodecollection->end(); ++t_id ) { const size_t node_id = ( *t_id ).node_id; - const auto node = kernel().node_manager.get_node_or_proxy( node_id, tid ); + const auto node = kernel::manager< NodeManager >().get_node_or_proxy( node_id, tid ); // Normal neuron nodes have proxies. Globally receiving devices, e.g. volume transmitter, don't have a local // receiver, but are connected in the same way as normal neuron nodes. Therefore they have to be treated as such // here. @@ -1369,7 +1372,7 @@ nest::ConnectionManager::get_connections( std::deque< ConnectionID >& connectome throw KernelException( "Invalid attempt to access connection information: source table was cleared." 
); } - size_t tid = kernel().vp_manager.get_thread_id(); + size_t tid = kernel::manager< VPManager >().get_thread_id(); std::deque< ConnectionID > conns_in_thread; @@ -1421,7 +1424,7 @@ ConnectionManager::get_sources( const std::vector< size_t >& targets, ( *i ).clear(); } - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { for ( size_t i = 0; i < targets.size(); ++i ) { @@ -1442,7 +1445,7 @@ ConnectionManager::get_targets( const std::vector< size_t >& sources, ( *i ).clear(); } - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { for ( size_t i = 0; i < sources.size(); ++i ) { @@ -1479,23 +1482,23 @@ ConnectionManager::compute_target_data_buffer_size() // has its own data structures, we need to count connections on every // thread separately to compute the total number of sources. size_t num_target_data = 0; - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { num_target_data += get_num_target_data( tid ); } // Determine maximum number of target data across all ranks, because // all ranks need identically sized buffers. 
- std::vector< long > global_num_target_data( kernel().mpi_manager.get_num_processes() ); - global_num_target_data[ kernel().mpi_manager.get_rank() ] = num_target_data; - kernel().mpi_manager.communicate( global_num_target_data ); + std::vector< long > global_num_target_data( kernel::manager< MPIManager >().get_num_processes() ); + global_num_target_data[ kernel::manager< MPIManager >().get_rank() ] = num_target_data; + kernel::manager< MPIManager >().communicate( global_num_target_data ); const size_t max_num_target_data = *std::max_element( global_num_target_data.begin(), global_num_target_data.end() ); // MPI buffers should have at least two entries per process - const size_t min_num_target_data = 2 * kernel().mpi_manager.get_num_processes(); + const size_t min_num_target_data = 2 * kernel::manager< MPIManager >().get_num_processes(); // Adjust target data buffers accordingly - kernel().mpi_manager.set_buffer_size_target_data( std::max( min_num_target_data, max_num_target_data ) ); + kernel::manager< MPIManager >().set_buffer_size_target_data( std::max( min_num_target_data, max_num_target_data ) ); } void @@ -1516,7 +1519,7 @@ ConnectionManager::compute_compressed_secondary_recv_buffer_positions( const siz if ( connections_[ tid ][ syn_id ] ) { - ConnectorModel& conn_model = kernel().model_manager.get_connection_model( syn_id, tid ); + ConnectorModel& conn_model = kernel::manager< ModelManager >().get_connection_model( syn_id, tid ); const bool is_primary = conn_model.has_property( ConnectionModelProperties::IS_PRIMARY ); if ( not is_primary ) @@ -1531,10 +1534,10 @@ ConnectionManager::compute_compressed_secondary_recv_buffer_positions( const siz { const size_t source_node_id = source_table_.get_node_id( tid, syn_id, lcid ); const size_t sg_s_id = source_table_.pack_source_node_id_and_syn_id( source_node_id, syn_id ); - const size_t source_rank = kernel().mpi_manager.get_process_id_of_node_id( source_node_id ); + const size_t source_rank = kernel::manager< 
MPIManager >().get_process_id_of_node_id( source_node_id ); positions[ lcid ] = buffer_pos_of_source_node_id_syn_id_[ sg_s_id ] - + kernel().mpi_manager.get_recv_displacement_secondary_events_in_int( source_rank ); + + kernel::manager< MPIManager >().get_recv_displacement_secondary_events_in_int( source_rank ); } } } @@ -1548,8 +1551,8 @@ ConnectionManager::connection_required( Node*& source, Node*& target, size_t tid // proxy and that it is on thread tid. assert( not target->is_proxy() ); size_t target_vp = target->get_vp(); - assert( kernel().vp_manager.is_local_vp( target_vp ) ); - assert( kernel().vp_manager.vp_to_thread( target_vp ) == tid ); + assert( kernel::manager< VPManager >().is_local_vp( target_vp ) ); + assert( kernel::manager< VPManager >().vp_to_thread( target_vp ) == tid ); // Connections to nodes with proxies (neurons or devices with // proxies) which are local to tid have always to be @@ -1577,7 +1580,7 @@ ConnectionManager::connection_required( Node*& source, Node*& target, size_t tid // source may be a proxy on tid. 
if ( target->one_node_per_process() ) { - if ( kernel().node_manager.is_local_node( source ) ) + if ( kernel::manager< NodeManager >().is_local_node( source ) ) { return CONNECT_TO_DEVICE; } @@ -1603,14 +1606,14 @@ ConnectionManager::connection_required( Node*& source, Node*& target, size_t tid if ( not source->has_proxies() ) { const size_t target_node_id = target->get_node_id(); - target_vp = kernel().vp_manager.node_id_to_vp( target_node_id ); - const bool target_vp_local = kernel().vp_manager.is_local_vp( target_vp ); - const size_t target_thread = kernel().vp_manager.vp_to_thread( target_vp ); + target_vp = kernel::manager< VPManager >().node_id_to_vp( target_node_id ); + const bool target_vp_local = kernel::manager< VPManager >().is_local_vp( target_vp ); + const size_t target_thread = kernel::manager< VPManager >().vp_to_thread( target_vp ); if ( target_vp_local and target_thread == tid ) { const size_t source_node_id = source->get_node_id(); - source = kernel().node_manager.get_node_or_proxy( source_node_id, target_thread ); + source = kernel::manager< NodeManager >().get_node_or_proxy( source_node_id, target_thread ); return CONNECT_FROM_DEVICE; } } @@ -1623,7 +1626,7 @@ ConnectionManager::connection_required( Node*& source, Node*& target, size_t tid { if ( source->has_proxies() ) { - target = kernel().node_manager.get_node_or_proxy( target->get_node_id(), tid ); + target = kernel::manager< NodeManager >().get_node_or_proxy( target->get_node_id(), tid ); return CONNECT; } @@ -1667,21 +1670,21 @@ ConnectionManager::deliver_secondary_events( const size_t tid, const bool called_from_wfr_update, std::vector< unsigned int >& recv_buffer ) { - const std::vector< ConnectorModel* >& cm = kernel().model_manager.get_connection_models( tid ); - const Time stamp = - kernel().simulation_manager.get_slice_origin() + Time::step( 1 - kernel().connection_manager.get_min_delay() ); + const std::vector< ConnectorModel* >& cm = kernel::manager< ModelManager 
>().get_connection_models( tid ); + const Time stamp = kernel::manager< SimulationManager >().get_slice_origin() + + Time::step( 1 - kernel::manager< ConnectionManager >().get_min_delay() ); const std::vector< std::vector< size_t > >& positions_tid = secondary_recv_buffer_pos_[ tid ]; const synindex syn_id_end = positions_tid.size(); for ( synindex syn_id = 0; syn_id < syn_id_end; ++syn_id ) { - const ConnectorModel& conn_model = kernel().model_manager.get_connection_model( syn_id, tid ); + const ConnectorModel& conn_model = kernel::manager< ModelManager >().get_connection_model( syn_id, tid ); const bool supports_wfr = conn_model.has_property( ConnectionModelProperties::SUPPORTS_WFR ); if ( not called_from_wfr_update or supports_wfr ) { if ( positions_tid[ syn_id ].size() > 0 ) { - SecondaryEvent& prototype = kernel().model_manager.get_secondary_event_prototype( syn_id, tid ); + SecondaryEvent& prototype = kernel::manager< ModelManager >().get_secondary_event_prototype( syn_id, tid ); size_t lcid = 0; const size_t lcid_end = positions_tid[ syn_id ].size(); @@ -1702,10 +1705,11 @@ ConnectionManager::deliver_secondary_events( const size_t tid, // Read waveform relaxation done marker from last position in every // chunk bool done = true; - for ( size_t rank = 0; rank < kernel().mpi_manager.get_num_processes(); ++rank ) + for ( size_t rank = 0; rank < kernel::manager< MPIManager >().get_num_processes(); ++rank ) { - done = - done and recv_buffer[ kernel().mpi_manager.get_done_marker_position_in_secondary_events_recv_buffer( rank ) ]; + done = done + and recv_buffer[ kernel::manager< MPIManager >().get_done_marker_position_in_secondary_events_recv_buffer( + rank ) ]; } return done; } @@ -1743,9 +1747,10 @@ ConnectionManager::remove_disabled_connections( const size_t tid ) void ConnectionManager::resize_connections() { - kernel().vp_manager.assert_thread_parallel(); + kernel::manager< VPManager >().assert_thread_parallel(); - connections_.at( 
kernel().vp_manager.get_thread_id() ).resize( kernel().model_manager.get_num_connection_models() ); + connections_.at( kernel::manager< VPManager >().get_thread_id() ) + .resize( kernel::manager< ModelManager >().get_num_connection_models() ); source_table_.resize_sources(); target_table_devices_.resize_to_number_of_synapse_types(); @@ -1754,19 +1759,19 @@ ConnectionManager::resize_connections() void ConnectionManager::sync_has_primary_connections() { - has_primary_connections_ = kernel().mpi_manager.any_true( has_primary_connections_ ); + has_primary_connections_ = kernel::manager< MPIManager >().any_true( has_primary_connections_ ); } void ConnectionManager::check_secondary_connections_exist() { - secondary_connections_exist_ = kernel().mpi_manager.any_true( secondary_connections_exist_ ); + secondary_connections_exist_ = kernel::manager< MPIManager >().any_true( secondary_connections_exist_ ); } void ConnectionManager::set_connections_have_changed() { - assert( kernel().vp_manager.get_thread_id() == 0 ); + assert( kernel::manager< VPManager >().get_thread_id() == 0 ); if ( get_connections_has_been_called_ ) { @@ -1799,9 +1804,9 @@ ConnectionManager::collect_compressed_spike_data( const size_t tid ) } // of omp single; implicit barrier source_table_.collect_compressible_sources( tid ); - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); #pragma omp single { source_table_.fill_compressed_spike_data( compressed_spike_data_ ); @@ -1830,13 +1835,13 @@ ConnectionManager::fill_target_buffer( const size_t tid, do { - const auto& conn_model = kernel().model_manager.get_connection_model( syn_id, tid ); + const auto& conn_model = 
kernel::manager< ModelManager >().get_connection_model( syn_id, tid ); const bool is_primary = conn_model.has_property( ConnectionModelProperties::IS_PRIMARY ); while ( source_2_idx != csd_maps.at( syn_id ).end() ) { const auto source_gid = source_2_idx->first; - const auto source_rank = kernel().mpi_manager.get_process_id_of_node_id( source_gid ); + const auto source_rank = kernel::manager< MPIManager >().get_process_id_of_node_id( source_gid ); if ( not( rank_start <= source_rank and source_rank < rank_end ) ) { // We are not responsible for this source. @@ -1861,8 +1866,8 @@ ConnectionManager::fill_target_buffer( const size_t tid, next_target_data.set_is_primary( is_primary ); next_target_data.reset_marker(); next_target_data.set_source_tid( - kernel().vp_manager.vp_to_thread( kernel().vp_manager.node_id_to_vp( source_gid ) ) ); - next_target_data.set_source_lid( kernel().vp_manager.node_id_to_lid( source_gid ) ); + kernel::manager< VPManager >().vp_to_thread( kernel::manager< VPManager >().node_id_to_vp( source_gid ) ) ); + next_target_data.set_source_lid( kernel::manager< VPManager >().node_id_to_lid( source_gid ) ); if ( is_primary ) { @@ -1879,7 +1884,7 @@ ConnectionManager::fill_target_buffer( const size_t tid, assert( target_thread == static_cast< unsigned long >( conn_info.get_tid() ) ); const size_t relative_recv_buffer_pos = get_secondary_recv_buffer_position( target_thread, syn_id, conn_info.get_lcid() ) - - kernel().mpi_manager.get_recv_displacement_secondary_events_in_int( source_rank ); + - kernel::manager< MPIManager >().get_recv_displacement_secondary_events_in_int( source_rank ); SecondaryTargetDataFields& secondary_fields = next_target_data.secondary_data; secondary_fields.set_recv_buffer_pos( relative_recv_buffer_pos ); @@ -1927,7 +1932,7 @@ ConnectionManager::fill_target_buffer( const size_t tid, void ConnectionManager::initialize_iteration_state() { - const size_t num_threads = kernel().vp_manager.get_num_threads(); + const size_t num_threads 
= kernel::manager< VPManager >().get_num_threads(); iteration_state_.clear(); iteration_state_.reserve( num_threads ); @@ -1944,19 +1949,22 @@ ConnectionManager::initialize_iteration_state() void ConnectionManager::send_to_devices( const size_t tid, const size_t source_node_id, Event& e ) { - target_table_devices_.send_to_device( tid, source_node_id, e, kernel().model_manager.get_connection_models( tid ) ); + target_table_devices_.send_to_device( + tid, source_node_id, e, kernel::manager< ModelManager >().get_connection_models( tid ) ); } void ConnectionManager::send_to_devices( const size_t tid, const size_t source_node_id, SecondaryEvent& e ) { - target_table_devices_.send_to_device( tid, source_node_id, e, kernel().model_manager.get_connection_models( tid ) ); + target_table_devices_.send_to_device( + tid, source_node_id, e, kernel::manager< ModelManager >().get_connection_models( tid ) ); } void ConnectionManager::send_from_device( const size_t tid, const size_t ldid, Event& e ) { - target_table_devices_.send_from_device( tid, ldid, e, kernel().model_manager.get_connection_models( tid ) ); + target_table_devices_.send_from_device( + tid, ldid, e, kernel::manager< ModelManager >().get_connection_models( tid ) ); } } diff --git a/nestkernel/connector_base.cpp b/nestkernel/connector_base.cpp index 951e3c0334..daac0d6bc6 100644 --- a/nestkernel/connector_base.cpp +++ b/nestkernel/connector_base.cpp @@ -39,7 +39,7 @@ ConnectorBase::prepare_weight_recorder_event( WeightRecorderEvent& wr_e, wr_e.set_rport( e.get_rport() ); wr_e.set_stamp( e.get_stamp() ); // Sender is not available for SecondaryEvents, and not needed, so we do not set it to avoid undefined behavior. 
- wr_e.set_sender_node_id( kernel().connection_manager.get_source_node_id( tid, syn_id, lcid ) ); + wr_e.set_sender_node_id( kernel::manager< ConnectionManager >().get_source_node_id( tid, syn_id, lcid ) ); wr_e.set_weight( e.get_weight() ); wr_e.set_delay_steps( e.get_delay_steps() ); wr_e.set_receiver( *static_cast< Node* >( cp.get_weight_recorder() ) ); diff --git a/nestkernel/connector_model.cpp b/nestkernel/connector_model.cpp index a200aafd8c..52fd112c76 100644 --- a/nestkernel/connector_model.cpp +++ b/nestkernel/connector_model.cpp @@ -43,7 +43,7 @@ ConnectorModel::ConnectorModel( const ConnectorModel& cm, const std::string name size_t ConnectorModel::get_synapse_model_id( const std::string& name ) { - return kernel().model_manager.get_synapse_model_id( name ); + return kernel::manager< ModelManager >().get_synapse_model_id( name ); } } // namespace nest diff --git a/nestkernel/connector_model_impl.h b/nestkernel/connector_model_impl.h index 4527a8dfac..62b5b7a310 100644 --- a/nestkernel/connector_model_impl.h +++ b/nestkernel/connector_model_impl.h @@ -107,12 +107,12 @@ GenericConnectorModel< ConnectionT >::set_status( const DictionaryDatum& d ) // set_status calls on common properties and default connection may // modify min/max delay, we need to freeze the min/max_delay checking. - kernel().connection_manager.get_delay_checker().freeze_delay_update(); + kernel::manager< ConnectionManager >().get_delay_checker().freeze_delay_update(); cp_.set_status( d, *this ); default_connection_.set_status( d, *this ); - kernel().connection_manager.get_delay_checker().enable_delay_update(); + kernel::manager< ConnectionManager >().get_delay_checker().enable_delay_update(); // we've possibly just got a new default delay. 
So enforce checking next time // it is used @@ -157,7 +157,7 @@ GenericConnectorModel< ConnectionT >::used_default_delay() if ( has_property( ConnectionModelProperties::HAS_DELAY ) ) { const double d = default_connection_.get_delay(); - kernel().connection_manager.get_delay_checker().assert_valid_delay_ms( d ); + kernel::manager< ConnectionManager >().get_delay_checker().assert_valid_delay_ms( d ); } // Let connections without delay contribute to the delay extrema with // wfr_comm_interval. For those connections the min_delay is important @@ -167,8 +167,8 @@ GenericConnectorModel< ConnectionT >::used_default_delay() // without delay is created. else { - const double wfr_comm_interval = kernel().simulation_manager.get_wfr_comm_interval(); - kernel().connection_manager.get_delay_checker().assert_valid_delay_ms( wfr_comm_interval ); + const double wfr_comm_interval = kernel::manager< SimulationManager >().get_wfr_comm_interval(); + kernel::manager< ConnectionManager >().get_delay_checker().assert_valid_delay_ms( wfr_comm_interval ); } } catch ( BadDelay& e ) @@ -177,8 +177,8 @@ GenericConnectorModel< ConnectionT >::used_default_delay() String::compose( "Default delay of '%1' must be between min_delay %2 " "and max_delay %3.", get_name(), - Time::delay_steps_to_ms( kernel().connection_manager.get_min_delay() ), - Time::delay_steps_to_ms( kernel().connection_manager.get_max_delay() ) ) ); + Time::delay_steps_to_ms( kernel::manager< ConnectionManager >().get_min_delay() ), + Time::delay_steps_to_ms( kernel::manager< ConnectionManager >().get_max_delay() ) ) ); } default_delay_needs_check_ = false; } @@ -212,7 +212,7 @@ GenericConnectorModel< ConnectionT >::add_connection( Node& src, { if ( has_property( ConnectionModelProperties::HAS_DELAY ) ) { - kernel().connection_manager.get_delay_checker().assert_valid_delay_ms( delay ); + kernel::manager< ConnectionManager >().get_delay_checker().assert_valid_delay_ms( delay ); } if ( p->known( names::delay ) ) @@ -231,7 +231,7 @@ 
GenericConnectorModel< ConnectionT >::add_connection( Node& src, { if ( has_property( ConnectionModelProperties::HAS_DELAY ) ) { - kernel().connection_manager.get_delay_checker().assert_valid_delay_ms( delay ); + kernel::manager< ConnectionManager >().get_delay_checker().assert_valid_delay_ms( delay ); } } else diff --git a/nestkernel/delay_checker.cpp b/nestkernel/delay_checker.cpp index 7148ebd110..7e103238d8 100644 --- a/nestkernel/delay_checker.cpp +++ b/nestkernel/delay_checker.cpp @@ -137,7 +137,7 @@ nest::DelayChecker::set_status( const DictionaryDatum& d ) if ( min_delay_updated and max_delay_updated ) { - if ( kernel().connection_manager.get_num_connections() > 0 ) + if ( kernel::manager< ConnectionManager >().get_num_connections() > 0 ) { throw BadProperty( "Connections already exist. Please call ResetKernel first" ); } @@ -162,10 +162,10 @@ nest::DelayChecker::assert_valid_delay_ms( double requested_new_delay ) // if already simulated, the new delay has to be checked against the // min_delay and the max_delay which have been used during simulation - if ( kernel().simulation_manager.has_been_simulated() ) + if ( kernel::manager< SimulationManager >().has_been_simulated() ) { - const bool bad_min_delay = new_delay < kernel().connection_manager.get_min_delay(); - const bool bad_max_delay = new_delay > kernel().connection_manager.get_max_delay(); + const bool bad_min_delay = new_delay < kernel::manager< ConnectionManager >().get_min_delay(); + const bool bad_max_delay = new_delay > kernel::manager< ConnectionManager >().get_max_delay(); if ( bad_min_delay or bad_max_delay ) { throw BadDelay( new_delay_ms, @@ -223,10 +223,10 @@ nest::DelayChecker::assert_two_valid_delays_steps( long new_delay1, long new_del throw BadDelay( Time::delay_steps_to_ms( ldelay ), "Delay must be greater than or equal to resolution" ); } - if ( kernel().simulation_manager.has_been_simulated() ) + if ( kernel::manager< SimulationManager >().has_been_simulated() ) { - const bool 
bad_min_delay = ldelay < kernel().connection_manager.get_min_delay(); - const bool bad_max_delay = hdelay > kernel().connection_manager.get_max_delay(); + const bool bad_min_delay = ldelay < kernel::manager< ConnectionManager >().get_min_delay(); + const bool bad_max_delay = hdelay > kernel::manager< ConnectionManager >().get_max_delay(); if ( bad_min_delay ) { throw BadDelay( diff --git a/nestkernel/eprop_archiving_node.h b/nestkernel/eprop_archiving_node.h index 673f14814c..392f6a893c 100644 --- a/nestkernel/eprop_archiving_node.h +++ b/nestkernel/eprop_archiving_node.h @@ -253,7 +253,7 @@ EpropArchivingNode< HistEntryT >::erase_used_eprop_history() return; } - const long update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps(); + const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); auto it_update_hist = update_history_.begin(); diff --git a/nestkernel/eprop_archiving_node_recurrent.h b/nestkernel/eprop_archiving_node_recurrent.h index 3442796b96..ac416ea53d 100644 --- a/nestkernel/eprop_archiving_node_recurrent.h +++ b/nestkernel/eprop_archiving_node_recurrent.h @@ -484,7 +484,7 @@ EpropArchivingNodeRecurrent< hist_shift_required >::write_firing_rate_reg_to_his return; } - const double update_interval = kernel().simulation_manager.get_eprop_update_interval().get_steps(); + const double update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); const double dt = Time::get_resolution().get_ms(); const long shift = Time::get_resolution().get_steps(); diff --git a/nestkernel/event.cpp b/nestkernel/event.cpp index ec67e07238..7a4bc721b9 100644 --- a/nestkernel/event.cpp +++ b/nestkernel/event.cpp @@ -55,7 +55,7 @@ Event::retrieve_sender_node_id_from_source_table() const } else { - const size_t node_id = kernel().connection_manager.get_source_node_id( + const size_t node_id = kernel::manager< ConnectionManager >().get_source_node_id( 
sender_spike_data_.get_tid(), sender_spike_data_.get_syn_id(), sender_spike_data_.get_lcid() ); return node_id; } diff --git a/nestkernel/event_delivery_manager.cpp b/nestkernel/event_delivery_manager.cpp index fd87d011c8..b96743cbe3 100644 --- a/nestkernel/event_delivery_manager.cpp +++ b/nestkernel/event_delivery_manager.cpp @@ -88,7 +88,7 @@ EventDeliveryManager::initialize( const bool adjust_number_of_threads_or_rng_onl send_recv_buffer_resize_log_.clear(); } - const size_t num_threads = kernel().vp_manager.get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); local_spike_counter_.resize( num_threads, 0 ); reset_counters(); @@ -98,7 +98,7 @@ EventDeliveryManager::initialize( const bool adjust_number_of_threads_or_rng_onl #pragma omp parallel { - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); if ( not emitted_spikes_register_[ tid ] ) { @@ -197,26 +197,26 @@ void EventDeliveryManager::resize_send_recv_buffers_target_data() { // compute send receive counts and allocate memory for buffers - send_buffer_target_data_.resize( kernel().mpi_manager.get_buffer_size_target_data() ); - recv_buffer_target_data_.resize( kernel().mpi_manager.get_buffer_size_target_data() ); + send_buffer_target_data_.resize( kernel::manager< MPIManager >().get_buffer_size_target_data() ); + recv_buffer_target_data_.resize( kernel::manager< MPIManager >().get_buffer_size_target_data() ); } void EventDeliveryManager::resize_send_recv_buffers_spike_data_() { - if ( kernel().mpi_manager.get_buffer_size_spike_data() > send_buffer_spike_data_.size() ) + if ( kernel::manager< MPIManager >().get_buffer_size_spike_data() > send_buffer_spike_data_.size() ) { - send_buffer_spike_data_.resize( kernel().mpi_manager.get_buffer_size_spike_data() ); - recv_buffer_spike_data_.resize( kernel().mpi_manager.get_buffer_size_spike_data() ); - send_buffer_off_grid_spike_data_.resize( 
kernel().mpi_manager.get_buffer_size_spike_data() ); - recv_buffer_off_grid_spike_data_.resize( kernel().mpi_manager.get_buffer_size_spike_data() ); + send_buffer_spike_data_.resize( kernel::manager< MPIManager >().get_buffer_size_spike_data() ); + recv_buffer_spike_data_.resize( kernel::manager< MPIManager >().get_buffer_size_spike_data() ); + send_buffer_off_grid_spike_data_.resize( kernel::manager< MPIManager >().get_buffer_size_spike_data() ); + recv_buffer_off_grid_spike_data_.resize( kernel::manager< MPIManager >().get_buffer_size_spike_data() ); } } void EventDeliveryManager::configure_spike_data_buffers() { - assert( kernel().connection_manager.get_min_delay() != 0 ); + assert( kernel::manager< ConnectionManager >().get_min_delay() != 0 ); configure_spike_register(); @@ -231,7 +231,7 @@ EventDeliveryManager::configure_spike_register() { #pragma omp parallel { - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); reset_spike_register_( tid ); } } @@ -240,16 +240,18 @@ void EventDeliveryManager::configure_secondary_buffers() { send_buffer_secondary_events_.clear(); - send_buffer_secondary_events_.resize( kernel().mpi_manager.get_send_buffer_size_secondary_events_in_int() ); + send_buffer_secondary_events_.resize( + kernel::manager< MPIManager >().get_send_buffer_size_secondary_events_in_int() ); recv_buffer_secondary_events_.clear(); - recv_buffer_secondary_events_.resize( kernel().mpi_manager.get_recv_buffer_size_secondary_events_in_int() ); + recv_buffer_secondary_events_.resize( + kernel::manager< MPIManager >().get_recv_buffer_size_secondary_events_in_int() ); } void EventDeliveryManager::init_moduli() { - long min_delay = kernel().connection_manager.get_min_delay(); - long max_delay = kernel().connection_manager.get_max_delay(); + long min_delay = kernel::manager< ConnectionManager >().get_min_delay(); + long max_delay = kernel::manager< ConnectionManager >().get_max_delay(); 
assert( min_delay != 0 ); assert( max_delay != 0 ); @@ -261,7 +263,7 @@ EventDeliveryManager::init_moduli() for ( long d = 0; d < min_delay + max_delay; ++d ) { - moduli_[ d ] = ( kernel().simulation_manager.get_clock().get_steps() + d ) % ( min_delay + max_delay ); + moduli_[ d ] = ( kernel::manager< SimulationManager >().get_clock().get_steps() + d ) % ( min_delay + max_delay ); } // Slice-based ring-buffers have one bin per min_delay steps, @@ -271,15 +273,15 @@ EventDeliveryManager::init_moduli() slice_moduli_.resize( min_delay + max_delay ); for ( long d = 0; d < min_delay + max_delay; ++d ) { - slice_moduli_[ d ] = ( ( kernel().simulation_manager.get_clock().get_steps() + d ) / min_delay ) % nbuff; + slice_moduli_[ d ] = ( ( kernel::manager< SimulationManager >().get_clock().get_steps() + d ) / min_delay ) % nbuff; } } void EventDeliveryManager::update_moduli() { - long min_delay = kernel().connection_manager.get_min_delay(); - long max_delay = kernel().connection_manager.get_max_delay(); + long min_delay = kernel::manager< ConnectionManager >().get_min_delay(); + long max_delay = kernel::manager< ConnectionManager >().get_max_delay(); assert( min_delay != 0 ); assert( max_delay != 0 ); @@ -296,7 +298,7 @@ EventDeliveryManager::update_moduli() const size_t nbuff = static_cast< size_t >( std::ceil( static_cast< double >( min_delay + max_delay ) / min_delay ) ); for ( long d = 0; d < min_delay + max_delay; ++d ) { - slice_moduli_[ d ] = ( ( kernel().simulation_manager.get_clock().get_steps() + d ) / min_delay ) % nbuff; + slice_moduli_[ d ] = ( ( kernel::manager< SimulationManager >().get_clock().get_steps() + d ) / min_delay ) % nbuff; } } @@ -326,10 +328,10 @@ void EventDeliveryManager::write_done_marker_secondary_events_( const bool done ) { // write done marker at last position in every chunk - for ( size_t rank = 0; rank < kernel().mpi_manager.get_num_processes(); ++rank ) + for ( size_t rank = 0; rank < kernel::manager< MPIManager >().get_num_processes(); 
++rank ) { - send_buffer_secondary_events_[ kernel().mpi_manager.get_done_marker_position_in_secondary_events_send_buffer( - rank ) ] = done; + send_buffer_secondary_events_[ kernel::manager< MPIManager >() + .get_done_marker_position_in_secondary_events_send_buffer( rank ) ] = done; } } @@ -337,14 +339,14 @@ void EventDeliveryManager::gather_secondary_events( const bool done ) { write_done_marker_secondary_events_( done ); - kernel().mpi_manager.communicate_secondary_events_Alltoallv( + kernel::manager< MPIManager >().communicate_secondary_events_Alltoallv( send_buffer_secondary_events_, recv_buffer_secondary_events_ ); } bool EventDeliveryManager::deliver_secondary_events( const size_t tid, const bool called_from_wfr_update ) { - return kernel().connection_manager.deliver_secondary_events( + return kernel::manager< ConnectionManager >().deliver_secondary_events( tid, called_from_wfr_update, recv_buffer_secondary_events_ ); } @@ -369,14 +371,14 @@ EventDeliveryManager::gather_spike_data_( std::vector< SpikeDataT >& send_buffer // NOTE: For meaning and logic of SpikeData flags for detecting complete transmission // and information for shrink/grow, see comment in spike_data.h. 
- const size_t old_buff_size_per_rank = kernel().mpi_manager.get_send_recv_count_spike_data_per_rank(); + const size_t old_buff_size_per_rank = kernel::manager< MPIManager >().get_send_recv_count_spike_data_per_rank(); if ( global_max_spikes_per_rank_ < send_recv_buffer_shrink_limit_ * old_buff_size_per_rank ) { const size_t new_buff_size_per_rank = std::max( 2UL, static_cast< size_t >( ( 1 + send_recv_buffer_shrink_spare_ ) * global_max_spikes_per_rank_ ) ); - kernel().mpi_manager.set_buffer_size_spike_data( - kernel().mpi_manager.get_num_processes() * new_buff_size_per_rank ); + kernel::manager< MPIManager >().set_buffer_size_spike_data( + kernel::manager< MPIManager >().get_num_processes() * new_buff_size_per_rank ); resize_send_recv_buffers_spike_data_(); send_recv_buffer_resize_log_.add_entry( global_max_spikes_per_rank_, new_buff_size_per_rank ); } @@ -395,7 +397,7 @@ EventDeliveryManager::gather_spike_data_( std::vector< SpikeDataT >& send_buffer // Set marker at end of each chunk to DEFAULT reset_complete_marker_spike_data_( send_buffer_position, send_buffer ); - std::vector< size_t > num_spikes_per_rank( kernel().mpi_manager.get_num_processes(), 0 ); + std::vector< size_t > num_spikes_per_rank( kernel::manager< MPIManager >().get_num_processes(), 0 ); // Collocate spikes to send buffer collocate_spike_data_buffers_( send_buffer_position, emitted_spikes_register_, send_buffer, num_spikes_per_rank ); @@ -419,19 +421,19 @@ EventDeliveryManager::gather_spike_data_( std::vector< SpikeDataT >& send_buffer // We introduce an explicit barrier at this point to measure how long each process idles until all other processes // reached this point as well. This barrier is directly followed by another implicit barrier due to global // communication. 
- kernel().simulation_manager.get_mpi_synchronization_stopwatch().start(); - kernel().mpi_manager.synchronize(); - kernel().simulation_manager.get_mpi_synchronization_stopwatch().stop(); + kernel::manager< SimulationManager >().get_mpi_synchronization_stopwatch().start(); + kernel::manager< MPIManager >().synchronize(); + kernel::manager< SimulationManager >().get_mpi_synchronization_stopwatch().stop(); #endif // Given that we templatize by plain vs offgrid, this if should not be necessary, but ... if ( off_grid_spiking_ ) { - kernel().mpi_manager.communicate_off_grid_spike_data_Alltoall( send_buffer, recv_buffer ); + kernel::manager< MPIManager >().communicate_off_grid_spike_data_Alltoall( send_buffer, recv_buffer ); } else { - kernel().mpi_manager.communicate_spike_data_Alltoall( send_buffer, recv_buffer ); + kernel::manager< MPIManager >().communicate_spike_data_Alltoall( send_buffer, recv_buffer ); } sw_communicate_spike_data_.stop(); @@ -439,15 +441,15 @@ EventDeliveryManager::gather_spike_data_( std::vector< SpikeDataT >& send_buffer global_max_spikes_per_rank_ = get_global_max_spikes_per_rank_( send_buffer_position, recv_buffer ); all_spikes_transmitted = - global_max_spikes_per_rank_ <= kernel().mpi_manager.get_send_recv_count_spike_data_per_rank(); + global_max_spikes_per_rank_ <= kernel::manager< MPIManager >().get_send_recv_count_spike_data_per_rank(); if ( not all_spikes_transmitted ) { const size_t new_buff_size_per_rank = static_cast< size_t >( ( 1 + send_recv_buffer_grow_extra_ ) * global_max_spikes_per_rank_ ); - kernel().mpi_manager.set_buffer_size_spike_data( - kernel().mpi_manager.get_num_processes() * new_buff_size_per_rank ); + kernel::manager< MPIManager >().set_buffer_size_spike_data( + kernel::manager< MPIManager >().get_num_processes() * new_buff_size_per_rank ); resize_send_recv_buffers_spike_data_(); send_recv_buffer_resize_log_.add_entry( global_max_spikes_per_rank_, new_buff_size_per_rank ); } @@ -502,9 +504,9 @@ 
EventDeliveryManager::set_end_marker_( const SendBufferPosition& send_buffer_pos { // See comment in spike_data.h for logic. const bool collocate_complete = local_max_spikes_per_rank - <= static_cast< size_t >( kernel().mpi_manager.get_send_recv_count_spike_data_per_rank() ); + <= static_cast< size_t >( kernel::manager< MPIManager >().get_send_recv_count_spike_data_per_rank() ); - for ( size_t rank = 0; rank < kernel().mpi_manager.get_num_processes(); ++rank ) + for ( size_t rank = 0; rank < kernel::manager< MPIManager >().get_num_processes(); ++rank ) { const size_t end_idx = send_buffer_position.end( rank ) - 1; if ( not collocate_complete ) @@ -545,7 +547,7 @@ void EventDeliveryManager::reset_complete_marker_spike_data_( const SendBufferPosition& send_buffer_position, std::vector< SpikeDataT >& send_buffer ) const { - for ( size_t rank = 0; rank < kernel().mpi_manager.get_num_processes(); ++rank ) + for ( size_t rank = 0; rank < kernel::manager< MPIManager >().get_num_processes(); ++rank ) { const size_t idx = send_buffer_position.end( rank ) - 1; send_buffer[ idx ].reset_marker(); @@ -560,7 +562,7 @@ EventDeliveryManager::get_global_max_spikes_per_rank_( const SendBufferPosition& // TODO: send_buffer_position not needed here, only used to get endpoint of each per-rank section of buffer size_t maximum = 0; - for ( size_t target_rank = 0; target_rank < kernel().mpi_manager.get_num_processes(); ++target_rank ) + for ( size_t target_rank = 0; target_rank < kernel::manager< MPIManager >().get_num_processes(); ++target_rank ) { const auto& end_entry = recv_buffer[ send_buffer_position.end( target_rank ) - 1 ]; size_t max_per_thread_max_spikes_per_rank = 0; @@ -571,7 +573,7 @@ EventDeliveryManager::get_global_max_spikes_per_rank_( const SendBufferPosition& else { assert( end_entry.is_end_marker() ); - max_per_thread_max_spikes_per_rank = kernel().mpi_manager.get_send_recv_count_spike_data_per_rank(); + max_per_thread_max_spikes_per_rank = kernel::manager< MPIManager 
>().get_send_recv_count_spike_data_per_rank(); } maximum = std::max( max_per_thread_max_spikes_per_rank, maximum ); } @@ -597,25 +599,25 @@ void EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< SpikeDataT >& recv_buffer ) { // deliver only at beginning of time slice - if ( kernel().simulation_manager.get_from_step() > 0 ) + if ( kernel::manager< SimulationManager >().get_from_step() > 0 ) { return; } - const size_t spike_buffer_size_per_rank = kernel().mpi_manager.get_send_recv_count_spike_data_per_rank(); - const std::vector< ConnectorModel* >& cm = kernel().model_manager.get_connection_models( tid ); + const size_t spike_buffer_size_per_rank = kernel::manager< MPIManager >().get_send_recv_count_spike_data_per_rank(); + const std::vector< ConnectorModel* >& cm = kernel::manager< ModelManager >().get_connection_models( tid ); // prepare Time objects for every possible time stamp within min_delay_ - std::vector< Time > prepared_timestamps( kernel().connection_manager.get_min_delay() ); - for ( size_t lag = 0; lag < static_cast< size_t >( kernel().connection_manager.get_min_delay() ); ++lag ) + std::vector< Time > prepared_timestamps( kernel::manager< ConnectionManager >().get_min_delay() ); + for ( size_t lag = 0; lag < static_cast< size_t >( kernel::manager< ConnectionManager >().get_min_delay() ); ++lag ) { // Subtract min_delay because spikes were emitted in previous time slice and we use current clock. 
- prepared_timestamps[ lag ] = - kernel().simulation_manager.get_clock() + Time::step( lag + 1 - kernel().connection_manager.get_min_delay() ); + prepared_timestamps[ lag ] = kernel::manager< SimulationManager >().get_clock() + + Time::step( lag + 1 - kernel::manager< ConnectionManager >().get_min_delay() ); } // Deliver spikes sent by each rank in order - for ( size_t rank = 0; rank < kernel().mpi_manager.get_num_processes(); ++rank ) + for ( size_t rank = 0; rank < kernel::manager< MPIManager >().get_num_processes(); ++rank ) { // Continue with next rank if no spikes were sent by current rank if ( recv_buffer[ rank * spike_buffer_size_per_rank ].is_invalid_marker() ) @@ -647,7 +649,7 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik size_t syn_id_batch[ SPIKES_PER_BATCH ]; size_t lcid_batch[ SPIKES_PER_BATCH ]; - if ( not kernel().connection_manager.use_compressed_spikes() ) + if ( not kernel::manager< ConnectionManager >().use_compressed_spikes() ) { for ( size_t i = 0; i < num_batches; ++i ) { @@ -665,7 +667,8 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik { if ( tid_batch[ j ] == tid ) { - kernel().connection_manager.send( tid_batch[ j ], syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); + kernel::manager< ConnectionManager >().send( + tid_batch[ j ], syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); } } } @@ -686,7 +689,8 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik { if ( tid_batch[ j ] == tid ) { - kernel().connection_manager.send( tid_batch[ j ], syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); + kernel::manager< ConnectionManager >().send( + tid_batch[ j ], syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); } } } @@ -710,7 +714,7 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik { // find the spike-data entry for this thread const std::vector< SpikeData >& compressed_spike_data = - 
kernel().connection_manager.get_compressed_spike_data( syn_id_batch[ j ], lcid_batch[ j ] ); + kernel::manager< ConnectionManager >().get_compressed_spike_data( syn_id_batch[ j ], lcid_batch[ j ] ); lcid_batch[ j ] = compressed_spike_data[ tid ].get_lcid(); } for ( size_t j = 0; j < SPIKES_PER_BATCH; ++j ) @@ -726,7 +730,7 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik { if ( lcid_batch[ j ] != invalid_lcid ) { - kernel().connection_manager.send( tid, syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); + kernel::manager< ConnectionManager >().send( tid, syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); } } } @@ -747,7 +751,7 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik { // find the spike-data entry for this thread const std::vector< SpikeData >& compressed_spike_data = - kernel().connection_manager.get_compressed_spike_data( syn_id_batch[ j ], lcid_batch[ j ] ); + kernel::manager< ConnectionManager >().get_compressed_spike_data( syn_id_batch[ j ], lcid_batch[ j ] ); lcid_batch[ j ] = compressed_spike_data[ tid ].get_lcid(); } for ( size_t j = 0; j < num_remaining_entries; ++j ) @@ -763,7 +767,7 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik { if ( lcid_batch[ j ] != invalid_lcid ) { - kernel().connection_manager.send( tid, syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); + kernel::manager< ConnectionManager >().send( tid, syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); } } } // if-else not compressed @@ -774,16 +778,16 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik void EventDeliveryManager::gather_target_data( const size_t tid ) { - assert( not kernel().connection_manager.is_source_table_cleared() ); + assert( not kernel::manager< ConnectionManager >().is_source_table_cleared() ); // assume all threads have some work to do gather_completed_checker_.set_false( tid ); assert( 
gather_completed_checker_.all_false() ); - const AssignedRanks assigned_ranks = kernel().vp_manager.get_assigned_ranks( tid ); + const AssignedRanks assigned_ranks = kernel::manager< VPManager >().get_assigned_ranks( tid ); - kernel().connection_manager.prepare_target_table( tid ); - kernel().connection_manager.reset_source_table_entry_point( tid ); + kernel::manager< ConnectionManager >().prepare_target_table( tid ); + kernel::manager< ConnectionManager >().reset_source_table_entry_point( tid ); while ( gather_completed_checker_.any_false() ) { @@ -793,19 +797,19 @@ EventDeliveryManager::gather_target_data( const size_t tid ) #pragma omp master { - if ( kernel().mpi_manager.adaptive_target_buffers() and buffer_size_target_data_has_changed_ ) + if ( kernel::manager< MPIManager >().adaptive_target_buffers() and buffer_size_target_data_has_changed_ ) { resize_send_recv_buffers_target_data(); } } // of omp master; (no barrier) - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); - kernel().connection_manager.restore_source_table_entry_point( tid ); + kernel::manager< ConnectionManager >().restore_source_table_entry_point( tid ); TargetSendBufferPosition send_buffer_position( - assigned_ranks, kernel().mpi_manager.get_send_recv_count_target_data_per_rank() ); + assigned_ranks, kernel::manager< MPIManager >().get_send_recv_count_target_data_per_rank() ); const bool gather_completed = collocate_target_data_buffers_( tid, assigned_ranks, send_buffer_position ); gather_completed_checker_.logical_and( tid, gather_completed ); @@ -814,16 +818,17 @@ EventDeliveryManager::gather_target_data( const size_t tid ) { 
set_complete_marker_target_data_( assigned_ranks, send_buffer_position ); } - kernel().connection_manager.save_source_table_entry_point( tid ); - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< ConnectionManager >().save_source_table_entry_point( tid ); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); - kernel().connection_manager.clean_source_table( tid ); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< ConnectionManager >().clean_source_table( tid ); #pragma omp master { sw_communicate_target_data_.start(); - kernel().mpi_manager.communicate_target_data_Alltoall( send_buffer_target_data_, recv_buffer_target_data_ ); + kernel::manager< MPIManager >().communicate_target_data_Alltoall( + send_buffer_target_data_, recv_buffer_target_data_ ); sw_communicate_target_data_.stop(); } // of omp master (no barriers!) 
#pragma omp barrier @@ -832,31 +837,31 @@ EventDeliveryManager::gather_target_data( const size_t tid ) gather_completed_checker_.logical_and( tid, distribute_completed ); // resize mpi buffers, if necessary and allowed - if ( gather_completed_checker_.any_false() and kernel().mpi_manager.adaptive_target_buffers() ) + if ( gather_completed_checker_.any_false() and kernel::manager< MPIManager >().adaptive_target_buffers() ) { #pragma omp master { - buffer_size_target_data_has_changed_ = kernel().mpi_manager.increase_buffer_size_target_data(); + buffer_size_target_data_has_changed_ = kernel::manager< MPIManager >().increase_buffer_size_target_data(); } #pragma omp barrier } } // of while - kernel().connection_manager.clear_source_table( tid ); + kernel::manager< ConnectionManager >().clear_source_table( tid ); } void EventDeliveryManager::gather_target_data_compressed( const size_t tid ) { - assert( not kernel().connection_manager.is_source_table_cleared() ); + assert( not kernel::manager< ConnectionManager >().is_source_table_cleared() ); // assume all threads have some work to do gather_completed_checker_.set_false( tid ); assert( gather_completed_checker_.all_false() ); - const AssignedRanks assigned_ranks = kernel().vp_manager.get_assigned_ranks( tid ); + const AssignedRanks assigned_ranks = kernel::manager< VPManager >().get_assigned_ranks( tid ); - kernel().connection_manager.prepare_target_table( tid ); + kernel::manager< ConnectionManager >().prepare_target_table( tid ); while ( gather_completed_checker_.any_false() ) { @@ -865,17 +870,17 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid ) #pragma omp master { - if ( kernel().mpi_manager.adaptive_target_buffers() and buffer_size_target_data_has_changed_ ) + if ( kernel::manager< MPIManager >().adaptive_target_buffers() and buffer_size_target_data_has_changed_ ) { resize_send_recv_buffers_target_data(); } } // of omp master; no barrier - 
kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); TargetSendBufferPosition send_buffer_position( - assigned_ranks, kernel().mpi_manager.get_send_recv_count_target_data_per_rank() ); + assigned_ranks, kernel::manager< MPIManager >().get_send_recv_count_target_data_per_rank() ); const bool gather_completed = collocate_target_data_buffers_compressed_( tid, assigned_ranks, send_buffer_position ); @@ -887,14 +892,15 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid ) set_complete_marker_target_data_( assigned_ranks, send_buffer_position ); } - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); #pragma omp master { sw_communicate_target_data_.start(); - kernel().mpi_manager.communicate_target_data_Alltoall( send_buffer_target_data_, recv_buffer_target_data_ ); + kernel::manager< MPIManager >().communicate_target_data_Alltoall( + send_buffer_target_data_, recv_buffer_target_data_ ); sw_communicate_target_data_.stop(); } // of omp master (no barrier) #pragma omp barrier @@ -906,20 +912,20 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid ) gather_completed_checker_.logical_and( tid, distribute_completed ); // resize mpi buffers, if necessary and allowed - if ( gather_completed_checker_.any_false() and kernel().mpi_manager.adaptive_target_buffers() ) + if ( 
gather_completed_checker_.any_false() and kernel::manager< MPIManager >().adaptive_target_buffers() ) { #pragma omp master { - buffer_size_target_data_has_changed_ = kernel().mpi_manager.increase_buffer_size_target_data(); + buffer_size_target_data_has_changed_ = kernel::manager< MPIManager >().increase_buffer_size_target_data(); } // of omp master (no barrier) - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); } } // of while - kernel().connection_manager.clear_source_table( tid ); + kernel::manager< ConnectionManager >().clear_source_table( tid ); } bool @@ -935,7 +941,7 @@ EventDeliveryManager::collocate_target_data_buffers_( const size_t tid, // no ranks to process for this thread if ( assigned_ranks.begin == assigned_ranks.end ) { - kernel().connection_manager.no_targets_to_process( tid ); + kernel::manager< ConnectionManager >().no_targets_to_process( tid ); return is_source_table_read; } @@ -952,7 +958,7 @@ EventDeliveryManager::collocate_target_data_buffers_( const size_t tid, while ( true ) { - valid_next_target_data = kernel().connection_manager.get_next_target_data( + valid_next_target_data = kernel::manager< ConnectionManager >().get_next_target_data( tid, assigned_ranks.begin, assigned_ranks.end, source_rank, next_target_data ); if ( valid_next_target_data ) // add valid entry to MPI buffer { @@ -960,11 +966,11 @@ EventDeliveryManager::collocate_target_data_buffers_( const size_t tid, { // entry does not fit in this part of the MPI buffer any more, // so we need to reject it - kernel().connection_manager.reject_last_target_data( tid ); + kernel::manager< ConnectionManager >().reject_last_target_data( tid ); // after 
rejecting the last target, we need to save the // position to start at this point again next communication // round - kernel().connection_manager.save_source_table_entry_point( tid ); + kernel::manager< ConnectionManager >().save_source_table_entry_point( tid ); // we have just rejected an entry, so source table can not be // fully read is_source_table_read = false; @@ -1024,7 +1030,7 @@ EventDeliveryManager::collocate_target_data_buffers_compressed_( const size_t ti send_buffer_target_data_[ send_buffer_position.begin( rank ) ].set_invalid_marker(); } - const bool is_source_table_read = kernel().connection_manager.fill_target_buffer( + const bool is_source_table_read = kernel::manager< ConnectionManager >().fill_target_buffer( tid, assigned_ranks.begin, assigned_ranks.end, send_buffer_target_data_, send_buffer_position ); return is_source_table_read; @@ -1047,9 +1053,9 @@ nest::EventDeliveryManager::distribute_target_data_buffers_( const size_t tid ) { bool are_others_completed = true; const unsigned int send_recv_count_target_data_per_rank = - kernel().mpi_manager.get_send_recv_count_target_data_per_rank(); + kernel::manager< MPIManager >().get_send_recv_count_target_data_per_rank(); - for ( size_t rank = 0; rank < kernel().mpi_manager.get_num_processes(); ++rank ) + for ( size_t rank = 0; rank < kernel::manager< MPIManager >().get_num_processes(); ++rank ) { // Check last entry for completed marker if ( not recv_buffer_target_data_[ ( rank + 1 ) * send_recv_count_target_data_per_rank - 1 ].is_complete_marker() ) @@ -1068,7 +1074,7 @@ nest::EventDeliveryManager::distribute_target_data_buffers_( const size_t tid ) const TargetData& target_data = recv_buffer_target_data_[ rank * send_recv_count_target_data_per_rank + i ]; if ( target_data.get_source_tid() == tid ) { - kernel().connection_manager.add_target( tid, rank, target_data ); + kernel::manager< ConnectionManager >().add_target( tid, rank, target_data ); } // Is this the last target from this rank? 
diff --git a/nestkernel/event_delivery_manager.h b/nestkernel/event_delivery_manager.h index c50326bf38..dd221bb332 100644 --- a/nestkernel/event_delivery_manager.h +++ b/nestkernel/event_delivery_manager.h @@ -538,22 +538,22 @@ inline void EventDeliveryManager::send_local_( Node& source, EventT& e, const long lag ) { assert( not source.has_proxies() ); - e.set_stamp( kernel().simulation_manager.get_slice_origin() + Time::step( lag + 1 ) ); + e.set_stamp( kernel::manager< SimulationManager >().get_slice_origin() + Time::step( lag + 1 ) ); e.set_sender( source ); const size_t t = source.get_thread(); const size_t ldid = source.get_local_device_id(); - kernel().connection_manager.send_from_device( t, ldid, e ); + kernel::manager< ConnectionManager >().send_from_device( t, ldid, e ); } inline void EventDeliveryManager::send_local_( Node& source, SecondaryEvent& e, const long ) { assert( not source.has_proxies() ); - e.set_stamp( kernel().simulation_manager.get_slice_origin() + Time::step( 1 ) ); + e.set_stamp( kernel::manager< SimulationManager >().get_slice_origin() + Time::step( 1 ) ); e.set_sender( source ); const size_t t = source.get_thread(); const size_t ldid = source.get_local_device_id(); - kernel().connection_manager.send_from_device( t, ldid, e ); + kernel::manager< ConnectionManager >().send_from_device( t, ldid, e ); } template < class EventT > @@ -574,7 +574,7 @@ EventDeliveryManager::send< SpikeEvent >( Node& source, SpikeEvent& e, const lon { local_spike_counter_[ tid ] += e.get_multiplicity(); - e.set_stamp( kernel().simulation_manager.get_slice_origin() + Time::step( lag + 1 ) ); + e.set_stamp( kernel::manager< SimulationManager >().get_slice_origin() + Time::step( lag + 1 ) ); e.set_sender( source ); if ( source.is_off_grid() ) @@ -585,7 +585,7 @@ EventDeliveryManager::send< SpikeEvent >( Node& source, SpikeEvent& e, const lon { send_remote( tid, e, lag ); } - kernel().connection_manager.send_to_devices( tid, source_node_id, e ); + kernel::manager< 
ConnectionManager >().send_to_devices( tid, source_node_id, e ); } else { @@ -605,8 +605,8 @@ inline void EventDeliveryManager::send_remote( size_t tid, SpikeEvent& e, const long lag ) { // Put the spike in a buffer for the remote machines - const size_t lid = kernel().vp_manager.node_id_to_lid( e.get_sender().get_node_id() ); - const auto& targets = kernel().connection_manager.get_remote_targets_of_local_node( tid, lid ); + const size_t lid = kernel::manager< VPManager >().node_id_to_lid( e.get_sender().get_node_id() ); + const auto& targets = kernel::manager< ConnectionManager >().get_remote_targets_of_local_node( tid, lid ); for ( const auto& target : targets ) { @@ -622,8 +622,8 @@ inline void EventDeliveryManager::send_off_grid_remote( size_t tid, SpikeEvent& e, const long lag ) { // Put the spike in a buffer for the remote machines - const size_t lid = kernel().vp_manager.node_id_to_lid( e.get_sender().get_node_id() ); - const auto& targets = kernel().connection_manager.get_remote_targets_of_local_node( tid, lid ); + const size_t lid = kernel::manager< VPManager >().node_id_to_lid( e.get_sender().get_node_id() ); + const auto& targets = kernel::manager< ConnectionManager >().get_remote_targets_of_local_node( tid, lid ); for ( const auto& target : targets ) { @@ -638,9 +638,9 @@ EventDeliveryManager::send_off_grid_remote( size_t tid, SpikeEvent& e, const lon inline void EventDeliveryManager::send_secondary( Node& source, SecondaryEvent& e ) { - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); const size_t source_node_id = source.get_node_id(); - const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); + const size_t lid = kernel::manager< VPManager >().node_id_to_lid( source_node_id ); if ( source.has_proxies() ) { @@ -652,7 +652,7 @@ EventDeliveryManager::send_secondary( Node& source, SecondaryEvent& e ) for ( const auto& syn_id : supported_syn_ids ) { const 
std::vector< size_t >& positions = - kernel().connection_manager.get_secondary_send_buffer_positions( tid, lid, syn_id ); + kernel::manager< ConnectionManager >().get_secondary_send_buffer_positions( tid, lid, syn_id ); for ( size_t i = 0; i < positions.size(); ++i ) { @@ -660,7 +660,7 @@ EventDeliveryManager::send_secondary( Node& source, SecondaryEvent& e ) e >> it; } } - kernel().connection_manager.send_to_devices( tid, source_node_id, e ); + kernel::manager< ConnectionManager >().send_to_devices( tid, source_node_id, e ); } else { @@ -673,7 +673,7 @@ EventDeliveryManager::send_secondary( Node& source, SecondaryEvent& e ) inline size_t EventDeliveryManager::write_toggle() const { - return kernel().simulation_manager.get_slice() % 2; + return kernel::manager< SimulationManager >().get_slice() % 2; } } // namespace nest diff --git a/nestkernel/free_layer.h b/nestkernel/free_layer.h index 786c2ba075..e3f44f607d 100644 --- a/nestkernel/free_layer.h +++ b/nestkernel/free_layer.h @@ -130,7 +130,7 @@ FreeLayer< D >::set_status( const DictionaryDatum& d ) 0, []( size_t a, NodeIDTriple b ) { - const auto node = kernel().node_manager.get_mpi_local_node_or_device_head( b.node_id ); + const auto node = kernel::manager< NodeManager >().get_mpi_local_node_or_device_head( b.node_id ); return node->is_proxy() ? a : a + 1; } ); @@ -150,7 +150,7 @@ FreeLayer< D >::set_status( const DictionaryDatum& d ) { assert( nc_it != this->node_collection_->end() ); Position< D > point = getValue< std::vector< double > >( *it ); - const auto node = kernel().node_manager.get_mpi_local_node_or_device_head( ( *nc_it ).node_id ); + const auto node = kernel::manager< NodeManager >().get_mpi_local_node_or_device_head( ( *nc_it ).node_id ); assert( node ); if ( not node->is_proxy() ) { @@ -188,7 +188,7 @@ FreeLayer< D >::set_status( const DictionaryDatum& d ) // max_point on all processes. 
Position< D > point = pos->get_values( rng ); - const auto node = kernel().node_manager.get_mpi_local_node_or_device_head( ( *nc_it ).node_id ); + const auto node = kernel::manager< NodeManager >().get_mpi_local_node_or_device_head( ( *nc_it ).node_id ); assert( node ); if ( not node->is_proxy() ) { @@ -284,7 +284,7 @@ FreeLayer< D >::get_status( DictionaryDatum& d, NodeCollection const* nc ) const { // Node index in node collection is global to NEST, so we need to scale down // to get right indices into positions_, which has only rank-local data. - const size_t n_procs = kernel().mpi_manager.get_num_processes(); + const size_t n_procs = kernel::manager< MPIManager >().get_num_processes(); size_t pos_idx = ( *nc_it ).nc_index / n_procs; size_t step = nc_it.get_step_size() / n_procs; @@ -340,7 +340,7 @@ FreeLayer< D >::communicate_positions_( Ins iter, NodeCollectionPTR node_collect // This array will be filled with node ID,pos_x,pos_y[,pos_z] for global nodes: std::vector< double > global_node_id_pos; std::vector< int > displacements; - kernel().mpi_manager.communicate( local_node_id_pos, global_node_id_pos, displacements ); + kernel::manager< MPIManager >().communicate( local_node_id_pos, global_node_id_pos, displacements ); // To avoid copying the vector one extra time in order to sort, we // sneakishly use reinterpret_cast @@ -403,7 +403,7 @@ FreeLayer< D >::lid_to_position_id_( size_t lid ) const } else { - const auto num_procs = kernel().mpi_manager.get_num_processes(); + const auto num_procs = kernel::manager< MPIManager >().get_num_processes(); return lid / num_procs; } } diff --git a/nestkernel/kernel_manager.cpp b/nestkernel/kernel_manager.cpp index b4cb7329ba..eafae80e29 100644 --- a/nestkernel/kernel_manager.cpp +++ b/nestkernel/kernel_manager.cpp @@ -40,59 +40,23 @@ namespace nest { -KernelManager* KernelManager::kernel_manager_instance_ = nullptr; - -void -KernelManager::create_kernel_manager() -{ -#pragma omp master - { - if ( not 
kernel_manager_instance_ ) - { - kernel_manager_instance_ = new KernelManager(); - assert( kernel_manager_instance_ ); - } - } -#pragma omp barrier -} - -void -KernelManager::destroy_kernel_manager() -{ - kernel_manager_instance_->logging_manager.set_logging_level( M_QUIET ); - delete kernel_manager_instance_; -} KernelManager::KernelManager() : fingerprint_( 0 ) - , logging_manager( *new LoggingManager() ) - , mpi_manager( *new MPIManager() ) - , vp_manager( *new VPManager() ) - , module_manager( *new ModuleManager() ) - , random_manager( *new RandomManager() ) - , simulation_manager( *new SimulationManager() ) - , modelrange_manager( *new ModelRangeManager() ) - , connection_manager( *new ConnectionManager() ) - , sp_manager( *new SPManager() ) - , event_delivery_manager( *new EventDeliveryManager() ) - , io_manager( *new IOManager() ) - , model_manager( *new ModelManager() ) - , music_manager( *new MUSICManager() ) - , node_manager( *new NodeManager() ) - , managers( { &logging_manager, - &mpi_manager, - &vp_manager, - &module_manager, - &random_manager, - &simulation_manager, - &modelrange_manager, - &connection_manager, - &sp_manager, - &event_delivery_manager, - &io_manager, - &model_manager, - &music_manager, - &node_manager } ) + , managers( { &kernel::manager< LoggingManager >(), + &kernel::manager< MPIManager >(), + &kernel::manager< VPManager >(), + &kernel::manager< ModuleManager >(), + &kernel::manager< RandomManager >(), + &kernel::manager< SimulationManager >(), + &kernel::manager< ModelRangeManager >(), + &kernel::manager< ConnectionManager >(), + &kernel::manager< SPManager >(), + &kernel::manager< EventDeliveryManager >(), + &kernel::manager< IOManager >(), + &kernel::manager< ModelManager >(), + &kernel::manager< MUSICManager >(), + &kernel::manager< NodeManager >() } ) , initialized_( false ) { } @@ -101,17 +65,12 @@ KernelManager::~KernelManager() { if ( initialized_ ) { - finalize(); - } - - for ( auto manager : managers ) - { - delete 
manager; + KernelManager::finalize(); } } void -KernelManager::initialize() +KernelManager::initialize( const bool ) { for ( auto& manager : managers ) { @@ -143,7 +102,7 @@ KernelManager::cleanup() } void -KernelManager::finalize() +KernelManager::finalize( const bool ) { FULL_LOGGING_ONLY( dump_.close(); ) @@ -176,7 +135,7 @@ KernelManager::change_number_of_threads( size_t new_num_threads ) ( *it )->finalize( /* adjust_number_of_threads_or_rng_only */ true ); } - vp_manager.set_num_threads( new_num_threads ); + kernel::manager< VPManager >().set_num_threads( new_num_threads ); // Initialize in original order with new number of threads set for ( auto& manager : managers ) @@ -187,13 +146,13 @@ KernelManager::change_number_of_threads( size_t new_num_threads ) // Finalizing deleted all register components. Now that all infrastructure // is in place again, we can tell modules to re-register the components // they provide. - module_manager.reinitialize_dynamic_modules(); + kernel::manager< ModuleManager >().reinitialize_dynamic_modules(); // Prepare timers and set the number of threads for multi-threaded timers - kernel().simulation_manager.reset_timers_for_preparation(); - kernel().simulation_manager.reset_timers_for_dynamics(); - kernel().event_delivery_manager.reset_timers_for_preparation(); - kernel().event_delivery_manager.reset_timers_for_dynamics(); + kernel::manager< SimulationManager >().reset_timers_for_preparation(); + kernel::manager< SimulationManager >().reset_timers_for_dynamics(); + kernel::manager< EventDeliveryManager >().reset_timers_for_preparation(); + kernel::manager< EventDeliveryManager >().reset_timers_for_dynamics(); } void diff --git a/nestkernel/kernel_manager.h b/nestkernel/kernel_manager.h index 05411cbb65..0f579aa81c 100644 --- a/nestkernel/kernel_manager.h +++ b/nestkernel/kernel_manager.h @@ -28,6 +28,8 @@ #include +#include "manager_interface.h" + /** @BeginDocumentation Name: kernel - Global properties of the simulation kernel. 
@@ -184,27 +186,16 @@ class ModelManager; class MUSICManager; class NodeManager; -class ManagerInterface; - -class KernelManager +class KernelManager : public ManagerInterface { - KernelManager(); - ~KernelManager(); - unsigned long fingerprint_; - static KernelManager* kernel_manager_instance_; - KernelManager( KernelManager const& ); // do not implement void operator=( KernelManager const& ); // do not implement public: - /** - * Create/destroy and access the KernelManager singleton. - */ - static void create_kernel_manager(); - static void destroy_kernel_manager(); - static KernelManager& get_kernel_manager(); + KernelManager(); + ~KernelManager() override; /** * Prepare kernel for operation. @@ -214,7 +205,7 @@ class KernelManager * * @see finalize(), reset() */ - void initialize(); + void initialize( const bool adjust_number_of_threads_or_rng_only = false ) override; /** * Take down kernel after operation. @@ -224,7 +215,7 @@ class KernelManager * * @see initialize(), reset() */ - void finalize(); + void finalize( const bool adjust_number_of_threads_or_rng_only = false ) override; /** * Reset kernel. @@ -238,16 +229,15 @@ class KernelManager /** * Change number of threads. * - * Set the new number of threads on all managers by calling - * change_number_of_threads() on each of them. + * Set the new number of threads on all managers by calling change_number_of_threads() on each of them. */ void change_number_of_threads( size_t new_num_threads ); - void set_status( const DictionaryDatum& ); - void get_status( DictionaryDatum& ); + void set_status( const DictionaryDatum& ) override; + void get_status( DictionaryDatum& ) override; - void prepare(); - void cleanup(); + void prepare() override; + void cleanup() override; //! 
Returns true if kernel is initialized bool is_initialized() const; @@ -261,30 +251,6 @@ class KernelManager */ void write_to_dump( const std::string& msg ); - /** - * \defgroup Manager components in NEST kernel - * - * The managers are defined below in the order in which they need to be initialized. - * - * NodeManager is last to ensure all model structures are in place before it is initialized. - * @{ - */ - // Property-like access to managers (public references). - LoggingManager& logging_manager; - MPIManager& mpi_manager; - VPManager& vp_manager; - ModuleManager& module_manager; - RandomManager& random_manager; - SimulationManager& simulation_manager; - ModelRangeManager& modelrange_manager; - ConnectionManager& connection_manager; - SPManager& sp_manager; - EventDeliveryManager& event_delivery_manager; - IOManager& io_manager; - ModelManager& model_manager; - MUSICManager& music_manager; - NodeManager& node_manager; - private: //! All managers, order determines initialization and finalization order (latter backwards) std::vector< ManagerInterface* > managers; @@ -293,21 +259,6 @@ class KernelManager std::ofstream dump_; //!< for FULL_LOGGING output }; -KernelManager& kernel(); - -inline KernelManager& -KernelManager::get_kernel_manager() -{ - assert( kernel_manager_instance_ ); - return *kernel_manager_instance_; -} - -inline KernelManager& -kernel() -{ - return KernelManager::get_kernel_manager(); -} - inline bool KernelManager::is_initialized() const { @@ -320,6 +271,21 @@ KernelManager::get_fingerprint() const return fingerprint_; } +namespace kernel +{ + +template < class T > +inline T g_manager_instance; // one per type across all TUs + +template < class T > +T& +manager() noexcept +{ + return g_manager_instance< T >; +} + +} + } // namespace nest #endif /* KERNEL_MANAGER_H */ diff --git a/nestkernel/layer.cpp b/nestkernel/layer.cpp index ccd766c5f4..aa6b031cb0 100644 --- a/nestkernel/layer.cpp +++ b/nestkernel/layer.cpp @@ -56,7 +56,7 @@ 
AbstractLayer::create_layer( const DictionaryDatum& layer_dict ) AbstractLayer* layer_local = nullptr; auto element_name = getValue< std::string >( layer_dict, names::elements ); - auto element_id = kernel().model_manager.get_node_model_id( element_name ); + auto element_id = kernel::manager< ModelManager >().get_node_model_id( element_name ); if ( layer_dict->known( names::positions ) ) { @@ -144,7 +144,7 @@ AbstractLayer::create_layer( const DictionaryDatum& layer_dict ) NodeCollectionMetadataPTR layer_meta( new LayerMetadata( layer_safe ) ); // We have at least one element, create a NodeCollection for it - NodeCollectionPTR node_collection = kernel().node_manager.add_node( element_id, length ); + NodeCollectionPTR node_collection = kernel::manager< NodeManager >().add_node( element_id, length ); node_collection->set_metadata( layer_meta ); diff --git a/nestkernel/layer_impl.h b/nestkernel/layer_impl.h index 7f96d0e44b..3cbd823bd5 100644 --- a/nestkernel/layer_impl.h +++ b/nestkernel/layer_impl.h @@ -471,7 +471,7 @@ Layer< D >::dump_connections( std::ostream& out, def( conn_filter, names::source, NodeCollectionDatum( node_collection ) ); def( conn_filter, names::target, NodeCollectionDatum( target_layer->get_node_collection() ) ); def( conn_filter, names::synapse_model, syn_model ); - ArrayDatum connectome = kernel().connection_manager.get_connections( conn_filter ); + ArrayDatum connectome = kernel::manager< ConnectionManager >().get_connections( conn_filter ); // Get positions of remote nodes std::vector< std::pair< Position< D >, size_t > >* src_vec = get_global_positions_vector( node_collection ); @@ -496,7 +496,7 @@ Layer< D >::dump_connections( std::ostream& out, previous_source_node_id = source_node_id; } - DictionaryDatum result_dict = kernel().connection_manager.get_synapse_status( source_node_id, + DictionaryDatum result_dict = kernel::manager< ConnectionManager >().get_synapse_status( source_node_id, conn.get_target_node_id(), conn.get_target_thread(), 
conn.get_synapse_model_id(), diff --git a/nestkernel/model.cpp b/nestkernel/model.cpp index 9779816cc5..c46b470a64 100644 --- a/nestkernel/model.cpp +++ b/nestkernel/model.cpp @@ -49,7 +49,7 @@ Model::Model( const std::string& name ) void Model::set_threads() { - set_threads_( kernel().vp_manager.get_num_threads() ); + set_threads_( kernel::manager< VPManager >().get_num_threads() ); } void @@ -130,7 +130,7 @@ Model::get_status() } ( *d )[ names::instantiations ] = Token( tmp ); - ( *d )[ names::type_id ] = LiteralDatum( kernel().model_manager.get_node_model( type_id_ )->get_name() ); + ( *d )[ names::type_id ] = LiteralDatum( kernel::manager< ModelManager >().get_node_model( type_id_ )->get_name() ); for ( size_t t = 0; t < tmp.size(); ++t ) { diff --git a/nestkernel/model_manager.cpp b/nestkernel/model_manager.cpp index fc79cc56e4..7ac034b8ff 100644 --- a/nestkernel/model_manager.cpp +++ b/nestkernel/model_manager.cpp @@ -69,7 +69,7 @@ ModelManager::initialize( const bool ) proxynode_model_->set_threads(); } - const size_t num_threads = kernel().vp_manager.get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); // Make space for one vector of connection models per thread connection_models_.resize( num_threads ); @@ -101,7 +101,7 @@ ModelManager::get_num_connection_models() const return 0; } - return connection_models_.at( kernel().vp_manager.get_thread_id() ).size(); + return connection_models_.at( kernel::manager< VPManager >().get_thread_id() ).size(); } void @@ -174,7 +174,7 @@ ModelManager::register_node_model_( Model* model ) #pragma omp parallel { - const size_t t = kernel().vp_manager.get_thread_id(); + const size_t t = kernel::manager< VPManager >().get_thread_id(); proxy_nodes_[ t ].push_back( create_proxynode_( t, id ) ); } @@ -198,7 +198,7 @@ ModelManager::copy_node_model_( const size_t old_id, Name new_name, DictionaryDa #pragma omp parallel { - const size_t t = kernel().vp_manager.get_thread_id(); + const 
size_t t = kernel::manager< VPManager >().get_thread_id(); proxy_nodes_[ t ].push_back( create_proxynode_( t, new_id ) ); } } @@ -206,9 +206,9 @@ ModelManager::copy_node_model_( const size_t old_id, Name new_name, DictionaryDa void ModelManager::copy_connection_model_( const size_t old_id, Name new_name, DictionaryDatum params ) { - kernel().vp_manager.assert_single_threaded(); + kernel::manager< VPManager >().assert_single_threaded(); - const size_t new_id = connection_models_.at( kernel().vp_manager.get_thread_id() ).size(); + const size_t new_id = connection_models_.at( kernel::manager< VPManager >().get_thread_id() ).size(); if ( new_id == invalid_synindex ) { @@ -222,11 +222,11 @@ ModelManager::copy_connection_model_( const size_t old_id, Name new_name, Dictio #pragma omp parallel { - const size_t thread_id = kernel().vp_manager.get_thread_id(); + const size_t thread_id = kernel::manager< VPManager >().get_thread_id(); connection_models_.at( thread_id ) .push_back( get_connection_model( old_id, thread_id ).clone( new_name.toString(), new_id ) ); - kernel().connection_manager.resize_connections(); + kernel::manager< ConnectionManager >().resize_connections(); } set_synapse_defaults_( new_id, params ); // handles parallelism internally @@ -274,15 +274,16 @@ void ModelManager::set_synapse_defaults_( size_t model_id, const DictionaryDatum& params ) { params->clear_access_flags(); - assert_valid_syn_id( model_id, kernel().vp_manager.get_thread_id() ); + assert_valid_syn_id( model_id, kernel::manager< VPManager >().get_thread_id() ); - std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( kernel().vp_manager.get_num_threads() ); + std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( + kernel::manager< VPManager >().get_num_threads() ); // We have to run this in parallel to set the status on nodes that exist on each // thread, such as volume_transmitter. 
#pragma omp parallel { - size_t tid = kernel().vp_manager.get_thread_id(); + size_t tid = kernel::manager< VPManager >().get_thread_id(); try { @@ -296,7 +297,7 @@ ModelManager::set_synapse_defaults_( size_t model_id, const DictionaryDatum& par } } - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { if ( exceptions_raised_.at( tid ).get() ) { @@ -339,17 +340,17 @@ ModelManager::get_synapse_model_id( std::string model_name ) DictionaryDatum ModelManager::get_connector_defaults( synindex syn_id ) const { - assert_valid_syn_id( syn_id, kernel().vp_manager.get_thread_id() ); + assert_valid_syn_id( syn_id, kernel::manager< VPManager >().get_thread_id() ); DictionaryDatum dict( new Dictionary() ); - for ( size_t t = 0; t < static_cast< size_t >( kernel().vp_manager.get_num_threads() ); ++t ) + for ( size_t t = 0; t < static_cast< size_t >( kernel::manager< VPManager >().get_num_threads() ); ++t ) { // each call adds to num_connections connection_models_[ t ][ syn_id ]->get_status( dict ); } - ( *dict )[ names::num_connections ] = kernel().connection_manager.get_num_connections( syn_id ); + ( *dict )[ names::num_connections ] = kernel::manager< ConnectionManager >().get_num_connections( syn_id ); ( *dict )[ names::element_type ] = "synapse"; return dict; @@ -409,7 +410,7 @@ ModelManager::calibrate( const TimeConverter& tc ) { model->calibrate_time( tc ); } - for ( size_t t = 0; t < static_cast< size_t >( kernel().vp_manager.get_num_threads() ); ++t ) + for ( size_t t = 0; t < static_cast< size_t >( kernel::manager< VPManager >().get_num_threads() ); ++t ) { for ( auto&& connection_model : connection_models_[ t ] ) { @@ -425,8 +426,8 @@ ModelManager::calibrate( const TimeConverter& tc ) bool ModelManager::compare_model_by_id_( const int a, const int b ) { - return kernel().model_manager.get_node_model( a )->get_name() - < kernel().model_manager.get_node_model( 
b )->get_name(); + return kernel::manager< ModelManager >().get_node_model( a )->get_name() + < kernel::manager< ModelManager >().get_node_model( b )->get_name(); } void diff --git a/nestkernel/model_manager.h b/nestkernel/model_manager.h index 41b636715d..a05c3ad746 100644 --- a/nestkernel/model_manager.h +++ b/nestkernel/model_manager.h @@ -370,7 +370,7 @@ template < typename CompleteConnectionT > void ModelManager::register_specific_connection_model_( const std::string& name ) { - kernel().vp_manager.assert_single_threaded(); + kernel::manager< VPManager >().assert_single_threaded(); if ( synapsedict_->known( name ) ) { @@ -398,18 +398,18 @@ ModelManager::register_specific_connection_model_( const std::string& name ) { conn_model->get_secondary_event()->add_syn_id( new_syn_id ); } - connection_models_.at( kernel().vp_manager.get_thread_id() ).push_back( conn_model ); - kernel().connection_manager.resize_connections(); + connection_models_.at( kernel::manager< VPManager >().get_thread_id() ).push_back( conn_model ); + kernel::manager< ConnectionManager >().resize_connections(); } // end of parallel section } inline Node* ModelManager::get_proxy_node( size_t tid, size_t node_id ) { - const int model_id = kernel().modelrange_manager.get_model_id( node_id ); + const int model_id = kernel::manager< ModelRangeManager >().get_model_id( node_id ); Node* proxy = proxy_nodes_[ tid ].at( model_id ); proxy->set_node_id_( node_id ); - proxy->set_vp( kernel().vp_manager.node_id_to_vp( node_id ) ); + proxy->set_vp( kernel::manager< VPManager >().node_id_to_vp( node_id ) ); return proxy; } diff --git a/nestkernel/modelrange_manager.cpp b/nestkernel/modelrange_manager.cpp index 29b2ac09b3..957ed0c5d9 100644 --- a/nestkernel/modelrange_manager.cpp +++ b/nestkernel/modelrange_manager.cpp @@ -113,7 +113,7 @@ ModelRangeManager::get_model_id( size_t node_id ) const nest::Model* nest::ModelRangeManager::get_model_of_node_id( size_t node_id ) { - return 
kernel().model_manager.get_node_model( get_model_id( node_id ) ); + return kernel::manager< ModelManager >().get_node_model( get_model_id( node_id ) ); } const modelrange& diff --git a/nestkernel/module_manager.cpp b/nestkernel/module_manager.cpp index 57c668da9b..5c2bcb44f4 100644 --- a/nestkernel/module_manager.cpp +++ b/nestkernel/module_manager.cpp @@ -116,7 +116,7 @@ ModuleManager::install( const std::string& name ) { // We cannot have connections without network elements, so we only need to check nodes. // Simulating an empty network causes no problems, so we don't have to check for that. - if ( kernel().node_manager.size() > 0 ) + if ( kernel::manager< NodeManager >().size() > 0 ) { throw KernelException( "Network elements have been created, so external modules can no longer be imported. " diff --git a/nestkernel/mpi_manager.cpp b/nestkernel/mpi_manager.cpp index 48b2aab7cd..1ea8714ff7 100644 --- a/nestkernel/mpi_manager.cpp +++ b/nestkernel/mpi_manager.cpp @@ -91,8 +91,8 @@ nest::MPIManager::init_mpi( int*, char*** ) // use 2 processes entries (need at least two // entries per process to use flag of first entry as validity and // last entry to communicate end of communication) - kernel().mpi_manager.set_buffer_size_target_data( 2 ); - kernel().mpi_manager.set_buffer_size_spike_data( 2 ); + kernel::manager< MPIManager >().set_buffer_size_target_data( 2 ); + kernel::manager< MPIManager >().set_buffer_size_spike_data( 2 ); recv_counts_secondary_events_in_int_per_rank_.resize( 1, 0 ); recv_displacements_secondary_events_in_int_per_rank_.resize( 1, 0 ); @@ -113,8 +113,9 @@ nest::MPIManager::set_communicator( MPI_Comm global_comm ) // use at least 2 * number of processes entries (need at least two // entries per process to use flag of first entry as validity and // last entry to communicate end of communication) - kernel().mpi_manager.set_buffer_size_target_data( 2 * kernel().mpi_manager.get_num_processes() ); - kernel().mpi_manager.set_buffer_size_spike_data( 2 
* kernel().mpi_manager.get_num_processes() ); + kernel::manager< MPIManager >().set_buffer_size_target_data( + 2 * kernel::manager< MPIManager >().get_num_processes() ); + kernel::manager< MPIManager >().set_buffer_size_spike_data( 2 * kernel::manager< MPIManager >().get_num_processes() ); } void @@ -126,9 +127,9 @@ nest::MPIManager::init_mpi( int* argc, char** argv[] ) if ( init == 0 ) { #ifdef HAVE_MUSIC - kernel().music_manager.init_music( argc, argv ); + kernel::manager< MUSICManager >().init_music( argc, argv ); // get a communicator from MUSIC - set_communicator( static_cast< MPI_Comm >( kernel().music_manager.communicator() ) ); + set_communicator( static_cast< MPI_Comm >( kernel::manager< MUSICManager >().communicator() ) ); #else /* #ifdef HAVE_MUSIC */ int provided_thread_level; MPI_Init_thread( argc, argv, MPI_THREAD_FUNNELED, &provided_thread_level ); @@ -286,7 +287,7 @@ nest::MPIManager::mpi_finalize( int exitcode ) { if ( exitcode == 0 ) { - kernel().music_manager.music_finalize(); // calls MPI_Finalize() + kernel::manager< MUSICManager >().music_finalize(); // calls MPI_Finalize() } else { diff --git a/nestkernel/music_event_handler.cpp b/nestkernel/music_event_handler.cpp index 1407adea8a..b0cd209b4f 100644 --- a/nestkernel/music_event_handler.cpp +++ b/nestkernel/music_event_handler.cpp @@ -95,7 +95,7 @@ MusicEventHandler::publish_port() { if ( not published_ ) { - music_port_ = kernel().music_manager.get_music_setup()->publishEventInput( portname_ ); + music_port_ = kernel::manager< MUSICManager >().get_music_setup()->publishEventInput( portname_ ); // MUSIC wants seconds, NEST has miliseconds const double acceptable_latency_s = 0.001 * acceptable_latency_; diff --git a/nestkernel/music_rate_in_handler.cpp b/nestkernel/music_rate_in_handler.cpp index 55a4089858..f70686aed5 100644 --- a/nestkernel/music_rate_in_handler.cpp +++ b/nestkernel/music_rate_in_handler.cpp @@ -85,7 +85,7 @@ MusicRateInHandler::publish_port() if ( not published_ ) { - 
MUSIC::Setup* s = kernel().music_manager.get_music_setup(); + MUSIC::Setup* s = kernel::manager< MUSICManager >().get_music_setup(); if ( s == 0 ) { throw MUSICSimulationHasRun( "" ); @@ -126,7 +126,7 @@ MusicRateInHandler::publish_port() void MusicRateInHandler::update( Time const&, const long, const long ) { - const size_t buffer_size = kernel().connection_manager.get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); std::vector< double > new_rates( buffer_size, 0.0 ); for ( size_t channel = 0; channel < channelmap_.size(); ++channel ) diff --git a/nestkernel/nest.cpp b/nestkernel/nest.cpp index d050d932bb..21d0fbe825 100644 --- a/nestkernel/nest.cpp +++ b/nestkernel/nest.cpp @@ -48,9 +48,8 @@ namespace nest void init_nest( int* argc, char** argv[] ) { - KernelManager::create_kernel_manager(); - kernel().mpi_manager.init_mpi( argc, argv ); - kernel().initialize(); + kernel::manager< MPIManager >().init_mpi( argc, argv ); + kernel::manager< KernelManager >().initialize(); } void @@ -66,54 +65,54 @@ install_module( const std::string& ) void reset_kernel() { - kernel().reset(); + kernel::manager< KernelManager >().reset(); } void register_logger_client( const deliver_logging_event_ptr client_callback ) { - kernel().logging_manager.register_logging_client( client_callback ); + kernel::manager< LoggingManager >().register_logging_client( client_callback ); } void print_nodes_to_stream( std::ostream& ostr ) { - kernel().node_manager.print( ostr ); + kernel::manager< NodeManager >().print( ostr ); } RngPtr get_rank_synced_rng() { - return kernel().random_manager.get_rank_synced_rng(); + return kernel::manager< RandomManager >().get_rank_synced_rng(); } RngPtr get_vp_synced_rng( size_t tid ) { - return kernel().random_manager.get_vp_synced_rng( tid ); + return kernel::manager< RandomManager >().get_vp_synced_rng( tid ); } RngPtr get_vp_specific_rng( size_t tid ) { - return kernel().random_manager.get_vp_specific_rng( tid ); 
+ return kernel::manager< RandomManager >().get_vp_specific_rng( tid ); } void set_kernel_status( const DictionaryDatum& dict ) { dict->clear_access_flags(); - kernel().set_status( dict ); + kernel::manager< KernelManager >().set_status( dict ); ALL_ENTRIES_ACCESSED( *dict, "SetKernelStatus", "Unread dictionary entries: " ); } DictionaryDatum get_kernel_status() { - assert( kernel().is_initialized() ); + assert( kernel::manager< KernelManager >().is_initialized() ); DictionaryDatum d( new Dictionary ); - kernel().get_status( d ); + kernel::manager< KernelManager >().get_status( d ); return d; } @@ -121,13 +120,13 @@ get_kernel_status() void set_node_status( const size_t node_id, const DictionaryDatum& dict ) { - kernel().node_manager.set_status( node_id, dict ); + kernel::manager< NodeManager >().set_status( node_id, dict ); } DictionaryDatum get_node_status( const size_t node_id ) { - return kernel().node_manager.get_status( node_id ); + return kernel::manager< NodeManager >().get_status( node_id ); } void @@ -142,7 +141,7 @@ set_connection_status( const ConnectionDatum& conn, const DictionaryDatum& dict dict->clear_access_flags(); - kernel().connection_manager.set_synapse_status( source_node_id, target_node_id, tid, syn_id, p, dict ); + kernel::manager< ConnectionManager >().set_synapse_status( source_node_id, target_node_id, tid, syn_id, p, dict ); ALL_ENTRIES_ACCESSED2( *dict, "SetStatus", @@ -154,7 +153,7 @@ set_connection_status( const ConnectionDatum& conn, const DictionaryDatum& dict DictionaryDatum get_connection_status( const ConnectionDatum& conn ) { - return kernel().connection_manager.get_synapse_status( conn.get_source_node_id(), + return kernel::manager< ConnectionManager >().get_synapse_status( conn.get_source_node_id(), conn.get_target_node_id(), conn.get_target_thread(), conn.get_synapse_model_id(), @@ -169,14 +168,14 @@ create( const Name& model_name, const size_t n_nodes ) throw RangeCheck(); } - const size_t model_id = 
kernel().model_manager.get_node_model_id( model_name ); - return kernel().node_manager.add_node( model_id, n_nodes ); + const size_t model_id = kernel::manager< ModelManager >().get_node_model_id( model_name ); + return kernel::manager< NodeManager >().add_node( model_id, n_nodes ); } NodeCollectionPTR get_nodes( const DictionaryDatum& params, const bool local_only ) { - return kernel().node_manager.get_nodes( params, local_only ); + return kernel::manager< NodeManager >().get_nodes( params, local_only ); } void @@ -185,7 +184,7 @@ connect( NodeCollectionPTR sources, const DictionaryDatum& connectivity, const std::vector< DictionaryDatum >& synapse_params ) { - kernel().connection_manager.connect( sources, targets, connectivity, synapse_params ); + kernel::manager< ConnectionManager >().connect( sources, targets, connectivity, synapse_params ); } void @@ -196,7 +195,7 @@ connect_tripartite( NodeCollectionPTR sources, const DictionaryDatum& third_connectivity, const std::map< Name, std::vector< DictionaryDatum > >& synapse_specs ) { - kernel().connection_manager.connect_tripartite( + kernel::manager< ConnectionManager >().connect_tripartite( sources, targets, third, connectivity, third_connectivity, synapse_specs ); } @@ -210,7 +209,8 @@ connect_arrays( long* sources, size_t n, std::string syn_model ) { - kernel().connection_manager.connect_arrays( sources, targets, weights, delays, p_keys, p_values, n, syn_model ); + kernel::manager< ConnectionManager >().connect_arrays( + sources, targets, weights, delays, p_keys, p_values, n, syn_model ); } ArrayDatum @@ -218,7 +218,7 @@ get_connections( const DictionaryDatum& dict ) { dict->clear_access_flags(); - ArrayDatum array = kernel().connection_manager.get_connections( dict ); + ArrayDatum array = kernel::manager< ConnectionManager >().get_connections( dict ); ALL_ENTRIES_ACCESSED( *dict, "GetConnections", "Unread dictionary entries: " ); @@ -231,8 +231,8 @@ disconnect( const ArrayDatum& conns ) for ( size_t conn_index = 
0; conn_index < conns.size(); ++conn_index ) { const auto conn_datum = getValue< ConnectionDatum >( conns.get( conn_index ) ); - const auto target_node = kernel().node_manager.get_node_or_proxy( conn_datum.get_target_node_id() ); - kernel().sp_manager.disconnect( + const auto target_node = kernel::manager< NodeManager >().get_node_or_proxy( conn_datum.get_target_node_id() ); + kernel::manager< SPManager >().disconnect( conn_datum.get_source_node_id(), target_node, conn_datum.get_target_thread(), conn_datum.get_synapse_model_id() ); } } @@ -265,38 +265,38 @@ run( const double& time ) "of the simulation resolution." ); } - kernel().simulation_manager.run( t_sim ); + kernel::manager< SimulationManager >().run( t_sim ); } void prepare() { - kernel().prepare(); + kernel::manager< KernelManager >().prepare(); } void cleanup() { - kernel().cleanup(); + kernel::manager< KernelManager >().cleanup(); } void copy_model( const Name& oldmodname, const Name& newmodname, const DictionaryDatum& dict ) { - kernel().model_manager.copy_model( oldmodname, newmodname, dict ); + kernel::manager< ModelManager >().copy_model( oldmodname, newmodname, dict ); } void set_model_defaults( const std::string component, const DictionaryDatum& dict ) { - if ( kernel().model_manager.set_model_defaults( component, dict ) ) + if ( kernel::manager< ModelManager >().set_model_defaults( component, dict ) ) { return; } - if ( kernel().io_manager.is_valid_recording_backend( component ) ) + if ( kernel::manager< IOManager >().is_valid_recording_backend( component ) ) { - kernel().io_manager.set_recording_backend_status( component, dict ); + kernel::manager< IOManager >().set_recording_backend_status( component, dict ); return; } @@ -308,8 +308,8 @@ get_model_defaults( const std::string component ) { try { - const size_t model_id = kernel().model_manager.get_node_model_id( component ); - return kernel().model_manager.get_node_model( model_id )->get_status(); + const size_t model_id = kernel::manager< 
ModelManager >().get_node_model_id( component ); + return kernel::manager< ModelManager >().get_node_model( model_id )->get_status(); } catch ( UnknownModelName& ) { @@ -318,17 +318,17 @@ get_model_defaults( const std::string component ) try { - const size_t synapse_model_id = kernel().model_manager.get_synapse_model_id( component ); - return kernel().model_manager.get_connector_defaults( synapse_model_id ); + const size_t synapse_model_id = kernel::manager< ModelManager >().get_synapse_model_id( component ); + return kernel::manager< ModelManager >().get_connector_defaults( synapse_model_id ); } catch ( UnknownSynapseType& ) { // ignore errors; throw at the end of the function if that's reached } - if ( kernel().io_manager.is_valid_recording_backend( component ) ) + if ( kernel::manager< IOManager >().is_valid_recording_backend( component ) ) { - return kernel().io_manager.get_recording_backend_status( component ); + return kernel::manager< IOManager >().get_recording_backend_status( component ); } throw UnknownComponent( component ); @@ -368,7 +368,7 @@ apply( const ParameterDatum& param, const NodeCollectionDatum& nc ) RngPtr rng = get_rank_synced_rng(); for ( auto it = nc->begin(); it < nc->end(); ++it ) { - auto node = kernel().node_manager.get_node_or_proxy( ( *it ).node_id ); + auto node = kernel::manager< NodeManager >().get_node_or_proxy( ( *it ).node_id ); result.push_back( param->value( rng, node ) ); } return result; diff --git a/nestkernel/nest.h b/nestkernel/nest.h index 5bd652200b..3b61772b01 100644 --- a/nestkernel/nest.h +++ b/nestkernel/nest.h @@ -190,14 +190,14 @@ template < template < typename > class ConnectorModelT > void register_connection_model( const std::string& name ) { - kernel().model_manager.register_connection_model< ConnectorModelT >( name ); + kernel::manager< ModelManager >().register_connection_model< ConnectorModelT >( name ); } template < typename NodeModelT > void register_node_model( const std::string& name, std::string 
deprecation_info ) { - kernel().model_manager.register_node_model< NodeModelT >( name, deprecation_info ); + kernel::manager< ModelManager >().register_node_model< NodeModelT >( name, deprecation_info ); } } diff --git a/nestkernel/nestmodule.cpp b/nestkernel/nestmodule.cpp index 7a7f3449ce..40ef15d704 100644 --- a/nestkernel/nestmodule.cpp +++ b/nestkernel/nestmodule.cpp @@ -375,7 +375,7 @@ NestModule::SetStatus_aaFunction::execute( SLIInterpreter* i ) const { ConnectionDatum con_id = getValue< ConnectionDatum >( conn_a[ con ] ); dict->clear_access_flags(); - kernel().connection_manager.set_synapse_status( con_id.get_source_node_id(), + kernel::manager< ConnectionManager >().set_synapse_status( con_id.get_source_node_id(), con_id.get_target_node_id(), con_id.get_target_thread(), con_id.get_synapse_model_id(), @@ -393,7 +393,7 @@ NestModule::SetStatus_aaFunction::execute( SLIInterpreter* i ) const DictionaryDatum dict = getValue< DictionaryDatum >( dict_a[ con ] ); ConnectionDatum con_id = getValue< ConnectionDatum >( conn_a[ con ] ); dict->clear_access_flags(); - kernel().connection_manager.set_synapse_status( con_id.get_source_node_id(), + kernel::manager< ConnectionManager >().set_synapse_status( con_id.get_source_node_id(), con_id.get_target_node_id(), con_id.get_target_thread(), con_id.get_synapse_model_id(), @@ -457,7 +457,7 @@ NestModule::GetStatus_CFunction::execute( SLIInterpreter* i ) const ConnectionDatum conn = getValue< ConnectionDatum >( i->OStack.pick( 0 ) ); - DictionaryDatum result_dict = kernel().connection_manager.get_synapse_status( conn.get_source_node_id(), + DictionaryDatum result_dict = kernel::manager< ConnectionManager >().get_synapse_status( conn.get_source_node_id(), conn.get_target_node_id(), conn.get_target_thread(), conn.get_synapse_model_id(), @@ -480,11 +480,12 @@ NestModule::GetStatus_aFunction::execute( SLIInterpreter* i ) const for ( size_t nt = 0; nt < n_results; ++nt ) { ConnectionDatum con_id = getValue< ConnectionDatum >( 
conns.get( nt ) ); - DictionaryDatum result_dict = kernel().connection_manager.get_synapse_status( con_id.get_source_node_id(), - con_id.get_target_node_id(), - con_id.get_target_thread(), - con_id.get_synapse_model_id(), - con_id.get_port() ); + DictionaryDatum result_dict = + kernel::manager< ConnectionManager >().get_synapse_status( con_id.get_source_node_id(), + con_id.get_target_node_id(), + con_id.get_target_thread(), + con_id.get_synapse_model_id(), + con_id.get_port() ); result.push_back( result_dict ); } @@ -557,7 +558,7 @@ NestModule::Install_sFunction::execute( SLIInterpreter* i ) const const std::string modulename = getValue< std::string >( i->OStack.pick( 0 ) ); - kernel().module_manager.install( modulename ); + kernel::manager< ModuleManager >().install( modulename ); i->OStack.pop(); i->EStack.pop(); @@ -629,7 +630,7 @@ NestModule::CopyModel_l_l_DFunction::execute( SLIInterpreter* i ) const const Name new_name = getValue< Name >( i->OStack.pick( 1 ) ); DictionaryDatum params = getValue< DictionaryDatum >( i->OStack.pick( 0 ) ); - kernel().model_manager.copy_model( old_name, new_name, params ); + kernel::manager< ModelManager >().copy_model( old_name, new_name, params ); i->OStack.pop( 3 ); i->EStack.pop(); @@ -693,7 +694,7 @@ NestModule::Disconnect_g_g_D_DFunction::execute( SLIInterpreter* i ) const DictionaryDatum synapse_params = getValue< DictionaryDatum >( i->OStack.pick( 0 ) ); // dictionary access checking is handled by disconnect - kernel().sp_manager.disconnect( sources, targets, connectivity, synapse_params ); + kernel::manager< SPManager >().disconnect( sources, targets, connectivity, synapse_params ); i->OStack.pop( 4 ); i->EStack.pop(); @@ -718,7 +719,7 @@ NestModule::Disconnect_aFunction::execute( SLIInterpreter* i ) const void NestModule::Connect_g_g_D_DFunction::execute( SLIInterpreter* i ) const { - kernel().connection_manager.sw_construction_connect.start(); + kernel::manager< ConnectionManager >().sw_construction_connect.start(); 
i->assert_stack_load( 4 ); @@ -728,18 +729,18 @@ NestModule::Connect_g_g_D_DFunction::execute( SLIInterpreter* i ) const DictionaryDatum synapse_params = getValue< DictionaryDatum >( i->OStack.pick( 0 ) ); // dictionary access checking is handled by connect - kernel().connection_manager.connect( sources, targets, connectivity, { synapse_params } ); + kernel::manager< ConnectionManager >().connect( sources, targets, connectivity, { synapse_params } ); i->OStack.pop( 4 ); i->EStack.pop(); - kernel().connection_manager.sw_construction_connect.stop(); + kernel::manager< ConnectionManager >().sw_construction_connect.stop(); } void NestModule::Connect_g_g_D_aFunction::execute( SLIInterpreter* i ) const { - kernel().connection_manager.sw_construction_connect.start(); + kernel::manager< ConnectionManager >().sw_construction_connect.start(); i->assert_stack_load( 4 ); @@ -755,19 +756,19 @@ NestModule::Connect_g_g_D_aFunction::execute( SLIInterpreter* i ) const } // dictionary access checking is handled by connect - kernel().connection_manager.connect( sources, targets, connectivity, synapse_params ); + kernel::manager< ConnectionManager >().connect( sources, targets, connectivity, synapse_params ); i->OStack.pop( 4 ); i->EStack.pop(); - kernel().connection_manager.sw_construction_connect.stop(); + kernel::manager< ConnectionManager >().sw_construction_connect.stop(); } void NestModule::ConnectTripartite_g_g_g_D_D_DFunction::execute( SLIInterpreter* i ) const { - kernel().connection_manager.sw_construction_connect.start(); + kernel::manager< ConnectionManager >().sw_construction_connect.start(); i->assert_stack_load( 6 ); @@ -798,26 +799,26 @@ NestModule::ConnectTripartite_g_g_g_D_D_DFunction::execute( SLIInterpreter* i ) i->OStack.pop( 6 ); i->EStack.pop(); - kernel().connection_manager.sw_construction_connect.stop(); + kernel::manager< ConnectionManager >().sw_construction_connect.stop(); } void NestModule::ConnectSonata_D_Function::execute( SLIInterpreter* i ) const { - 
kernel().connection_manager.sw_construction_connect.start(); + kernel::manager< ConnectionManager >().sw_construction_connect.start(); i->assert_stack_load( 2 ); DictionaryDatum graph_specs = getValue< DictionaryDatum >( i->OStack.pick( 1 ) ); const long hyberslab_size = getValue< long >( i->OStack.pick( 0 ) ); - kernel().connection_manager.connect_sonata( graph_specs, hyberslab_size ); + kernel::manager< ConnectionManager >().connect_sonata( graph_specs, hyberslab_size ); i->OStack.pop( 2 ); i->EStack.pop(); - kernel().connection_manager.sw_construction_connect.stop(); + kernel::manager< ConnectionManager >().sw_construction_connect.stop(); } /** @BeginDocumentation @@ -837,7 +838,7 @@ NestModule::ConnectSonata_D_Function::execute( SLIInterpreter* i ) const void NestModule::MemoryInfoFunction::execute( SLIInterpreter* i ) const { - kernel().model_manager.memory_info(); + kernel::manager< ModelManager >().memory_info(); i->EStack.pop(); } @@ -862,21 +863,21 @@ NestModule::PrintNodesToStreamFunction::execute( SLIInterpreter* i ) const void NestModule::RankFunction::execute( SLIInterpreter* i ) const { - i->OStack.push( kernel().mpi_manager.get_rank() ); + i->OStack.push( kernel::manager< MPIManager >().get_rank() ); i->EStack.pop(); } void NestModule::NumProcessesFunction::execute( SLIInterpreter* i ) const { - i->OStack.push( kernel().mpi_manager.get_num_processes() ); + i->OStack.push( kernel::manager< MPIManager >().get_num_processes() ); i->EStack.pop(); } void NestModule::SyncProcessesFunction::execute( SLIInterpreter* i ) const { - kernel().mpi_manager.synchronize(); + kernel::manager< MPIManager >().synchronize(); i->EStack.pop(); } @@ -891,11 +892,11 @@ NestModule::TimeCommunication_i_i_bFunction::execute( SLIInterpreter* i ) const double time = 0.0; if ( offgrid ) { - time = kernel().mpi_manager.time_communicate_offgrid( num_bytes, samples ); + time = kernel::manager< MPIManager >().time_communicate_offgrid( num_bytes, samples ); } else { - time = 
kernel().mpi_manager.time_communicate( num_bytes, samples ); + time = kernel::manager< MPIManager >().time_communicate( num_bytes, samples ); } i->OStack.pop( 3 ); @@ -913,7 +914,7 @@ NestModule::TimeCommunicationv_i_iFunction::execute( SLIInterpreter* i ) const double time = 0.0; - time = kernel().mpi_manager.time_communicatev( num_bytes, samples ); + time = kernel::manager< MPIManager >().time_communicatev( num_bytes, samples ); i->OStack.pop( 2 ); i->OStack.push( time ); @@ -930,7 +931,7 @@ NestModule::TimeCommunicationAlltoall_i_iFunction::execute( SLIInterpreter* i ) double time = 0.0; - time = kernel().mpi_manager.time_communicate_alltoall( num_bytes, samples ); + time = kernel::manager< MPIManager >().time_communicate_alltoall( num_bytes, samples ); i->OStack.pop( 2 ); i->OStack.push( time ); @@ -947,7 +948,7 @@ NestModule::TimeCommunicationAlltoallv_i_iFunction::execute( SLIInterpreter* i ) double time = 0.0; - time = kernel().mpi_manager.time_communicate_alltoallv( num_bytes, samples ); + time = kernel::manager< MPIManager >().time_communicate_alltoallv( num_bytes, samples ); i->OStack.pop( 2 ); i->OStack.push( time ); @@ -957,7 +958,7 @@ NestModule::TimeCommunicationAlltoallv_i_iFunction::execute( SLIInterpreter* i ) void NestModule::ProcessorNameFunction::execute( SLIInterpreter* i ) const { - i->OStack.push( kernel().mpi_manager.get_processor_name() ); + i->OStack.push( kernel::manager< MPIManager >().get_processor_name() ); i->EStack.pop(); } @@ -967,7 +968,7 @@ NestModule::MPIAbort_iFunction::execute( SLIInterpreter* i ) const { i->assert_stack_load( 1 ); long exitcode = getValue< long >( i->OStack.pick( 0 ) ); - kernel().mpi_manager.mpi_abort( exitcode ); + kernel::manager< MPIManager >().mpi_abort( exitcode ); i->EStack.pop(); } #endif @@ -1290,7 +1291,7 @@ NestModule::SetAcceptableLatencyFunction::execute( SLIInterpreter* i ) const std::string port_name = getValue< std::string >( i->OStack.pick( 1 ) ); double latency = getValue< double >( 
i->OStack.pick( 0 ) ); - kernel().music_manager.set_music_in_port_acceptable_latency( port_name, latency ); + kernel::manager< MUSICManager >().set_music_in_port_acceptable_latency( port_name, latency ); i->OStack.pop( 2 ); i->EStack.pop(); @@ -1304,7 +1305,7 @@ NestModule::SetMaxBufferedFunction::execute( SLIInterpreter* i ) const std::string port_name = getValue< std::string >( i->OStack.pick( 1 ) ); int maxBuffered = getValue< long >( i->OStack.pick( 0 ) ); - kernel().music_manager.set_music_in_port_max_buffered( port_name, maxBuffered ); + kernel::manager< MUSICManager >().set_music_in_port_max_buffered( port_name, maxBuffered ); i->OStack.pop( 2 ); i->EStack.pop(); @@ -1315,14 +1316,14 @@ NestModule::SetMaxBufferedFunction::execute( SLIInterpreter* i ) const void NestModule::EnableStructuralPlasticity_Function::execute( SLIInterpreter* i ) const { - kernel().sp_manager.enable_structural_plasticity(); + kernel::manager< SPManager >().enable_structural_plasticity(); i->EStack.pop(); } void NestModule::DisableStructuralPlasticity_Function::execute( SLIInterpreter* i ) const { - kernel().sp_manager.disable_structural_plasticity(); + kernel::manager< SPManager >().disable_structural_plasticity(); i->EStack.pop(); } @@ -1333,7 +1334,7 @@ NestModule::SetStdpEps_dFunction::execute( SLIInterpreter* i ) const i->assert_stack_load( 1 ); const double stdp_eps = getValue< double >( i->OStack.top() ); - kernel().connection_manager.set_stdp_eps( stdp_eps ); + kernel::manager< ConnectionManager >().set_stdp_eps( stdp_eps ); i->OStack.pop(); i->EStack.pop(); @@ -1895,7 +1896,7 @@ NestModule::Sub_M_MFunction::execute( SLIInterpreter* i ) const void NestModule::ConnectLayers_g_g_DFunction::execute( SLIInterpreter* i ) const { - kernel().connection_manager.sw_construction_connect.start(); + kernel::manager< ConnectionManager >().sw_construction_connect.start(); i->assert_stack_load( 3 ); @@ -1908,7 +1909,7 @@ NestModule::ConnectLayers_g_g_DFunction::execute( SLIInterpreter* i ) 
const i->OStack.pop( 3 ); i->EStack.pop(); - kernel().connection_manager.sw_construction_connect.stop(); + kernel::manager< ConnectionManager >().sw_construction_connect.stop(); } void @@ -2188,7 +2189,7 @@ NestModule::init( SLIInterpreter* i ) Token statusd = i->baselookup( Name( "statusdict" ) ); DictionaryDatum dd = getValue< DictionaryDatum >( statusd ); dd->insert( Name( "kernelname" ), new StringDatum( "NEST" ) ); - dd->insert( Name( "is_mpi" ), new BoolDatum( kernel().mpi_manager.is_mpi_used() ) ); + dd->insert( Name( "is_mpi" ), new BoolDatum( kernel::manager< MPIManager >().is_mpi_used() ) ); register_parameter< ConstantParameter >( "constant" ); register_parameter< UniformParameter >( "uniform" ); diff --git a/nestkernel/node.cpp b/nestkernel/node.cpp index 602c6431a9..2fcb87dcf6 100644 --- a/nestkernel/node.cpp +++ b/nestkernel/node.cpp @@ -116,14 +116,14 @@ Node::get_name() const return std::string( "UnknownNode" ); } - return kernel().model_manager.get_node_model( model_id_ )->get_name(); + return kernel::manager< ModelManager >().get_node_model( model_id_ )->get_name(); } Model& Node::get_model_() const { assert( model_id_ >= 0 ); - return *kernel().model_manager.get_node_model( model_id_ ); + return *kernel::manager< ModelManager >().get_node_model( model_id_ ); } DictionaryDatum @@ -151,7 +151,7 @@ Node::get_status_base() DictionaryDatum dict = get_status_dict_(); // add information available for all nodes - ( *dict )[ names::local ] = kernel().node_manager.is_local_node( this ); + ( *dict )[ names::local ] = kernel::manager< NodeManager >().is_local_node( this ); ( *dict )[ names::model ] = LiteralDatum( get_name() ); ( *dict )[ names::model_id ] = get_model_id(); ( *dict )[ names::global_id ] = get_node_id(); diff --git a/nestkernel/node_collection.cpp b/nestkernel/node_collection.cpp index 57a75c908b..edb95aed9a 100644 --- a/nestkernel/node_collection.cpp +++ b/nestkernel/node_collection.cpp @@ -63,24 +63,24 @@ 
nc_const_iterator::nc_const_iterator( NodeCollectionPTR collection_ptr, : coll_ptr_( collection_ptr ) , element_idx_( offset ) , part_idx_( 0 ) - , step_( kind == NCIteratorKind::RANK_LOCAL - ? std::lcm( stride, kernel().mpi_manager.get_num_processes() ) - : ( kind == NCIteratorKind::THREAD_LOCAL ? std::lcm( stride, kernel().vp_manager.get_num_virtual_processes() ) - : stride ) ) + , step_( kind == NCIteratorKind::RANK_LOCAL ? std::lcm( stride, kernel::manager< MPIManager >().get_num_processes() ) + : ( kind == NCIteratorKind::THREAD_LOCAL ? std::lcm( stride, + kernel::manager< VPManager >().get_num_virtual_processes() ) + : stride ) ) , kind_( kind ) , rank_or_vp_( kind == NCIteratorKind::RANK_LOCAL - ? kernel().mpi_manager.get_rank() - : ( kind == NCIteratorKind::THREAD_LOCAL ? kernel().vp_manager.get_vp() : invalid_thread ) ) + ? kernel::manager< MPIManager >().get_rank() + : ( kind == NCIteratorKind::THREAD_LOCAL ? kernel::manager< VPManager >().get_vp() : invalid_thread ) ) , primitive_collection_( &collection ) , composite_collection_( nullptr ) { assert( not collection_ptr.get() or collection_ptr.get() == &collection ); assert( element_idx_ <= collection.size() ); // allow == for end() - FULL_LOGGING_ONLY( - kernel().write_to_dump( String::compose( "NCIT Prim ctor rk %1, thr %2, pix %3, eix %4, step %5, kind %6, rvp %7", - kernel().mpi_manager.get_rank(), - kernel().vp_manager.get_thread_id(), + FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( + String::compose( "NCIT Prim ctor rk %1, thr %2, pix %3, eix %4, step %5, kind %6, rvp %7", + kernel::manager< MPIManager >().get_rank(), + kernel::manager< VPManager >().get_thread_id(), part_idx_, element_idx_, step_, @@ -97,14 +97,14 @@ nc_const_iterator::nc_const_iterator( NodeCollectionPTR collection_ptr, : coll_ptr_( collection_ptr ) , element_idx_( offset ) , part_idx_( part ) - , step_( kind == NCIteratorKind::RANK_LOCAL - ? 
std::lcm( stride, kernel().mpi_manager.get_num_processes() ) - : ( kind == NCIteratorKind::THREAD_LOCAL ? std::lcm( stride, kernel().vp_manager.get_num_virtual_processes() ) - : stride ) ) + , step_( kind == NCIteratorKind::RANK_LOCAL ? std::lcm( stride, kernel::manager< MPIManager >().get_num_processes() ) + : ( kind == NCIteratorKind::THREAD_LOCAL ? std::lcm( stride, + kernel::manager< VPManager >().get_num_virtual_processes() ) + : stride ) ) , kind_( kind ) , rank_or_vp_( kind == NCIteratorKind::RANK_LOCAL - ? kernel().mpi_manager.get_rank() - : ( kind == NCIteratorKind::THREAD_LOCAL ? kernel().vp_manager.get_vp() : invalid_thread ) ) + ? kernel::manager< MPIManager >().get_rank() + : ( kind == NCIteratorKind::THREAD_LOCAL ? kernel::manager< VPManager >().get_vp() : invalid_thread ) ) , primitive_collection_( nullptr ) , composite_collection_( &collection ) { @@ -113,10 +113,10 @@ nc_const_iterator::nc_const_iterator( NodeCollectionPTR collection_ptr, // Allow <= for end iterator assert( ( part < collection.parts_.size() and offset <= collection.parts_[ part ].size() ) ); - FULL_LOGGING_ONLY( - kernel().write_to_dump( String::compose( "NCIT Comp ctor rk %1, thr %2, pix %3, eix %4, step %5, kind %6, rvp %7", - kernel().mpi_manager.get_rank(), - kernel().vp_manager.get_thread_id(), + FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( + String::compose( "NCIT Comp ctor rk %1, thr %2, pix %3, eix %4, step %5, kind %6, rvp %7", + kernel::manager< MPIManager >().get_rank(), + kernel::manager< VPManager >().get_thread_id(), part_idx_, element_idx_, step_, @@ -225,20 +225,21 @@ nc_const_iterator::advance_local_iter_to_new_part_( size_t n ) { case NCIteratorKind::RANK_LOCAL: { - const size_t num_ranks = kernel().mpi_manager.get_num_processes(); - const size_t current_rank = kernel().mpi_manager.get_rank(); + const size_t num_ranks = kernel::manager< MPIManager >().get_num_processes(); + const size_t current_rank = kernel::manager< MPIManager 
>().get_rank(); std::tie( part_idx_, element_idx_ ) = composite_collection_->specific_local_begin_( num_ranks, current_rank, part_idx_, element_idx_, NodeCollectionComposite::gid_to_rank_ ); - FULL_LOGGING_ONLY( kernel().write_to_dump( - String::compose( "ACIL rk %1, pix %2, eix %3", kernel().mpi_manager.get_rank(), part_idx_, element_idx_ ) ); ) + FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( String::compose( + "ACIL rk %1, pix %2, eix %3", kernel::manager< MPIManager >().get_rank(), part_idx_, element_idx_ ) ); ) break; } case NCIteratorKind::THREAD_LOCAL: { - const size_t num_vps = kernel().vp_manager.get_num_virtual_processes(); - const size_t current_vp = kernel().vp_manager.thread_to_vp( kernel().vp_manager.get_thread_id() ); + const size_t num_vps = kernel::manager< VPManager >().get_num_virtual_processes(); + const size_t current_vp = + kernel::manager< VPManager >().thread_to_vp( kernel::manager< VPManager >().get_thread_id() ); std::tie( part_idx_, element_idx_ ) = composite_collection_->specific_local_begin_( num_vps, current_vp, part_idx_, element_idx_, NodeCollectionComposite::gid_to_vp_ ); @@ -290,9 +291,9 @@ nc_const_iterator::operator*() const { if ( not composite_collection_->valid_idx_( part_idx_, element_idx_ ) ) { - FULL_LOGGING_ONLY( kernel().write_to_dump( + FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( String::compose( "nci::op* comp err rk %1, lp %2, le %3, pix %4, eix %5, end_pix %6, end_eix %7", - kernel().mpi_manager.get_rank(), + kernel::manager< MPIManager >().get_rank(), composite_collection_->last_part_, composite_collection_->last_elem_, part_idx_, @@ -313,7 +314,7 @@ nc_const_iterator::operator*() const } NodeCollection::NodeCollection() - : fingerprint_( kernel().get_fingerprint() ) + : fingerprint_( kernel::manager< KernelManager >().get_fingerprint() ) { } @@ -402,7 +403,7 @@ NodeCollection::create_( const std::vector< size_t >& node_ids ) { size_t current_first = node_ids[ 0 ]; 
size_t current_last = current_first; - size_t current_model = kernel().modelrange_manager.get_model_id( node_ids[ 0 ] ); + size_t current_model = kernel::manager< ModelRangeManager >().get_model_id( node_ids[ 0 ] ); std::vector< NodeCollectionPrimitive > parts; @@ -415,7 +416,7 @@ NodeCollection::create_( const std::vector< size_t >& node_ids ) } old_node_id = *node_id; - const size_t next_model = kernel().modelrange_manager.get_model_id( *node_id ); + const size_t next_model = kernel::manager< ModelRangeManager >().get_model_id( *node_id ); if ( next_model == current_model and *node_id == ( current_last + 1 ) ) { @@ -448,7 +449,7 @@ NodeCollection::create_( const std::vector< size_t >& node_ids ) bool NodeCollection::valid() const { - return fingerprint_ == kernel().get_fingerprint(); + return fingerprint_ == kernel::manager< KernelManager >().get_fingerprint(); } void @@ -470,7 +471,7 @@ NodeCollectionPrimitive::NodeCollectionPrimitive( size_t first, , last_( last ) , model_id_( model_id ) , metadata_( meta ) - , nodes_have_no_proxies_( not kernel().model_manager.get_node_model( model_id_ )->has_proxies() ) + , nodes_have_no_proxies_( not kernel::manager< ModelManager >().get_node_model( model_id_ )->has_proxies() ) { assert( first_ <= last_ ); assert_consistent_model_ids_( model_id_ ); @@ -481,7 +482,7 @@ NodeCollectionPrimitive::NodeCollectionPrimitive( size_t first, size_t last, siz , last_( last ) , model_id_( model_id ) , metadata_( nullptr ) - , nodes_have_no_proxies_( not kernel().model_manager.get_node_model( model_id_ )->has_proxies() ) + , nodes_have_no_proxies_( not kernel::manager< ModelManager >().get_node_model( model_id_ )->has_proxies() ) { assert( first_ <= last_ ); } @@ -495,18 +496,18 @@ NodeCollectionPrimitive::NodeCollectionPrimitive( size_t first, size_t last ) assert( first_ <= last_ ); // find the model_id - const auto first_model_id = kernel().modelrange_manager.get_model_id( first ); + const auto first_model_id = kernel::manager< 
ModelRangeManager >().get_model_id( first ); const auto init_index = first + 1; for ( size_t node_id = init_index; node_id <= last; ++node_id ) { - const auto model_id = kernel().modelrange_manager.get_model_id( node_id ); + const auto model_id = kernel::manager< ModelRangeManager >().get_model_id( node_id ); if ( model_id != first_model_id ) { throw BadProperty( "model ids does not match" ); } } model_id_ = first_model_id; - nodes_have_no_proxies_ = not kernel().model_manager.get_node_model( model_id_ )->has_proxies(); + nodes_have_no_proxies_ = not kernel::manager< ModelManager >().get_node_model( model_id_ )->has_proxies(); } NodeCollectionPrimitive::NodeCollectionPrimitive() @@ -534,7 +535,7 @@ NodeCollection::to_array( const std::string& selection ) const // We need to defined zero explicitly here, otherwise push_back() does strange things const size_t zero = 0; node_ids.push_back( zero ); - node_ids.push_back( kernel().vp_manager.get_thread_id() ); + node_ids.push_back( kernel::manager< VPManager >().get_thread_id() ); node_ids.push_back( zero ); const auto end_it = end(); @@ -647,10 +648,10 @@ NodeCollectionPrimitive::operator+( NodeCollectionPTR rhs ) const NodeCollection::const_iterator NodeCollectionPrimitive::rank_local_begin( NodeCollectionPTR cp ) const { - const size_t num_processes = kernel().mpi_manager.get_num_processes(); - const size_t rank = kernel().mpi_manager.get_rank(); + const size_t num_processes = kernel::manager< MPIManager >().get_num_processes(); + const size_t rank = kernel::manager< MPIManager >().get_rank(); const size_t first_elem_rank = - kernel().mpi_manager.get_process_id_of_vp( kernel().vp_manager.node_id_to_vp( first_ ) ); + kernel::manager< MPIManager >().get_process_id_of_vp( kernel::manager< VPManager >().node_id_to_vp( first_ ) ); const size_t elem_idx = ( rank - first_elem_rank + num_processes ) % num_processes; if ( elem_idx > size() ) // Too few node IDs to be shared among all MPI processes. 
@@ -666,9 +667,10 @@ NodeCollectionPrimitive::rank_local_begin( NodeCollectionPTR cp ) const NodeCollection::const_iterator NodeCollectionPrimitive::thread_local_begin( NodeCollectionPTR cp ) const { - const size_t num_vps = kernel().vp_manager.get_num_virtual_processes(); - const size_t current_vp = kernel().vp_manager.thread_to_vp( kernel().vp_manager.get_thread_id() ); - const size_t vp_first_node = kernel().vp_manager.node_id_to_vp( first_ ); + const size_t num_vps = kernel::manager< VPManager >().get_num_virtual_processes(); + const size_t current_vp = + kernel::manager< VPManager >().thread_to_vp( kernel::manager< VPManager >().get_thread_id() ); + const size_t vp_first_node = kernel::manager< VPManager >().node_id_to_vp( first_ ); const size_t offset = ( current_vp - vp_first_node + num_vps ) % num_vps; if ( offset >= size() ) // Too few node IDs to be shared among all vps. @@ -735,7 +737,7 @@ void NodeCollectionPrimitive::print_primitive( std::ostream& out ) const { const std::string model = - model_id_ != invalid_index ? kernel().model_manager.get_node_model( model_id_ )->get_name() : "none"; + model_id_ != invalid_index ? 
kernel::manager< ModelManager >().get_node_model( model_id_ )->get_name() : "none"; out << "model=" << model << ", size=" << size(); @@ -766,11 +768,12 @@ NodeCollectionPrimitive::assert_consistent_model_ids_( const size_t expected_mod { for ( size_t node_id = first_; node_id <= last_; ++node_id ) { - const auto model_id = kernel().modelrange_manager.get_model_id( node_id ); + const auto model_id = kernel::manager< ModelRangeManager >().get_model_id( node_id ); if ( model_id != expected_model_id ) { - const auto node_model = kernel().modelrange_manager.get_model_of_node_id( model_id )->get_name(); - const auto expected_model = kernel().modelrange_manager.get_model_of_node_id( expected_model_id )->get_name(); + const auto node_model = kernel::manager< ModelRangeManager >().get_model_of_node_id( model_id )->get_name(); + const auto expected_model = + kernel::manager< ModelRangeManager >().get_model_of_node_id( expected_model_id )->get_name(); const auto message = "All nodes must have the same model (node with ID " + std::to_string( node_id ) + " has model " + node_model + ", expected " + expected_model + ")"; throw BadProperty( message ); @@ -1131,11 +1134,11 @@ NodeCollectionComposite::specific_local_begin_( size_t period, elem_idx += first_elem; } - FULL_LOGGING_ONLY( - kernel().write_to_dump( String::compose( "SPLB rk %1, thr %2, phase_first %3, offs %4, stp %5, sto %6," - " pix %7, lp %8, le %9, primsz %10, nprts: %11, this: %12", - kernel().mpi_manager.get_rank(), - kernel().vp_manager.get_thread_id(), + FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( + String::compose( "SPLB rk %1, thr %2, phase_first %3, offs %4, stp %5, sto %6," + " pix %7, lp %8, le %9, primsz %10, nprts: %11, this: %12", + kernel::manager< MPIManager >().get_rank(), + kernel::manager< VPManager >().get_thread_id(), phase_first_node, offset, first_part, @@ -1179,20 +1182,20 @@ NodeCollectionComposite::specific_local_begin_( size_t period, size_t 
NodeCollectionComposite::gid_to_vp_( size_t gid ) { - return kernel().vp_manager.node_id_to_vp( gid ); + return kernel::manager< VPManager >().node_id_to_vp( gid ); } size_t NodeCollectionComposite::gid_to_rank_( size_t gid ) { - return kernel().mpi_manager.get_process_id_of_vp( kernel().vp_manager.node_id_to_vp( gid ) ); + return kernel::manager< MPIManager >().get_process_id_of_vp( kernel::manager< VPManager >().node_id_to_vp( gid ) ); } NodeCollection::const_iterator NodeCollectionComposite::rank_local_begin( NodeCollectionPTR cp ) const { - const size_t num_ranks = kernel().mpi_manager.get_num_processes(); - const size_t current_rank = kernel().mpi_manager.get_rank(); + const size_t num_ranks = kernel::manager< MPIManager >().get_num_processes(); + const size_t current_rank = kernel::manager< MPIManager >().get_rank(); const auto [ part_index, part_offset ] = specific_local_begin_( num_ranks, current_rank, first_part_, first_elem_, gid_to_rank_ ); @@ -1214,8 +1217,9 @@ NodeCollectionComposite::rank_local_begin( NodeCollectionPTR cp ) const NodeCollection::const_iterator NodeCollectionComposite::thread_local_begin( NodeCollectionPTR cp ) const { - const size_t num_vps = kernel().vp_manager.get_num_virtual_processes(); - const size_t current_vp = kernel().vp_manager.thread_to_vp( kernel().vp_manager.get_thread_id() ); + const size_t num_vps = kernel::manager< VPManager >().get_num_virtual_processes(); + const size_t current_vp = + kernel::manager< VPManager >().thread_to_vp( kernel::manager< VPManager >().get_thread_id() ); const auto [ part_index, part_offset ] = specific_local_begin_( num_vps, current_vp, first_part_, first_elem_, gid_to_vp_ ); @@ -1252,9 +1256,9 @@ NodeCollectionComposite::slice( size_t start, size_t end, size_t stride ) const "InvalidNodeCollection: note that ResetKernel invalidates all previously created NodeCollections." 
); } - FULL_LOGGING_ONLY( kernel().write_to_dump( "Calling NCC from slice()" ); ) + FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( "Calling NCC from slice()" ); ) const auto new_composite = NodeCollectionComposite( *this, start, end, stride ); - FULL_LOGGING_ONLY( kernel().write_to_dump( "Calling NCC from slice() --- DONE" ); ) + FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( "Calling NCC from slice() --- DONE" ); ) if ( stride == 1 and new_composite.first_part_ == new_composite.last_part_ ) { @@ -1263,8 +1267,8 @@ NodeCollectionComposite::slice( size_t start, size_t end, size_t stride ) const new_composite.first_elem_, new_composite.last_elem_ + 1 ); } - FULL_LOGGING_ONLY( - kernel().write_to_dump( String::compose( "NewComposite: fp %1, fe %2, lp %3, le %4, sz %5, strd %6", + FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( + String::compose( "NewComposite: fp %1, fe %2, lp %3, le %4, sz %5, strd %6", new_composite.first_part_, new_composite.first_elem_, new_composite.last_part_, @@ -1416,7 +1420,7 @@ NodeCollectionComposite::print_me( std::ostream& out ) const { // Need to count the primitive, so can't start at begin() out << "\n" + space - << "model=" << kernel().model_manager.get_node_model( first_in_primitive.model_id )->get_name() + << "model=" << kernel::manager< ModelManager >().get_node_model( first_in_primitive.model_id )->get_name() << ", size=" << primitive_size << ", "; if ( primitive_size == 1 ) { @@ -1444,7 +1448,8 @@ NodeCollectionComposite::print_me( std::ostream& out ) const } // Need to also print the last primitive - out << "\n" + space << "model=" << kernel().model_manager.get_node_model( first_in_primitive.model_id )->get_name() + out << "\n" + space + << "model=" << kernel::manager< ModelManager >().get_node_model( first_in_primitive.model_id )->get_name() << ", size=" << primitive_size << ", "; if ( primitive_size == 1 ) { diff --git a/nestkernel/node_manager.cpp 
b/nestkernel/node_manager.cpp index 1de01dba85..2fac11c5ce 100644 --- a/nestkernel/node_manager.cpp +++ b/nestkernel/node_manager.cpp @@ -72,8 +72,8 @@ NodeManager::initialize( const bool adjust_number_of_threads_or_rng_only ) { // explicitly force construction of wfr_nodes_vec_ to ensure consistent state wfr_network_size_ = 0; - local_nodes_.resize( kernel().vp_manager.get_num_threads() ); - num_thread_local_devices_.resize( kernel().vp_manager.get_num_threads(), 0 ); + local_nodes_.resize( kernel::manager< VPManager >().get_num_threads() ); + num_thread_local_devices_.resize( kernel::manager< VPManager >().get_num_threads(), 0 ); ensure_valid_thread_local_ids(); if ( not adjust_number_of_threads_or_rng_only ) @@ -113,7 +113,7 @@ NodeManager::add_node( size_t model_id, long n ) throw BadProperty(); } - Model* model = kernel().model_manager.get_node_model( model_id ); + Model* model = kernel::manager< ModelManager >().get_node_model( model_id ); assert( model ); model->deprecation_warning( "Create" ); @@ -128,10 +128,10 @@ NodeManager::add_node( size_t model_id, long n ) throw KernelException( "OutOfMemory" ); } - kernel().modelrange_manager.add_range( model_id, min_node_id, max_node_id ); + kernel::manager< ModelRangeManager >().add_range( model_id, min_node_id, max_node_id ); // clear any exceptions from previous call - std::vector< std::shared_ptr< WrappedThreadException > >( kernel().vp_manager.get_num_threads() ) + std::vector< std::shared_ptr< WrappedThreadException > >( kernel::manager< VPManager >().get_num_threads() ) .swap( exceptions_raised_ ); auto nc_ptr = NodeCollectionPTR( new NodeCollectionPrimitive( min_node_id, max_node_id, model_id ) ); @@ -151,7 +151,7 @@ NodeManager::add_node( size_t model_id, long n ) } // check if any exceptions have been raised - for ( size_t t = 0; t < kernel().vp_manager.get_num_threads(); ++t ) + for ( size_t t = 0; t < kernel::manager< VPManager >().get_num_threads(); ++t ) { if ( exceptions_raised_.at( t ).get() ) { @@ 
-163,7 +163,7 @@ NodeManager::add_node( size_t model_id, long n ) // successfully if ( model->is_off_grid() ) { - kernel().event_delivery_manager.set_off_grid_communication( true ); + kernel::manager< EventDeliveryManager >().set_off_grid_communication( true ); LOG( M_INFO, "NodeManager::add_node", "Neuron models emitting precisely timed spikes exist: " @@ -174,12 +174,12 @@ NodeManager::add_node( size_t model_id, long n ) // resize the target table for delivery of events to devices to make sure the first dimension // matches the number of local nodes and the second dimension matches number of synapse types - kernel().connection_manager.resize_target_table_devices_to_number_of_neurons(); + kernel::manager< ConnectionManager >().resize_target_table_devices_to_number_of_neurons(); #pragma omp parallel { // must be called in parallel context to properly configure per-thread data structures - kernel().connection_manager.resize_target_table_devices_to_number_of_synapse_types(); + kernel::manager< ConnectionManager >().resize_target_table_devices_to_number_of_synapse_types(); } sw_construction_create_.stop(); @@ -190,7 +190,7 @@ NodeManager::add_node( size_t model_id, long n ) void NodeManager::add_neurons_( Model& model, size_t min_node_id, size_t max_node_id ) { - const size_t num_vps = kernel().vp_manager.get_num_virtual_processes(); + const size_t num_vps = kernel::manager< VPManager >().get_num_virtual_processes(); // Upper limit for number of neurons per thread; in practice, either // max_new_per_thread-1 or max_new_per_thread nodes will be created. 
const size_t max_new_per_thread = @@ -198,7 +198,7 @@ NodeManager::add_neurons_( Model& model, size_t min_node_id, size_t max_node_id #pragma omp parallel { - const size_t t = kernel().vp_manager.get_thread_id(); + const size_t t = kernel::manager< VPManager >().get_thread_id(); try { @@ -206,8 +206,8 @@ NodeManager::add_neurons_( Model& model, size_t min_node_id, size_t max_node_id // Need to find smallest node ID with: // - node ID local to this vp // - node_id >= min_node_id - const size_t vp = kernel().vp_manager.thread_to_vp( t ); - const size_t min_node_id_vp = kernel().vp_manager.node_id_to_vp( min_node_id ); + const size_t vp = kernel::manager< VPManager >().thread_to_vp( t ); + const size_t min_node_id_vp = kernel::manager< VPManager >().node_id_to_vp( min_node_id ); size_t node_id = min_node_id + ( num_vps + vp - min_node_id_vp ) % num_vps; @@ -241,7 +241,7 @@ NodeManager::add_devices_( Model& model, size_t min_node_id, size_t max_node_id #pragma omp parallel { - const size_t t = kernel().vp_manager.get_thread_id(); + const size_t t = kernel::manager< VPManager >().get_thread_id(); try { model.reserve_additional( t, n_per_thread ); @@ -255,7 +255,7 @@ NodeManager::add_devices_( Model& model, size_t min_node_id, size_t max_node_id node->set_node_id_( node_id ); node->set_model_id( model.get_model_id() ); node->set_thread( t ); - node->set_vp( kernel().vp_manager.thread_to_vp( t ) ); + node->set_vp( kernel::manager< VPManager >().thread_to_vp( t ) ); node->set_local_device_id( num_thread_local_devices_[ t ] - 1 ); node->set_initialized(); @@ -277,7 +277,7 @@ NodeManager::add_music_nodes_( Model& model, size_t min_node_id, size_t max_node { #pragma omp parallel { - const size_t t = kernel().vp_manager.get_thread_id(); + const size_t t = kernel::manager< VPManager >().get_thread_id(); try { if ( t == 0 ) @@ -291,7 +291,7 @@ NodeManager::add_music_nodes_( Model& model, size_t min_node_id, size_t max_node node->set_node_id_( node_id ); node->set_model_id( 
model.get_model_id() ); node->set_thread( 0 ); - node->set_vp( kernel().vp_manager.thread_to_vp( 0 ) ); + node->set_vp( kernel::manager< VPManager >().thread_to_vp( 0 ) ); node->set_local_device_id( num_thread_local_devices_[ t ] - 1 ); node->set_initialized(); @@ -348,10 +348,10 @@ NodeManager::get_nodes( const DictionaryDatum& params, const bool local_only ) if ( params->empty() ) { std::vector< std::vector< long > > nodes_on_thread; - nodes_on_thread.resize( kernel().vp_manager.get_num_threads() ); + nodes_on_thread.resize( kernel::manager< VPManager >().get_num_threads() ); #pragma omp parallel { - size_t tid = kernel().vp_manager.get_thread_id(); + size_t tid = kernel::manager< VPManager >().get_thread_id(); for ( auto node : get_local_nodes( tid ) ) { @@ -368,7 +368,7 @@ NodeManager::get_nodes( const DictionaryDatum& params, const bool local_only ) } else { - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { // Select those nodes fulfilling the key/value pairs of the dictionary for ( auto node : get_local_nodes( tid ) ) @@ -400,7 +400,7 @@ NodeManager::get_nodes( const DictionaryDatum& params, const bool local_only ) if ( not local_only ) { std::vector< long > globalnodes; - kernel().mpi_manager.communicate( nodes, globalnodes ); + kernel::manager< MPIManager >().communicate( nodes, globalnodes ); for ( size_t i = 0; i < globalnodes.size(); ++i ) { @@ -427,21 +427,21 @@ NodeManager::get_nodes( const DictionaryDatum& params, const bool local_only ) bool NodeManager::is_local_node( Node* n ) const { - return kernel().vp_manager.is_local_vp( n->get_vp() ); + return kernel::manager< VPManager >().is_local_vp( n->get_vp() ); } bool NodeManager::is_local_node_id( size_t node_id ) const { - const size_t vp = kernel().vp_manager.node_id_to_vp( node_id ); - return kernel().vp_manager.is_local_vp( vp ); + const size_t vp = kernel::manager< VPManager 
>().node_id_to_vp( node_id ); + return kernel::manager< VPManager >().is_local_vp( vp ); } size_t NodeManager::get_max_num_local_nodes() const { return static_cast< size_t >( - ceil( static_cast< double >( size() ) / kernel().vp_manager.get_num_virtual_processes() ) ); + ceil( static_cast< double >( size() ) / kernel::manager< VPManager >().get_num_virtual_processes() ) ); } size_t @@ -453,13 +453,13 @@ NodeManager::get_num_thread_local_devices( size_t t ) const Node* NodeManager::get_node_or_proxy( size_t node_id, size_t t ) { - assert( t < kernel().vp_manager.get_num_threads() ); + assert( t < kernel::manager< VPManager >().get_num_threads() ); assert( node_id <= size() ); Node* node = local_nodes_[ t ].get_node_by_node_id( node_id ); if ( not node ) { - return kernel().model_manager.get_proxy_node( t, node_id ); + return kernel::manager< ModelManager >().get_proxy_node( t, node_id ); } return node; @@ -470,17 +470,17 @@ NodeManager::get_node_or_proxy( size_t node_id ) { assert( 0 < node_id and node_id <= size() ); - size_t vp = kernel().vp_manager.node_id_to_vp( node_id ); - if ( not kernel().vp_manager.is_local_vp( vp ) ) + size_t vp = kernel::manager< VPManager >().node_id_to_vp( node_id ); + if ( not kernel::manager< VPManager >().is_local_vp( vp ) ) { - return kernel().model_manager.get_proxy_node( 0, node_id ); + return kernel::manager< ModelManager >().get_proxy_node( 0, node_id ); } - size_t t = kernel().vp_manager.vp_to_thread( vp ); + size_t t = kernel::manager< VPManager >().vp_to_thread( vp ); Node* node = local_nodes_[ t ].get_node_by_node_id( node_id ); if ( not node ) { - return kernel().model_manager.get_proxy_node( t, node_id ); + return kernel::manager< ModelManager >().get_proxy_node( t, node_id ); } return node; @@ -489,13 +489,13 @@ NodeManager::get_node_or_proxy( size_t node_id ) Node* NodeManager::get_mpi_local_node_or_device_head( size_t node_id ) { - size_t t = kernel().vp_manager.vp_to_thread( kernel().vp_manager.node_id_to_vp( node_id ) 
); + size_t t = kernel::manager< VPManager >().vp_to_thread( kernel::manager< VPManager >().node_id_to_vp( node_id ) ); Node* node = local_nodes_[ t ].get_node_by_node_id( node_id ); if ( not node ) { - return kernel().model_manager.get_proxy_node( t, node_id ); + return kernel::manager< ModelManager >().get_proxy_node( t, node_id ); } if ( not node->has_proxies() ) { @@ -508,7 +508,7 @@ NodeManager::get_mpi_local_node_or_device_head( size_t node_id ) std::vector< Node* > NodeManager::get_thread_siblings( size_t node_id ) const { - size_t num_threads = kernel().vp_manager.get_num_threads(); + size_t num_threads = kernel::manager< VPManager >().get_num_threads(); std::vector< Node* > siblings( num_threads ); for ( size_t t = 0; t < num_threads; ++t ) { @@ -552,9 +552,9 @@ NodeManager::ensure_valid_thread_local_ids() // We clear the existing wfr_nodes_vec_ and then rebuild it. wfr_nodes_vec_.clear(); - wfr_nodes_vec_.resize( kernel().vp_manager.get_num_threads() ); + wfr_nodes_vec_.resize( kernel::manager< VPManager >().get_num_threads() ); - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { wfr_nodes_vec_[ tid ].clear(); @@ -584,7 +584,7 @@ NodeManager::ensure_valid_thread_local_ids() // step, because gather_events() has to be done in an // openmp single section wfr_is_used_ = false; - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { if ( wfr_nodes_vec_[ tid ].size() > 0 ) { @@ -600,7 +600,7 @@ NodeManager::destruct_nodes_() { #pragma omp parallel { - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); for ( auto node : local_nodes_[ tid ] ) { delete node.get_node(); @@ -639,18 +639,19 @@ NodeManager::prepare_node_( Node* n ) void NodeManager::prepare_nodes() { - 
assert( kernel().is_initialized() ); + assert( kernel::manager< KernelManager >().is_initialized() ); // We initialize the buffers of each node and calibrate it. size_t num_active_nodes = 0; // counts nodes that will be updated size_t num_active_wfr_nodes = 0; // counts nodes that use waveform relaxation - std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised( kernel().vp_manager.get_num_threads() ); + std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised( + kernel::manager< VPManager >().get_num_threads() ); #pragma omp parallel reduction( + : num_active_nodes, num_active_wfr_nodes ) { - size_t t = kernel().vp_manager.get_thread_id(); + size_t t = kernel::manager< VPManager >().get_thread_id(); // We prepare nodes in a parallel region. Therefore, we need to catch // exceptions here and then handle them after the parallel region. @@ -677,7 +678,7 @@ NodeManager::prepare_nodes() } // omp parallel // check if any exceptions have been raised - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { if ( exceptions_raised.at( tid ).get() ) { @@ -704,7 +705,7 @@ NodeManager::post_run_cleanup() { #pragma omp parallel { - size_t t = kernel().vp_manager.get_thread_id(); + size_t t = kernel::manager< VPManager >().get_thread_id(); SparseNodeArray::const_iterator n; for ( n = local_nodes_[ t ].begin(); n != local_nodes_[ t ].end(); ++n ) { @@ -718,7 +719,7 @@ NodeManager::finalize_nodes() { #pragma omp parallel { - size_t tid = kernel().vp_manager.get_thread_id(); + size_t tid = kernel::manager< VPManager >().get_thread_id(); SparseNodeArray::const_iterator n; for ( n = local_nodes_[ tid ].begin(); n != local_nodes_[ tid ].end(); ++n ) { @@ -730,15 +731,15 @@ NodeManager::finalize_nodes() void NodeManager::check_wfr_use() { - wfr_is_used_ = kernel().mpi_manager.any_true( wfr_is_used_ ); + wfr_is_used_ = kernel::manager< 
MPIManager >().any_true( wfr_is_used_ ); - GapJunctionEvent::set_coeff_length( - kernel().connection_manager.get_min_delay() * ( kernel().simulation_manager.get_wfr_interpolation_order() + 1 ) ); - InstantaneousRateConnectionEvent::set_coeff_length( kernel().connection_manager.get_min_delay() ); - DelayedRateConnectionEvent::set_coeff_length( kernel().connection_manager.get_min_delay() ); - DiffusionConnectionEvent::set_coeff_length( kernel().connection_manager.get_min_delay() ); - LearningSignalConnectionEvent::set_coeff_length( kernel().connection_manager.get_min_delay() ); - SICEvent::set_coeff_length( kernel().connection_manager.get_min_delay() ); + GapJunctionEvent::set_coeff_length( kernel::manager< ConnectionManager >().get_min_delay() + * ( kernel::manager< SimulationManager >().get_wfr_interpolation_order() + 1 ) ); + InstantaneousRateConnectionEvent::set_coeff_length( kernel::manager< ConnectionManager >().get_min_delay() ); + DelayedRateConnectionEvent::set_coeff_length( kernel::manager< ConnectionManager >().get_min_delay() ); + DiffusionConnectionEvent::set_coeff_length( kernel::manager< ConnectionManager >().get_min_delay() ); + LearningSignalConnectionEvent::set_coeff_length( kernel::manager< ConnectionManager >().get_min_delay() ); + SICEvent::set_coeff_length( kernel::manager< ConnectionManager >().get_min_delay() ); } void @@ -748,13 +749,13 @@ NodeManager::print( std::ostream& out ) const const double max_node_id_width = std::floor( std::log10( max_node_id ) ); const double node_id_range_width = 6 + 2 * max_node_id_width; - for ( std::vector< modelrange >::const_iterator it = kernel().modelrange_manager.begin(); - it != kernel().modelrange_manager.end(); + for ( std::vector< modelrange >::const_iterator it = kernel::manager< ModelRangeManager >().begin(); + it != kernel::manager< ModelRangeManager >().end(); ++it ) { const size_t first_node_id = it->get_first_node_id(); const size_t last_node_id = it->get_last_node_id(); - const Model* mod = 
kernel().model_manager.get_node_model( it->get_model_id() ); + const Model* mod = kernel::manager< ModelManager >().get_node_model( it->get_model_id() ); std::stringstream node_id_range_strs; node_id_range_strs << std::setw( max_node_id_width + 1 ) << first_node_id; @@ -764,7 +765,7 @@ NodeManager::print( std::ostream& out ) const } out << std::setw( node_id_range_width ) << std::left << node_id_range_strs.str() << " " << mod->get_name(); - if ( it + 1 != kernel().modelrange_manager.end() ) + if ( it + 1 != kernel::manager< ModelRangeManager >().end() ) { out << std::endl; } @@ -774,7 +775,7 @@ NodeManager::print( std::ostream& out ) const void NodeManager::set_status( size_t node_id, const DictionaryDatum& d ) { - for ( size_t t = 0; t < kernel().vp_manager.get_num_threads(); ++t ) + for ( size_t t = 0; t < kernel::manager< VPManager >().get_num_threads(); ++t ) { Node* node = local_nodes_[ t ].get_node_by_node_id( node_id ); if ( node ) diff --git a/nestkernel/parameter.cpp b/nestkernel/parameter.cpp index 3346b4101c..518a204f5e 100644 --- a/nestkernel/parameter.cpp +++ b/nestkernel/parameter.cpp @@ -91,14 +91,15 @@ NormalParameter::NormalParameter( const DictionaryDatum& d ) normal_distribution::param_type param( mean_, std_ ); dist.param( param ); assert( normal_dists_.size() == 0 ); - normal_dists_.resize( kernel().vp_manager.get_num_threads(), dist ); + normal_dists_.resize( kernel::manager< VPManager >().get_num_threads(), dist ); } double NormalParameter::value( RngPtr rng, Node* node ) { - const auto tid = node ? kernel().vp_manager.vp_to_thread( kernel().vp_manager.node_id_to_vp( node->get_node_id() ) ) - : kernel().vp_manager.get_thread_id(); + const auto tid = node + ? 
kernel::manager< VPManager >().vp_to_thread( kernel::manager< VPManager >().node_id_to_vp( node->get_node_id() ) ) + : kernel::manager< VPManager >().get_thread_id(); return normal_dists_[ tid ]( rng ); } @@ -117,14 +118,15 @@ LognormalParameter::LognormalParameter( const DictionaryDatum& d ) const lognormal_distribution::param_type param( mean_, std_ ); dist.param( param ); assert( lognormal_dists_.size() == 0 ); - lognormal_dists_.resize( kernel().vp_manager.get_num_threads(), dist ); + lognormal_dists_.resize( kernel::manager< VPManager >().get_num_threads(), dist ); } double LognormalParameter::value( RngPtr rng, Node* node ) { - const auto tid = node ? kernel().vp_manager.vp_to_thread( kernel().vp_manager.node_id_to_vp( node->get_node_id() ) ) - : kernel().vp_manager.get_thread_id(); + const auto tid = node + ? kernel::manager< VPManager >().vp_to_thread( kernel::manager< VPManager >().node_id_to_vp( node->get_node_id() ) ) + : kernel::manager< VPManager >().get_thread_id(); return lognormal_dists_[ tid ]( rng ); } @@ -136,7 +138,7 @@ NodePosParameter::get_node_pos_( Node* node ) const { throw KernelException( "NodePosParameter: not node" ); } - NodeCollectionPTR nc = kernel().node_manager.node_id_to_node_collection( node ); + NodeCollectionPTR nc = kernel::manager< NodeManager >().node_id_to_node_collection( node ); if ( not nc.get() ) { throw KernelException( "NodePosParameter: not nc" ); diff --git a/nestkernel/per_thread_bool_indicator.cpp b/nestkernel/per_thread_bool_indicator.cpp index 29622d9db6..6448879c65 100644 --- a/nestkernel/per_thread_bool_indicator.cpp +++ b/nestkernel/per_thread_bool_indicator.cpp @@ -49,7 +49,7 @@ PerThreadBoolIndicator::operator[]( const size_t tid ) void PerThreadBoolIndicator::initialize( const size_t num_threads, const bool status ) { - kernel().vp_manager.assert_single_threaded(); + kernel::manager< VPManager >().assert_single_threaded(); per_thread_status_.clear(); per_thread_status_.resize( num_threads, 
BoolIndicatorUInt64( status ) ); size_ = num_threads; @@ -66,7 +66,7 @@ PerThreadBoolIndicator::initialize( const size_t num_threads, const bool status bool PerThreadBoolIndicator::all_false() const { - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); // We need two barriers here to ensure that no thread can continue and change the result // before all threads have determined the result. #pragma omp barrier @@ -75,42 +75,42 @@ PerThreadBoolIndicator::all_false() const bool ret = ( are_true_ == 0 ); #pragma omp barrier - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); return ret; } bool PerThreadBoolIndicator::all_true() const { - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier bool ret = ( are_true_ == size_ ); #pragma omp barrier - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); return ret; } bool PerThreadBoolIndicator::any_false() const { - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier bool ret = ( are_true_ < size_ ); #pragma omp barrier - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); return ret; } bool PerThreadBoolIndicator::any_true() const { - 
kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier bool ret = ( are_true_ > 0 ); #pragma omp barrier - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); return ret; } diff --git a/nestkernel/proxynode.cpp b/nestkernel/proxynode.cpp index 43488457a2..7ccde535bc 100644 --- a/nestkernel/proxynode.cpp +++ b/nestkernel/proxynode.cpp @@ -46,56 +46,56 @@ proxynode::proxynode( size_t node_id, size_t model_id, size_t vp ) size_t proxynode::send_test_event( Node& target, size_t receptor_type, synindex syn_id, bool dummy_target ) { - Model* model = kernel().model_manager.get_node_model( get_model_id() ); + Model* model = kernel::manager< ModelManager >().get_node_model( get_model_id() ); return model->send_test_event( target, receptor_type, syn_id, dummy_target ); } void proxynode::sends_secondary_event( GapJunctionEvent& ge ) { - kernel().model_manager.get_node_model( get_model_id() )->sends_secondary_event( ge ); + kernel::manager< ModelManager >().get_node_model( get_model_id() )->sends_secondary_event( ge ); } void proxynode::sends_secondary_event( InstantaneousRateConnectionEvent& re ) { - kernel().model_manager.get_node_model( get_model_id() )->sends_secondary_event( re ); + kernel::manager< ModelManager >().get_node_model( get_model_id() )->sends_secondary_event( re ); } void proxynode::sends_secondary_event( DiffusionConnectionEvent& de ) { - kernel().model_manager.get_node_model( get_model_id() )->sends_secondary_event( de ); + kernel::manager< ModelManager >().get_node_model( get_model_id() )->sends_secondary_event( de ); } void proxynode::sends_secondary_event( DelayedRateConnectionEvent& re ) { - kernel().model_manager.get_node_model( get_model_id() )->sends_secondary_event( re ); + 
kernel::manager< ModelManager >().get_node_model( get_model_id() )->sends_secondary_event( re ); } void proxynode::sends_secondary_event( LearningSignalConnectionEvent& re ) { - kernel().model_manager.get_node_model( get_model_id() )->sends_secondary_event( re ); + kernel::manager< ModelManager >().get_node_model( get_model_id() )->sends_secondary_event( re ); } void proxynode::sends_secondary_event( SICEvent& sic ) { - kernel().model_manager.get_node_model( get_model_id() )->sends_secondary_event( sic ); + kernel::manager< ModelManager >().get_node_model( get_model_id() )->sends_secondary_event( sic ); } nest::SignalType proxynode::sends_signal() const { - return kernel().model_manager.get_node_model( get_model_id() )->sends_signal(); + return kernel::manager< ModelManager >().get_node_model( get_model_id() )->sends_signal(); } void proxynode::get_status( DictionaryDatum& d ) const { - const Model* model = kernel().model_manager.get_node_model( model_id_ ); + const Model* model = kernel::manager< ModelManager >().get_node_model( model_id_ ); const Name element_type = model->get_prototype().get_element_type(); ( *d )[ names::element_type ] = LiteralDatum( element_type ); } diff --git a/nestkernel/random_manager.cpp b/nestkernel/random_manager.cpp index a7f3173f4c..94ff239dc8 100644 --- a/nestkernel/random_manager.cpp +++ b/nestkernel/random_manager.cpp @@ -87,13 +87,13 @@ nest::RandomManager::initialize( const bool adjust_number_of_threads_or_rng_only // Create new RNGs of the currently used RNG type. 
rank_synced_rng_ = rng_types_[ current_rng_type_ ]->create( { base_seed_, RANK_SYNCED_SEEDER_ } ); - vp_synced_rngs_.resize( kernel().vp_manager.get_num_threads() ); - vp_specific_rngs_.resize( kernel().vp_manager.get_num_threads() ); + vp_synced_rngs_.resize( kernel::manager< VPManager >().get_num_threads() ); + vp_specific_rngs_.resize( kernel::manager< VPManager >().get_num_threads() ); #pragma omp parallel { - const auto tid = kernel().vp_manager.get_thread_id(); - const std::uint32_t vp = kernel().vp_manager.get_vp(); // type required for rng initializer + const auto tid = kernel::manager< VPManager >().get_thread_id(); + const std::uint32_t vp = kernel::manager< VPManager >().get_vp(); // type required for rng initializer vp_synced_rngs_[ tid ] = rng_types_[ current_rng_type_ ]->create( { base_seed_, THREAD_SYNCED_SEEDER_ } ); vp_specific_rngs_[ tid ] = rng_types_[ current_rng_type_ ]->create( { base_seed_, THREAD_SPECIFIC_SEEDER_, vp } ); } @@ -189,7 +189,7 @@ nest::RandomManager::check_rng_synchrony() const for ( auto n = 0; n < NUM_ROUNDS; ++n ) { const auto r = rank_synced_rng_->drand(); - if ( not kernel().mpi_manager.equal_cross_ranks( r ) ) + if ( not kernel::manager< MPIManager >().equal_cross_ranks( r ) ) { throw KernelException( "Rank-synchronized random number generators are out of sync." ); } @@ -198,7 +198,7 @@ nest::RandomManager::check_rng_synchrony() const // We check thread-synchrony under all circumstances to keep the code simple. 
for ( auto n = 0; n < NUM_ROUNDS; ++n ) { - const size_t num_threads = kernel().vp_manager.get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); double local_min = std::numeric_limits< double >::max(); double local_max = std::numeric_limits< double >::min(); for ( size_t t = 0; t < num_threads; ++t ) @@ -214,7 +214,7 @@ nest::RandomManager::check_rng_synchrony() const local_min = -std::numeric_limits< double >::infinity(); } - if ( not kernel().mpi_manager.equal_cross_ranks( local_min ) ) + if ( not kernel::manager< MPIManager >().equal_cross_ranks( local_min ) ) { throw KernelException( "Thread-synchronized random number generators are out of sync." ); } diff --git a/nestkernel/recording_backend_ascii.cpp b/nestkernel/recording_backend_ascii.cpp index b7f6d8ff54..9294c78660 100644 --- a/nestkernel/recording_backend_ascii.cpp +++ b/nestkernel/recording_backend_ascii.cpp @@ -49,7 +49,7 @@ nest::RecordingBackendASCII::~RecordingBackendASCII() throw() void nest::RecordingBackendASCII::initialize() { - data_map tmp( kernel().vp_manager.get_num_threads() ); + data_map tmp( kernel::manager< VPManager >().get_num_threads() ); device_data_.swap( tmp ); } @@ -160,8 +160,8 @@ nest::RecordingBackendASCII::write( const RecordingDevice& device, const std::string nest::RecordingBackendASCII::compute_vp_node_id_string_( const RecordingDevice& device ) const { - const double num_vps = kernel().vp_manager.get_num_virtual_processes(); - const double num_nodes = kernel().node_manager.size(); + const double num_vps = kernel::manager< VPManager >().get_num_virtual_processes(); + const double num_nodes = kernel::manager< NodeManager >().size(); const int vp_digits = static_cast< int >( std::floor( std::log10( num_vps ) ) + 1 ); const int node_id_digits = static_cast< int >( std::floor( std::log10( num_nodes ) ) + 1 ); @@ -255,7 +255,7 @@ nest::RecordingBackendASCII::DeviceData::open_file() std::string filename = compute_filename_(); 
std::ifstream test( filename.c_str() ); - if ( test.good() and not kernel().io_manager.overwrite_files() ) + if ( test.good() and not kernel::manager< IOManager >().overwrite_files() ) { std::string msg = String::compose( "The file '%1' already exists and overwriting files is disabled. To overwrite files, set " @@ -348,7 +348,7 @@ nest::RecordingBackendASCII::DeviceData::set_status( const DictionaryDatum& d ) bool time_in_steps = false; if ( updateValue< bool >( d, names::time_in_steps, time_in_steps ) ) { - if ( kernel().simulation_manager.has_been_simulated() ) + if ( kernel::manager< SimulationManager >().has_been_simulated() ) { throw BadProperty( "Property time_in_steps cannot be set after Simulate has been called." ); } @@ -360,7 +360,7 @@ nest::RecordingBackendASCII::DeviceData::set_status( const DictionaryDatum& d ) std::string nest::RecordingBackendASCII::DeviceData::compute_filename_() const { - std::string data_path = kernel().io_manager.get_data_path(); + std::string data_path = kernel::manager< IOManager >().get_data_path(); if ( not data_path.empty() and not( data_path[ data_path.size() - 1 ] == '/' ) ) { data_path += '/'; @@ -372,7 +372,7 @@ nest::RecordingBackendASCII::DeviceData::compute_filename_() const label = modelname_; } - std::string data_prefix = kernel().io_manager.get_data_prefix(); + std::string data_prefix = kernel::manager< IOManager >().get_data_prefix(); return data_path + data_prefix + label + vp_node_id_string_ + "." 
+ file_extension_; } diff --git a/nestkernel/recording_backend_memory.cpp b/nestkernel/recording_backend_memory.cpp index 6c484f6e66..bef37d88e3 100644 --- a/nestkernel/recording_backend_memory.cpp +++ b/nestkernel/recording_backend_memory.cpp @@ -36,7 +36,7 @@ nest::RecordingBackendMemory::~RecordingBackendMemory() throw() void nest::RecordingBackendMemory::initialize() { - device_data_map tmp( kernel().vp_manager.get_num_threads() ); + device_data_map tmp( kernel::manager< VPManager >().get_num_threads() ); device_data_.swap( tmp ); } @@ -265,7 +265,7 @@ nest::RecordingBackendMemory::DeviceData::set_status( const DictionaryDatum& d ) bool time_in_steps = false; if ( updateValue< bool >( d, names::time_in_steps, time_in_steps ) ) { - if ( kernel().simulation_manager.has_been_simulated() ) + if ( kernel::manager< SimulationManager >().has_been_simulated() ) { throw BadProperty( "Property time_in_steps cannot be set after Simulate has been called." ); } diff --git a/nestkernel/recording_backend_mpi.cpp b/nestkernel/recording_backend_mpi.cpp index 127adb1647..2b4a18f8f4 100644 --- a/nestkernel/recording_backend_mpi.cpp +++ b/nestkernel/recording_backend_mpi.cpp @@ -46,7 +46,7 @@ nest::RecordingBackendMPI::~RecordingBackendMPI() throw() void nest::RecordingBackendMPI::initialize() { - auto nthreads = kernel().vp_manager.get_num_threads(); + auto nthreads = kernel::manager< VPManager >().get_num_threads(); std::vector< std::vector< std::vector< std::array< double, 3 > > > > empty_vector( nthreads ); buffer_.swap( empty_vector ); device_map devices( nthreads ); @@ -139,7 +139,7 @@ nest::RecordingBackendMPI::prepare() // Create the connection with MPI // 1) take all the ports of the connections // get port and update the list of devices - thread_id_master = kernel().vp_manager.get_thread_id(); + thread_id_master = kernel::manager< VPManager >().get_thread_id(); } } int count_max = 0; @@ -193,7 +193,7 @@ nest::RecordingBackendMPI::prepare() #pragma omp parallel default( 
none ) shared( thread_id_master ) { // Update all the threads - size_t thread_id = kernel().vp_manager.get_thread_id(); + size_t thread_id = kernel::manager< VPManager >().get_thread_id(); if ( thread_id != thread_id_master ) { for ( auto& it_device : devices_[ thread_id ] ) @@ -293,7 +293,7 @@ nest::RecordingBackendMPI::cleanup() } // clear map of device commMap_.clear(); - size_t thread_id_master = kernel().vp_manager.get_thread_id(); + size_t thread_id_master = kernel::manager< VPManager >().get_thread_id(); for ( auto& it_device : devices_[ thread_id_master ] ) { std::get< 0 >( it_device.second ) = -1; @@ -329,7 +329,7 @@ nest::RecordingBackendMPI::write( const RecordingDevice& device, const std::vector< long >& ) { // For each event send a message through the right MPI communicator - const size_t thread_id = kernel().get_kernel_manager().vp_manager.get_thread_id(); + const size_t thread_id = kernel::manager< VPManager >().get_thread_id(); const size_t sender = event.get_sender_node_id(); const size_t recorder = device.get_node_id(); const Time stamp = event.get_stamp(); @@ -388,12 +388,12 @@ nest::RecordingBackendMPI::get_port( const size_t index_node, const std::string& // path of the file : path+label+id+.txt // (file contains only one line with name of the port ) std::ostringstream basename; - const std::string& path = kernel().io_manager.get_data_path(); + const std::string& path = kernel::manager< IOManager >().get_data_path(); if ( not path.empty() ) { basename << path << '/'; } - basename << kernel().io_manager.get_data_prefix(); + basename << kernel::manager< IOManager >().get_data_prefix(); if ( not label.empty() ) { diff --git a/nestkernel/recording_backend_screen.cpp b/nestkernel/recording_backend_screen.cpp index 9b31ea6f57..73e3722537 100644 --- a/nestkernel/recording_backend_screen.cpp +++ b/nestkernel/recording_backend_screen.cpp @@ -31,7 +31,7 @@ void nest::RecordingBackendScreen::initialize() { - device_data_map tmp( 
kernel().vp_manager.get_num_threads() ); + device_data_map tmp( kernel::manager< VPManager >().get_num_threads() ); device_data_.swap( tmp ); } diff --git a/nestkernel/recording_backend_sionlib.cpp b/nestkernel/recording_backend_sionlib.cpp index 33753117e5..2131cd3fc9 100644 --- a/nestkernel/recording_backend_sionlib.cpp +++ b/nestkernel/recording_backend_sionlib.cpp @@ -59,7 +59,7 @@ nest::RecordingBackendSIONlib::~RecordingBackendSIONlib() throw() void nest::RecordingBackendSIONlib::initialize() { - device_map devices( kernel().vp_manager.get_num_threads() ); + device_map devices( kernel::manager< VPManager >().get_num_threads() ); devices_.swap( devices ); } @@ -168,11 +168,11 @@ nest::RecordingBackendSIONlib::open_files_() WrappedThreadException* we = nullptr; // This code is executed in a parallel region (opened above)! - const size_t t = kernel().vp_manager.get_thread_id(); - const size_t task = kernel().vp_manager.thread_to_vp( t ); + const size_t t = kernel::manager< VPManager >().get_thread_id(); + const size_t task = kernel::manager< VPManager >().thread_to_vp( t ); if ( not task ) { - t_start_ = kernel().simulation_manager.get_time().get_ms(); + t_start_ = kernel::manager< SimulationManager >().get_time().get_ms(); } // set n_rec counters to zero in every device on every thread @@ -197,7 +197,7 @@ nest::RecordingBackendSIONlib::open_files_() std::string filename = build_filename_(); std::ifstream test( filename.c_str() ); - if ( test.good() & not kernel().io_manager.overwrite_files() ) + if ( test.good() & not kernel::manager< IOManager >().overwrite_files() ) { std::string msg = String::compose( "The device file '%1' exists already and will not be overwritten. 
" @@ -216,12 +216,12 @@ nest::RecordingBackendSIONlib::open_files_() #endif /* BG_MULTIFILE */ sion_int32 fs_block_size = -1; sion_int64 sion_chunksize = P_.sion_chunksize_; - int rank = kernel().mpi_manager.get_rank(); + int rank = kernel::manager< MPIManager >().get_rank(); file.sid = sion_paropen_ompi( filename.c_str(), P_.sion_collective_ ? "bw,cmerge,collsize=-1" : "bw", &n_files, - kernel().mpi_manager.get_communicator(), + kernel::manager< MPIManager >().get_communicator(), &local_comm, &sion_chunksize, &fs_block_size, @@ -271,8 +271,8 @@ nest::RecordingBackendSIONlib::close_files_() #pragma omp parallel { - const size_t t = kernel().vp_manager.get_thread_id(); - const size_t task = kernel().vp_manager.thread_to_vp( t ); + const size_t t = kernel::manager< VPManager >().get_thread_id(); + const size_t task = kernel::manager< VPManager >().thread_to_vp( t ); assert( ( files_.find( task ) != files_.end() ) and "initialize() was not called before calling cleanup()" ); @@ -305,7 +305,8 @@ nest::RecordingBackendSIONlib::close_files_() // accumulate number of recorded data points over all ranks unsigned long n_rec_total = 0; - MPI_Reduce( &n_rec, &n_rec_total, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, kernel().mpi_manager.get_communicator() ); + MPI_Reduce( + &n_rec, &n_rec_total, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, kernel::manager< MPIManager >().get_communicator() ); assert( sizeof( unsigned long ) <= sizeof( sion_uint64 ) ); it->second.info.n_rec = static_cast< sion_uint64 >( n_rec_total ); } @@ -326,7 +327,7 @@ nest::RecordingBackendSIONlib::close_files_() sion_int64 info_pos; } data_end = { info_blk, info_pos }; - double t_end = kernel().simulation_manager.get_time().get_ms(); + double t_end = kernel::manager< SimulationManager >().get_time().get_ms(); double resolution = Time::get_resolution().get_ms(); sion_fwrite( &t_start_, sizeof( double ), 1, file.sid ); @@ -519,12 +520,12 @@ const std::string nest::RecordingBackendSIONlib::build_filename_() const { 
std::ostringstream basename; - const std::string& path = kernel().io_manager.get_data_path(); + const std::string& path = kernel::manager< IOManager >().get_data_path(); if ( not path.empty() ) { basename << path << '/'; } - basename << kernel().io_manager.get_data_prefix(); + basename << kernel::manager< IOManager >().get_data_prefix(); return basename.str() + P_.filename_; } @@ -674,8 +675,8 @@ nest::RecordingBackendSIONlib::post_step_hook() return; } - const size_t t = kernel().vp_manager.get_thread_id(); - const size_t task = kernel().vp_manager.thread_to_vp( t ); + const size_t t = kernel::manager< VPManager >().get_thread_id(); + const size_t task = kernel::manager< VPManager >().thread_to_vp( t ); FileEntry& file = files_[ task ]; SIONBuffer& buffer = file.buffer; diff --git a/nestkernel/recording_device.cpp b/nestkernel/recording_device.cpp index c827d4da90..0a2dd8ba3b 100644 --- a/nestkernel/recording_device.cpp +++ b/nestkernel/recording_device.cpp @@ -46,7 +46,7 @@ nest::RecordingDevice::RecordingDevice( const RecordingDevice& rd ) void nest::RecordingDevice::set_initialized_() { - kernel().io_manager.enroll_recorder( P_.record_to_, *this, backend_params_ ); + kernel::manager< IOManager >().enroll_recorder( P_.record_to_, *this, backend_params_ ); } void @@ -54,7 +54,8 @@ nest::RecordingDevice::pre_run_hook( const std::vector< Name >& double_value_nam const std::vector< Name >& long_value_names ) { Device::pre_run_hook(); - kernel().io_manager.set_recording_value_names( P_.record_to_, *this, double_value_names, long_value_names ); + kernel::manager< IOManager >().set_recording_value_names( + P_.record_to_, *this, double_value_names, long_value_names ); } const std::string& @@ -84,7 +85,7 @@ nest::RecordingDevice::Parameters_::set( const DictionaryDatum& d ) std::string record_to; if ( updateValue< std::string >( d, names::record_to, record_to ) ) { - if ( not kernel().io_manager.is_valid_recording_backend( record_to ) ) + if ( not kernel::manager< 
IOManager >().is_valid_recording_backend( record_to ) ) { std::string msg = String::compose( "Unknown recording backend '%1'", record_to ); throw BadProperty( msg ); @@ -125,7 +126,7 @@ nest::RecordingDevice::State_::set( const DictionaryDatum& d ) void nest::RecordingDevice::set_status( const DictionaryDatum& d ) { - if ( kernel().simulation_manager.has_been_prepared() ) + if ( kernel::manager< SimulationManager >().has_been_prepared() ) { throw BadProperty( "Recorder parameters cannot be changed while inside a Prepare/Run/Cleanup context." ); } @@ -151,7 +152,7 @@ nest::RecordingDevice::set_status( const DictionaryDatum& d ) } } - kernel().io_manager.check_recording_backend_device_status( ptmp.record_to_, backend_params ); + kernel::manager< IOManager >().check_recording_backend_device_status( ptmp.record_to_, backend_params ); // cache all properties accessed by the backend in private member backend_params_->clear(); @@ -166,7 +167,7 @@ nest::RecordingDevice::set_status( const DictionaryDatum& d ) } else { - kernel().io_manager.enroll_recorder( ptmp.record_to_, *this, d ); + kernel::manager< IOManager >().enroll_recorder( ptmp.record_to_, *this, d ); } // if we get here, temporaries contain consistent set of properties @@ -187,7 +188,7 @@ nest::RecordingDevice::get_status( DictionaryDatum& d ) const if ( get_node_id() == 0 ) // this is a model prototype, not an actual instance { // first get the defaults from the backend - kernel().io_manager.get_recording_backend_device_defaults( P_.record_to_, d ); + kernel::manager< IOManager >().get_recording_backend_device_defaults( P_.record_to_, d ); // then overwrite with cached parameters for ( auto kv_pair = backend_params_->begin(); kv_pair != backend_params_->end(); ++kv_pair ) @@ -197,7 +198,7 @@ nest::RecordingDevice::get_status( DictionaryDatum& d ) const } else { - kernel().io_manager.get_recording_backend_device_status( P_.record_to_, *this, d ); + kernel::manager< IOManager 
>().get_recording_backend_device_status( P_.record_to_, *this, d ); } } @@ -214,6 +215,6 @@ nest::RecordingDevice::write( const Event& event, const std::vector< double >& double_values, const std::vector< long >& long_values ) { - kernel().io_manager.write( P_.record_to_, *this, event, double_values, long_values ); + kernel::manager< IOManager >().write( P_.record_to_, *this, event, double_values, long_values ); S_.n_events_++; } diff --git a/nestkernel/ring_buffer.cpp b/nestkernel/ring_buffer.cpp index 1c0275ed2e..b4b8cba504 100644 --- a/nestkernel/ring_buffer.cpp +++ b/nestkernel/ring_buffer.cpp @@ -24,14 +24,17 @@ #include "connection_manager.h" nest::RingBuffer::RingBuffer() - : buffer_( kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay(), 0.0 ) + : buffer_( + kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(), + 0.0 ) { } void nest::RingBuffer::resize() { - size_t size = kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay(); + size_t size = + kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(); if ( buffer_.size() != size ) { buffer_.resize( size ); @@ -48,14 +51,17 @@ nest::RingBuffer::clear() nest::MultRBuffer::MultRBuffer() - : buffer_( kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay(), 0.0 ) + : buffer_( + kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(), + 0.0 ) { } void nest::MultRBuffer::resize() { - size_t size = kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay(); + size_t size = + kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(); if ( buffer_.size() != size ) { buffer_.resize( size ); @@ -71,14 +77,16 @@ nest::MultRBuffer::clear() 
nest::ListRingBuffer::ListRingBuffer() - : buffer_( kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay() ) + : buffer_( + kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay() ) { } void nest::ListRingBuffer::resize() { - size_t size = kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay(); + size_t size = + kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(); if ( buffer_.size() != size ) { buffer_.resize( size ); diff --git a/nestkernel/ring_buffer.h b/nestkernel/ring_buffer.h index e4bbf70ddf..96c104a8e7 100644 --- a/nestkernel/ring_buffer.h +++ b/nestkernel/ring_buffer.h @@ -171,7 +171,7 @@ inline double RingBuffer::get_value( const long offs ) { assert( 0 <= offs and static_cast< size_t >( offs ) < buffer_.size() ); - assert( offs < kernel().connection_manager.get_min_delay() ); + assert( offs < kernel::manager< ConnectionManager >().get_min_delay() ); // offs == 0 is beginning of slice, but we have to // take modulo into account when indexing @@ -185,7 +185,7 @@ inline double RingBuffer::get_value_wfr_update( const long offs ) { assert( 0 <= offs and static_cast< size_t >( offs ) < buffer_.size() ); - assert( offs < kernel().connection_manager.get_min_delay() ); + assert( offs < kernel::manager< ConnectionManager >().get_min_delay() ); // offs == 0 is beginning of slice, but we have to // take modulo into account when indexing @@ -197,7 +197,7 @@ RingBuffer::get_value_wfr_update( const long offs ) inline size_t RingBuffer::get_index_( const long d ) const { - const long idx = kernel().event_delivery_manager.get_modulo( d ); + const long idx = kernel::manager< EventDeliveryManager >().get_modulo( d ); assert( 0 <= idx ); assert( static_cast< size_t >( idx ) < buffer_.size() ); return idx; @@ -267,7 +267,7 @@ inline double MultRBuffer::get_value( const long offs ) { assert( 
0 <= offs and static_cast< size_t >( offs ) < buffer_.size() ); - assert( offs < kernel().connection_manager.get_min_delay() ); + assert( offs < kernel::manager< ConnectionManager >().get_min_delay() ); // offs == 0 is beginning of slice, but we have to // take modulo into account when indexing @@ -280,7 +280,7 @@ MultRBuffer::get_value( const long offs ) inline size_t MultRBuffer::get_index_( const long d ) const { - const long idx = kernel().event_delivery_manager.get_modulo( d ); + const long idx = kernel::manager< EventDeliveryManager >().get_modulo( d ); assert( 0 <= idx and static_cast< size_t >( idx ) < buffer_.size() ); return idx; } @@ -348,7 +348,7 @@ inline std::list< double >& ListRingBuffer::get_list( const long offs ) { assert( 0 <= offs and static_cast< size_t >( offs ) < buffer_.size() ); - assert( offs < kernel().connection_manager.get_min_delay() ); + assert( offs < kernel::manager< ConnectionManager >().get_min_delay() ); // offs == 0 is beginning of slice, but we have to // take modulo into account when indexing @@ -359,7 +359,7 @@ ListRingBuffer::get_list( const long offs ) inline size_t ListRingBuffer::get_index_( const long d ) const { - const long idx = kernel().event_delivery_manager.get_modulo( d ); + const long idx = kernel::manager< EventDeliveryManager >().get_modulo( d ); assert( 0 <= idx ); assert( static_cast< size_t >( idx ) < buffer_.size() ); return idx; @@ -425,7 +425,8 @@ MultiChannelInputBuffer< num_channels >::size() const template < unsigned int num_channels > MultiChannelInputBuffer< num_channels >::MultiChannelInputBuffer() - : buffer_( kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay(), + : buffer_( + kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(), std::array< double, num_channels >() ) { } @@ -434,7 +435,8 @@ template < unsigned int num_channels > void MultiChannelInputBuffer< num_channels >::resize() { - const size_t 
size = kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay(); + const size_t size = + kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(); if ( buffer_.size() != size ) { buffer_.resize( size, std::array< double, num_channels >() ); diff --git a/nestkernel/secondary_event.h b/nestkernel/secondary_event.h index 746e8bc0e4..b6163fb5db 100644 --- a/nestkernel/secondary_event.h +++ b/nestkernel/secondary_event.h @@ -476,7 +476,7 @@ template < typename DataType, typename Subclass > void DataSecondaryEvent< DataType, Subclass >::add_syn_id( const synindex synid ) { - kernel().vp_manager.assert_thread_parallel(); + kernel::manager< VPManager >().assert_thread_parallel(); // This is done during connection model cloning, which happens thread-parallel. // To not risk trashing the set data structure, we let only master register the @@ -493,7 +493,7 @@ template < typename DataType, typename Subclass > void DataSecondaryEvent< DataType, Subclass >::set_coeff_length( const size_t coeff_length ) { - kernel().vp_manager.assert_single_threaded(); + kernel::manager< VPManager >().assert_single_threaded(); coeff_length_ = coeff_length; } diff --git a/nestkernel/send_buffer_position.cpp b/nestkernel/send_buffer_position.cpp index 3e652aa618..4c76e59d21 100644 --- a/nestkernel/send_buffer_position.cpp +++ b/nestkernel/send_buffer_position.cpp @@ -26,12 +26,12 @@ #include "send_buffer_position.h" nest::SendBufferPosition::SendBufferPosition() - : begin_( kernel().mpi_manager.get_num_processes(), 0 ) - , end_( kernel().mpi_manager.get_num_processes(), 0 ) - , idx_( kernel().mpi_manager.get_num_processes(), 0 ) + : begin_( kernel::manager< MPIManager >().get_num_processes(), 0 ) + , end_( kernel::manager< MPIManager >().get_num_processes(), 0 ) + , idx_( kernel::manager< MPIManager >().get_num_processes(), 0 ) { - const size_t num_procs = kernel().mpi_manager.get_num_processes(); - const size_t 
send_recv_count_per_rank = kernel().mpi_manager.get_send_recv_count_spike_data_per_rank(); + const size_t num_procs = kernel::manager< MPIManager >().get_num_processes(); + const size_t send_recv_count_per_rank = kernel::manager< MPIManager >().get_send_recv_count_spike_data_per_rank(); for ( size_t rank = 0; rank < num_procs; ++rank ) { diff --git a/nestkernel/simulation_manager.cpp b/nestkernel/simulation_manager.cpp index 3ec93ffc11..eb14b15959 100644 --- a/nestkernel/simulation_manager.cpp +++ b/nestkernel/simulation_manager.cpp @@ -177,7 +177,7 @@ nest::SimulationManager::set_status( const DictionaryDatum& d ) from_step_ = 0; slice_ = 0; // clear all old spikes - kernel().event_delivery_manager.configure_spike_data_buffers(); + kernel::manager< EventDeliveryManager >().configure_spike_data_buffers(); } } @@ -194,7 +194,7 @@ nest::SimulationManager::set_status( const DictionaryDatum& d ) if ( tics_per_ms_updated or res_updated ) { std::vector< std::string > errors; - if ( kernel().node_manager.size() > 0 ) + if ( kernel::manager< NodeManager >().size() > 0 ) { errors.push_back( "Nodes have already been created" ); } @@ -202,7 +202,7 @@ nest::SimulationManager::set_status( const DictionaryDatum& d ) { errors.push_back( "Network has been simulated" ); } - if ( kernel().model_manager.are_model_defaults_modified() ) + if ( kernel::manager< ModelManager >().are_model_defaults_modified() ) { errors.push_back( "Model defaults were modified" ); } @@ -241,8 +241,8 @@ nest::SimulationManager::set_status( const DictionaryDatum& d ) // adjust to new resolution clock_.calibrate(); // adjust delays in the connection system to new resolution - kernel().connection_manager.calibrate( time_converter ); - kernel().model_manager.calibrate( time_converter ); + kernel::manager< ConnectionManager >().calibrate( time_converter ); + kernel::manager< ModelManager >().calibrate( time_converter ); std::string msg = String::compose( "Tics per ms and resolution changed from %1 tics and %2 
ms to %3 tics and %4 ms.", @@ -278,8 +278,8 @@ nest::SimulationManager::set_status( const DictionaryDatum& d ) Time::set_resolution( resd ); clock_.calibrate(); // adjust to new resolution // adjust delays in the connection system to new resolution - kernel().connection_manager.calibrate( time_converter ); - kernel().model_manager.calibrate( time_converter ); + kernel::manager< ConnectionManager >().calibrate( time_converter ); + kernel::manager< ModelManager >().calibrate( time_converter ); std::string msg = String::compose( "Temporal resolution changed from %1 to %2 ms.", old_res, resd ); LOG( M_INFO, "SimulationManager::set_status", msg ); @@ -305,7 +305,7 @@ nest::SimulationManager::set_status( const DictionaryDatum& d ) bool wfr; if ( updateValue< bool >( d, names::use_wfr, wfr ) ) { - if ( kernel().node_manager.size() > 0 ) + if ( kernel::manager< NodeManager >().size() > 0 ) { LOG( M_ERROR, "SimulationManager::set_status", @@ -339,7 +339,7 @@ nest::SimulationManager::set_status( const DictionaryDatum& d ) "relaxation is disabled. Set use_wfr to true first." 
); throw KernelException(); } - else if ( kernel().connection_manager.get_num_connections() != 0 ) + else if ( kernel::manager< ConnectionManager >().get_num_connections() != 0 ) { LOG( M_ERROR, "SimulationManager::set_status", @@ -507,7 +507,7 @@ nest::SimulationManager::get_status( DictionaryDatum& d ) void nest::SimulationManager::prepare() { - assert( kernel().is_initialized() ); + assert( kernel::manager< KernelManager >().is_initialized() ); if ( prepared_ ) { @@ -528,7 +528,7 @@ nest::SimulationManager::prepare() // reset profiling timers reset_timers_for_dynamics(); - kernel().event_delivery_manager.reset_timers_for_dynamics(); + kernel::manager< EventDeliveryManager >().reset_timers_for_dynamics(); t_real_ = 0; t_slice_begin_ = timeval(); // set to timeval{0, 0} as unset flag @@ -536,38 +536,39 @@ nest::SimulationManager::prepare() // find shortest and longest delay across all MPI processes // this call sets the member variables - kernel().connection_manager.update_delay_extrema_(); - kernel().event_delivery_manager.init_moduli(); + kernel::manager< ConnectionManager >().update_delay_extrema_(); + kernel::manager< EventDeliveryManager >().init_moduli(); // if at the beginning of a simulation, set up spike buffers if ( not simulated_ ) { - kernel().event_delivery_manager.configure_spike_data_buffers(); + kernel::manager< EventDeliveryManager >().configure_spike_data_buffers(); } - kernel().node_manager.ensure_valid_thread_local_ids(); - kernel().node_manager.prepare_nodes(); + kernel::manager< NodeManager >().ensure_valid_thread_local_ids(); + kernel::manager< NodeManager >().prepare_nodes(); // we have to do enter_runtime after prepare_nodes, since we use // calibrate to map the ports of MUSIC devices, which has to be done // before enter_runtime if ( not simulated_ ) // only enter the runtime mode once { - double tick = Time::get_resolution().get_ms() * kernel().connection_manager.get_min_delay(); - kernel().music_manager.enter_runtime( tick ); + double 
tick = Time::get_resolution().get_ms() * kernel::manager< ConnectionManager >().get_min_delay(); + kernel::manager< MUSICManager >().enter_runtime( tick ); } prepared_ = true; // check whether waveform relaxation is used on any MPI process; // needs to be called before update_connection_intrastructure_since // it resizes coefficient arrays for secondary events - kernel().node_manager.check_wfr_use(); + kernel::manager< NodeManager >().check_wfr_use(); - if ( kernel().node_manager.have_nodes_changed() or kernel().connection_manager.connections_have_changed() ) + if ( kernel::manager< NodeManager >().have_nodes_changed() + or kernel::manager< ConnectionManager >().connections_have_changed() ) { #pragma omp parallel { - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); update_connection_infrastructure( tid ); } // of omp parallel } @@ -619,7 +620,7 @@ nest::SimulationManager::run( Time const& t ) { assert_valid_simtime( t ); - kernel().random_manager.check_rng_synchrony(); + kernel::manager< RandomManager >().check_rng_synchrony(); if ( not prepared_ ) { @@ -636,10 +637,10 @@ nest::SimulationManager::run( Time const& t ) return; } - kernel().io_manager.pre_run_hook(); + kernel::manager< IOManager >().pre_run_hook(); // Reset local spike counters within event_delivery_manager - kernel().event_delivery_manager.reset_counters(); + kernel::manager< EventDeliveryManager >().reset_counters(); sw_simulate_.start(); @@ -647,14 +648,14 @@ nest::SimulationManager::run( Time const& t ) // of a simulation, it has been reset properly elsewhere. If // a simulation was ended and is now continued, from_step_ will // have the proper value. to_step_ is set as in advance_time(). 
- to_step_ = std::min( from_step_ + to_do_, kernel().connection_manager.get_min_delay() ); + to_step_ = std::min( from_step_ + to_do_, kernel::manager< ConnectionManager >().get_min_delay() ); // Warn about possible inconsistencies, see #504. // This test cannot come any earlier, because we first need to compute // min_delay_ // above. - if ( t.get_steps() % kernel().connection_manager.get_min_delay() != 0 ) + if ( t.get_steps() % kernel::manager< ConnectionManager >().get_min_delay() != 0 ) { LOG( M_WARNING, "SimulationManager::run", @@ -668,8 +669,8 @@ nest::SimulationManager::run( Time const& t ) call_update_(); - kernel().io_manager.post_run_hook(); - kernel().random_manager.check_rng_synchrony(); + kernel::manager< IOManager >().post_run_hook(); + kernel::manager< RandomManager >().check_rng_synchrony(); sw_simulate_.stop(); } @@ -690,30 +691,30 @@ nest::SimulationManager::cleanup() return; } - kernel().node_manager.finalize_nodes(); + kernel::manager< NodeManager >().finalize_nodes(); prepared_ = false; } void nest::SimulationManager::call_update_() { - assert( kernel().is_initialized() and not inconsistent_state_ ); + assert( kernel::manager< KernelManager >().is_initialized() and not inconsistent_state_ ); std::ostringstream os; double t_sim = to_do_ * Time::get_resolution().get_ms(); - size_t num_active_nodes = kernel().node_manager.get_num_active_nodes(); + size_t num_active_nodes = kernel::manager< NodeManager >().get_num_active_nodes(); os << "Number of local nodes: " << num_active_nodes << std::endl; os << "Simulation time (ms): " << t_sim; #ifdef _OPENMP - os << std::endl << "Number of OpenMP threads: " << kernel().vp_manager.get_num_threads(); + os << std::endl << "Number of OpenMP threads: " << kernel::manager< VPManager >().get_num_threads(); #else os << std::endl << "Not using OpenMP"; #endif #ifdef HAVE_MPI - os << std::endl << "Number of MPI processes: " << kernel().mpi_manager.get_num_processes(); + os << std::endl << "Number of MPI processes: 
" << kernel::manager< MPIManager >().get_num_processes(); #else os << std::endl << "Not using MPI"; #endif @@ -745,7 +746,7 @@ nest::SimulationManager::call_update_() std::cout << std::endl; } - kernel().mpi_manager.synchronize(); + kernel::manager< MPIManager >().synchronize(); LOG( M_INFO, "SimulationManager::run", "Simulation finished." ); } @@ -759,10 +760,10 @@ nest::SimulationManager::update_connection_infrastructure( const size_t tid ) sw_communicate_prepare_.start(); - kernel().connection_manager.sort_connections( tid ); + kernel::manager< ConnectionManager >().sort_connections( tid ); sw_gather_target_data_.start(); - kernel().connection_manager.restructure_connection_tables( tid ); - kernel().connection_manager.collect_compressed_spike_data( tid ); + kernel::manager< ConnectionManager >().restructure_connection_tables( tid ); + kernel::manager< ConnectionManager >().collect_compressed_spike_data( tid ); sw_gather_target_data_.stop(); get_omp_synchronization_construction_stopwatch().start(); @@ -771,22 +772,22 @@ nest::SimulationManager::update_connection_infrastructure( const size_t tid ) #pragma omp single { - kernel().connection_manager.compute_target_data_buffer_size(); - kernel().event_delivery_manager.resize_send_recv_buffers_target_data(); + kernel::manager< ConnectionManager >().compute_target_data_buffer_size(); + kernel::manager< EventDeliveryManager >().resize_send_recv_buffers_target_data(); // check whether primary and secondary connections exists on any // compute node - kernel().connection_manager.sync_has_primary_connections(); - kernel().connection_manager.check_secondary_connections_exist(); + kernel::manager< ConnectionManager >().sync_has_primary_connections(); + kernel::manager< ConnectionManager >().check_secondary_connections_exist(); } - if ( kernel().connection_manager.secondary_connections_exist() ) + if ( kernel::manager< ConnectionManager >().secondary_connections_exist() ) { 
get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier get_omp_synchronization_construction_stopwatch().stop(); - kernel().connection_manager.compute_compressed_secondary_recv_buffer_positions( tid ); + kernel::manager< ConnectionManager >().compute_compressed_secondary_recv_buffer_positions( tid ); get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier @@ -794,8 +795,8 @@ nest::SimulationManager::update_connection_infrastructure( const size_t tid ) #pragma omp single { - kernel().mpi_manager.communicate_recv_counts_secondary_events(); - kernel().event_delivery_manager.configure_secondary_buffers(); + kernel::manager< MPIManager >().communicate_recv_counts_secondary_events(); + kernel::manager< EventDeliveryManager >().configure_secondary_buffers(); } } @@ -803,25 +804,25 @@ nest::SimulationManager::update_connection_infrastructure( const size_t tid ) // communicate connection information from postsynaptic to // presynaptic side - if ( kernel().connection_manager.use_compressed_spikes() ) + if ( kernel::manager< ConnectionManager >().use_compressed_spikes() ) { #pragma omp barrier #pragma omp single { - kernel().connection_manager.initialize_iteration_state(); // could possibly be combined with s'th above + kernel::manager< ConnectionManager >().initialize_iteration_state(); // could possibly be combined with s'th above } - kernel().event_delivery_manager.gather_target_data_compressed( tid ); + kernel::manager< EventDeliveryManager >().gather_target_data_compressed( tid ); } else { - kernel().event_delivery_manager.gather_target_data( tid ); + kernel::manager< EventDeliveryManager >().gather_target_data( tid ); } sw_gather_target_data_.stop(); - if ( kernel().connection_manager.secondary_connections_exist() ) + if ( kernel::manager< ConnectionManager >().secondary_connections_exist() ) { - kernel().connection_manager.compress_secondary_send_buffer_pos( tid ); + kernel::manager< ConnectionManager 
>().compress_secondary_send_buffer_pos( tid ); } get_omp_synchronization_construction_stopwatch().start(); @@ -829,9 +830,9 @@ nest::SimulationManager::update_connection_infrastructure( const size_t tid ) get_omp_synchronization_construction_stopwatch().stop(); #pragma omp single { - kernel().connection_manager.clear_compressed_spike_data_map(); - kernel().node_manager.set_have_nodes_changed( false ); - kernel().connection_manager.unset_connections_have_changed(); + kernel::manager< ConnectionManager >().clear_compressed_spike_data_map(); + kernel::manager< NodeManager >().set_have_nodes_changed( false ); + kernel::manager< ConnectionManager >().unset_connections_have_changed(); } sw_communicate_prepare_.stop(); } @@ -853,12 +854,13 @@ nest::SimulationManager::update_() double start_current_update = sw_simulate_.elapsed(); bool update_time_limit_exceeded = false; - std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised( kernel().vp_manager.get_num_threads() ); + std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised( + kernel::manager< VPManager >().get_num_threads() ); // parallel section begins #pragma omp parallel { - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); // We update in a parallel region. Therefore, we need to catch // exceptions here and then handle them after the parallel region. @@ -881,18 +883,18 @@ nest::SimulationManager::update_() // reach target neurons before spikes are propagated through eprop synapses. // This sequence safeguards the gradient computation from missing critical information // from the time step preceding the arrival of the spike triggering the weight update. 
- if ( kernel().connection_manager.secondary_connections_exist() ) + if ( kernel::manager< ConnectionManager >().secondary_connections_exist() ) { sw_deliver_secondary_data_.start(); - kernel().event_delivery_manager.deliver_secondary_events( tid, false ); + kernel::manager< EventDeliveryManager >().deliver_secondary_events( tid, false ); sw_deliver_secondary_data_.stop(); } - if ( kernel().connection_manager.has_primary_connections() ) + if ( kernel::manager< ConnectionManager >().has_primary_connections() ) { sw_deliver_spike_data_.start(); // Deliver spikes from receive buffer to ring buffers. - kernel().event_delivery_manager.deliver_events( tid ); + kernel::manager< EventDeliveryManager >().deliver_events( tid ); sw_deliver_spike_data_.stop(); } @@ -917,11 +919,11 @@ nest::SimulationManager::update_() // MUSIC *before* MUSIC time is advanced if ( slice_ > 0 ) { - kernel().music_manager.advance_music_time(); + kernel::manager< MUSICManager >().advance_music_time(); } // the following could be made thread-safe - kernel().music_manager.update_music_event_handlers( clock_, from_step_, to_step_ ); + kernel::manager< MUSICManager >().update_music_event_handlers( clock_, from_step_, to_step_ ); } // end of master section, all threads have to synchronize at this point #pragma omp barrier @@ -931,7 +933,8 @@ nest::SimulationManager::update_() // preliminary update of nodes that use waveform relaxtion, only // necessary if secondary connections exist and any node uses // wfr - if ( kernel().connection_manager.secondary_connections_exist() and kernel().node_manager.wfr_is_used() ) + if ( kernel::manager< ConnectionManager >().secondary_connections_exist() + and kernel::manager< NodeManager >().wfr_is_used() ) { #pragma omp single { @@ -942,14 +945,15 @@ nest::SimulationManager::update_() // needs to be done in omp single since to_step_ is a scheduler // variable old_to_step = to_step_; - if ( to_step_ < kernel().connection_manager.get_min_delay() ) + if ( to_step_ < 
kernel::manager< ConnectionManager >().get_min_delay() ) { - to_step_ = kernel().connection_manager.get_min_delay(); + to_step_ = kernel::manager< ConnectionManager >().get_min_delay(); } } bool max_iterations_reached = true; - const std::vector< Node* >& thread_local_wfr_nodes = kernel().node_manager.get_wfr_nodes_on_thread( tid ); + const std::vector< Node* >& thread_local_wfr_nodes = + kernel::manager< NodeManager >().get_wfr_nodes_on_thread( tid ); for ( long n = 0; n < wfr_max_iterations_; ++n ) { bool done_p = true; @@ -984,7 +988,7 @@ nest::SimulationManager::update_() } // gather SecondaryEvents (e.g. GapJunctionEvents) - kernel().event_delivery_manager.gather_secondary_events( done_all ); + kernel::manager< EventDeliveryManager >().gather_secondary_events( done_all ); // reset done and done_all //(needs to be in the single threaded part) @@ -994,7 +998,7 @@ nest::SimulationManager::update_() // deliver SecondaryEvents generated during wfr_update // returns the done value over all threads - done_p = kernel().event_delivery_manager.deliver_secondary_events( tid, true ); + done_p = kernel::manager< EventDeliveryManager >().deliver_secondary_events( tid, true ); if ( done_p ) { @@ -1018,14 +1022,14 @@ nest::SimulationManager::update_() } // of if(wfr_is_used) // end of preliminary update - if ( kernel().sp_manager.is_structural_plasticity_enabled() + if ( kernel::manager< SPManager >().is_structural_plasticity_enabled() and ( std::fmod( Time( Time::step( clock_.get_steps() + from_step_ ) ).get_ms(), - kernel().sp_manager.get_structural_plasticity_update_interval() ) + kernel::manager< SPManager >().get_structural_plasticity_update_interval() ) == 0 ) ) { #pragma omp barrier - for ( SparseNodeArray::const_iterator i = kernel().node_manager.get_local_nodes( tid ).begin(); - i != kernel().node_manager.get_local_nodes( tid ).end(); + for ( SparseNodeArray::const_iterator i = kernel::manager< NodeManager >().get_local_nodes( tid ).begin(); + i != kernel::manager< 
NodeManager >().get_local_nodes( tid ).end(); ++i ) { Node* node = i->get_node(); @@ -1036,11 +1040,11 @@ nest::SimulationManager::update_() get_omp_synchronization_simulation_stopwatch().stop(); #pragma omp single { - kernel().sp_manager.update_structural_plasticity(); + kernel::manager< SPManager >().update_structural_plasticity(); } // Remove 10% of the vacant elements - for ( SparseNodeArray::const_iterator i = kernel().node_manager.get_local_nodes( tid ).begin(); - i != kernel().node_manager.get_local_nodes( tid ).end(); + for ( SparseNodeArray::const_iterator i = kernel::manager< NodeManager >().get_local_nodes( tid ).begin(); + i != kernel::manager< NodeManager >().get_local_nodes( tid ).end(); ++i ) { Node* node = i->get_node(); @@ -1056,7 +1060,7 @@ nest::SimulationManager::update_() } // of structural plasticity sw_update_.start(); - const SparseNodeArray& thread_local_nodes = kernel().node_manager.get_local_nodes( tid ); + const SparseNodeArray& thread_local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); for ( SparseNodeArray::const_iterator n = thread_local_nodes.begin(); n != thread_local_nodes.end(); ++n ) { @@ -1079,18 +1083,18 @@ nest::SimulationManager::update_() #pragma omp master { // gather and deliver only at end of slice, i.e., end of min_delay step - if ( to_step_ == kernel().connection_manager.get_min_delay() ) + if ( to_step_ == kernel::manager< ConnectionManager >().get_min_delay() ) { - if ( kernel().connection_manager.has_primary_connections() ) + if ( kernel::manager< ConnectionManager >().has_primary_connections() ) { sw_gather_spike_data_.start(); - kernel().event_delivery_manager.gather_spike_data(); + kernel::manager< EventDeliveryManager >().gather_spike_data(); sw_gather_spike_data_.stop(); } - if ( kernel().connection_manager.secondary_connections_exist() ) + if ( kernel::manager< ConnectionManager >().secondary_connections_exist() ) { sw_gather_secondary_data_.start(); - 
kernel().event_delivery_manager.gather_secondary_events( true ); + kernel::manager< EventDeliveryManager >().gather_secondary_events( true ); sw_gather_secondary_data_.stop(); } } @@ -1116,7 +1120,7 @@ nest::SimulationManager::update_() // if block to avoid omp barrier if SIONLIB is not used #ifdef HAVE_SIONLIB - kernel().io_manager.post_step_hook(); + kernel::manager< IOManager >().post_step_hook(); // enforce synchronization after post-step activities of the recording backends get_omp_synchronization_simulation_stopwatch().start(); #pragma omp barrier @@ -1134,8 +1138,8 @@ nest::SimulationManager::update_() } while ( to_do_ > 0 and not update_time_limit_exceeded and not exceptions_raised.at( tid ) ); // End of the slice, we update the number of synaptic elements - for ( SparseNodeArray::const_iterator i = kernel().node_manager.get_local_nodes( tid ).begin(); - i != kernel().node_manager.get_local_nodes( tid ).end(); + for ( SparseNodeArray::const_iterator i = kernel::manager< NodeManager >().get_local_nodes( tid ).begin(); + i != kernel::manager< NodeManager >().get_local_nodes( tid ).end(); ++i ) { Node* node = i->get_node(); @@ -1156,7 +1160,7 @@ nest::SimulationManager::update_() } // check if any exceptions have been raised - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { if ( exceptions_raised.at( tid ).get() ) { @@ -1174,11 +1178,11 @@ nest::SimulationManager::advance_time_() to_do_ -= to_step_ - from_step_; // advance clock, update modulos, slice counter only if slice completed - if ( to_step_ == kernel().connection_manager.get_min_delay() ) + if ( to_step_ == kernel::manager< ConnectionManager >().get_min_delay() ) { - clock_ += Time::step( kernel().connection_manager.get_min_delay() ); + clock_ += Time::step( kernel::manager< ConnectionManager >().get_min_delay() ); ++slice_; - kernel().event_delivery_manager.update_moduli(); + 
kernel::manager< EventDeliveryManager >().update_moduli(); from_step_ = 0; } else @@ -1188,17 +1192,17 @@ nest::SimulationManager::advance_time_() long end_sim = from_step_ + to_do_; - if ( kernel().connection_manager.get_min_delay() < end_sim ) + if ( kernel::manager< ConnectionManager >().get_min_delay() < end_sim ) { // update to end of time slice - to_step_ = kernel().connection_manager.get_min_delay(); + to_step_ = kernel::manager< ConnectionManager >().get_min_delay(); } else { to_step_ = end_sim; // update to end of simulation time } - assert( to_step_ - from_step_ <= kernel().connection_manager.get_min_delay() ); + assert( to_step_ - from_step_ <= kernel::manager< ConnectionManager >().get_min_delay() ); } void @@ -1231,5 +1235,5 @@ nest::SimulationManager::print_progress_() nest::Time const nest::SimulationManager::get_previous_slice_origin() const { - return clock_ - Time::step( kernel().connection_manager.get_min_delay() ); + return clock_ - Time::step( kernel::manager< ConnectionManager >().get_min_delay() ); } diff --git a/nestkernel/slice_ring_buffer.cpp b/nestkernel/slice_ring_buffer.cpp index 6a6c4563ac..7ed103b5d9 100644 --- a/nestkernel/slice_ring_buffer.cpp +++ b/nestkernel/slice_ring_buffer.cpp @@ -38,9 +38,10 @@ nest::SliceRingBuffer::SliceRingBuffer() void nest::SliceRingBuffer::resize() { - long newsize = static_cast< long >( std::ceil( - static_cast< double >( kernel().connection_manager.get_min_delay() + kernel().connection_manager.get_max_delay() ) - / kernel().connection_manager.get_min_delay() ) ); + long newsize = + static_cast< long >( std::ceil( static_cast< double >( kernel::manager< ConnectionManager >().get_min_delay() + + kernel::manager< ConnectionManager >().get_max_delay() ) + / kernel::manager< ConnectionManager >().get_min_delay() ) ); if ( queue_.size() != static_cast< unsigned long >( newsize ) ) { queue_.resize( newsize ); @@ -69,7 +70,7 @@ void nest::SliceRingBuffer::prepare_delivery() { // vector to deliver from in this 
slice - deliver_ = &( queue_[ kernel().event_delivery_manager.get_slice_modulo( 0 ) ] ); + deliver_ = &( queue_[ kernel::manager< EventDeliveryManager >().get_slice_modulo( 0 ) ] ); // sort events, first event last std::sort( deliver_->begin(), deliver_->end(), std::greater< SpikeInfo >() ); @@ -79,7 +80,7 @@ void nest::SliceRingBuffer::discard_events() { // vector to deliver from in this slice - deliver_ = &( queue_[ kernel().event_delivery_manager.get_slice_modulo( 0 ) ] ); + deliver_ = &( queue_[ kernel::manager< EventDeliveryManager >().get_slice_modulo( 0 ) ] ); deliver_->clear(); } diff --git a/nestkernel/slice_ring_buffer.h b/nestkernel/slice_ring_buffer.h index e14bfe9926..9576e57301 100644 --- a/nestkernel/slice_ring_buffer.h +++ b/nestkernel/slice_ring_buffer.h @@ -158,7 +158,7 @@ class SliceRingBuffer inline void SliceRingBuffer::add_spike( const long rel_delivery, const long stamp, const double ps_offset, const double weight ) { - const long idx = kernel().event_delivery_manager.get_slice_modulo( rel_delivery ); + const long idx = kernel::manager< EventDeliveryManager >().get_slice_modulo( rel_delivery ); assert( static_cast< size_t >( idx ) < queue_.size() ); assert( ps_offset >= 0 ); diff --git a/nestkernel/sonata_connector.cpp b/nestkernel/sonata_connector.cpp index 59b7583c66..c6b2c27cd6 100644 --- a/nestkernel/sonata_connector.cpp +++ b/nestkernel/sonata_connector.cpp @@ -401,7 +401,8 @@ SonataConnector::connect_chunk_( const hsize_t hyperslab_size, const hsize_t off read_subset_( delay_dset_, delay_data_subset, H5::PredType::NATIVE_DOUBLE, hyperslab_size, offset ); } - std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( kernel().vp_manager.get_num_threads() ); + std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( + kernel::manager< VPManager >().get_num_threads() ); // Retrieve the correct NodeCollections const auto nest_nodes = getValue< DictionaryDatum >( graph_specs_->lookup( "nodes" ) ); @@ 
-438,7 +439,7 @@ SonataConnector::connect_chunk_( const hsize_t hyperslab_size, const hsize_t off #pragma omp parallel { - const auto tid = kernel().vp_manager.get_thread_id(); + const auto tid = kernel::manager< VPManager >().get_thread_id(); RngPtr rng = get_vp_specific_rng( tid ); try @@ -450,7 +451,7 @@ SonataConnector::connect_chunk_( const hsize_t hyperslab_size, const hsize_t off const auto sonata_tgt_id = tgt_node_id_data_subset[ i ]; const size_t tnode_id = ( *( tnode_begin + sonata_tgt_id ) ).node_id; - if ( not kernel().vp_manager.is_node_id_vp_local( tnode_id ) ) + if ( not kernel::manager< VPManager >().is_node_id_vp_local( tnode_id ) ) { continue; } @@ -458,7 +459,7 @@ SonataConnector::connect_chunk_( const hsize_t hyperslab_size, const hsize_t off const auto sonata_src_id = src_node_id_data_subset[ i ]; const size_t snode_id = ( *( snode_begin + sonata_src_id ) ).node_id; - Node* target = kernel().node_manager.get_node_or_proxy( tnode_id, tid ); + Node* target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); const auto edge_type_id = edge_type_id_data_subset[ i ]; @@ -469,7 +470,7 @@ SonataConnector::connect_chunk_( const hsize_t hyperslab_size, const hsize_t off get_synapse_params_( snode_id, *target, target_thread, rng, edge_type_id ); - kernel().connection_manager.connect( snode_id, + kernel::manager< ConnectionManager >().connect( snode_id, target, target_thread, edge_type_id_2_syn_model_.at( edge_type_id ), @@ -489,7 +490,7 @@ SonataConnector::connect_chunk_( const hsize_t hyperslab_size, const hsize_t off } // end parallel region // Check if any exceptions have been raised - for ( size_t thr = 0; thr < kernel().vp_manager.get_num_threads(); ++thr ) + for ( size_t thr = 0; thr < kernel::manager< VPManager >().get_num_threads(); ++thr ) { if ( exceptions_raised_.at( thr ).get() ) { @@ -571,7 +572,7 @@ SonataConnector::create_edge_type_id_2_syn_spec_( DictionaryDatum 
edge_params ) const auto syn_name = getValue< std::string >( ( *d )[ "synapse_model" ] ); // The following call will throw "UnknownSynapseType" if syn_name is not naming a known model - const size_t synapse_model_id = kernel().model_manager.get_synapse_model_id( syn_name ); + const size_t synapse_model_id = kernel::manager< ModelManager >().get_synapse_model_id( syn_name ); set_synapse_params_( d, synapse_model_id, type_id ); edge_type_id_2_syn_model_[ type_id ] = synapse_model_id; @@ -581,7 +582,7 @@ SonataConnector::create_edge_type_id_2_syn_spec_( DictionaryDatum edge_params ) void SonataConnector::set_synapse_params_( DictionaryDatum syn_dict, size_t synapse_model_id, int type_id ) { - DictionaryDatum syn_defaults = kernel().model_manager.get_connector_defaults( synapse_model_id ); + DictionaryDatum syn_defaults = kernel::manager< ModelManager >().get_connector_defaults( synapse_model_id ); ConnParameterMap synapse_params; for ( Dictionary::const_iterator default_it = syn_defaults->begin(); default_it != syn_defaults->end(); ++default_it ) @@ -596,13 +597,13 @@ SonataConnector::set_synapse_params_( DictionaryDatum syn_dict, size_t synapse_m { synapse_params[ param_name ] = std::shared_ptr< ConnParameter >( - ConnParameter::create( ( *syn_dict )[ param_name ], kernel().vp_manager.get_num_threads() ) ); + ConnParameter::create( ( *syn_dict )[ param_name ], kernel::manager< VPManager >().get_num_threads() ) ); } } // Now create dictionary with dummy values that we will use to pass settings to the synapses created. We // create it here once to avoid re-creating the object over and over again. - edge_type_id_2_param_dicts_[ type_id ].resize( kernel().vp_manager.get_num_threads(), nullptr ); + edge_type_id_2_param_dicts_[ type_id ].resize( kernel::manager< VPManager >().get_num_threads(), nullptr ); edge_type_id_2_syn_spec_[ type_id ] = synapse_params; // TODO: Once NEST is SLIless, the below loop over threads should be parallelizable. 
In order to parallelize, the @@ -610,7 +611,7 @@ SonataConnector::set_synapse_params_( DictionaryDatum syn_dict, size_t synapse_m // region. Currently, creation of NumericDatum objects is not thread-safe because sli::pool memory is a static // member variable; thus is also the new operator a static member function. // Note that this also applies to the equivalent loop in conn_builder.cpp - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { edge_type_id_2_param_dicts_[ type_id ][ tid ] = new Dictionary; diff --git a/nestkernel/source_table.cpp b/nestkernel/source_table.cpp index e12349785e..38f6da87b5 100644 --- a/nestkernel/source_table.cpp +++ b/nestkernel/source_table.cpp @@ -41,7 +41,7 @@ void nest::SourceTable::initialize() { assert( sizeof( Source ) == 8 ); - const size_t num_threads = kernel().vp_manager.get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); sources_.resize( num_threads ); is_cleared_.initialize( num_threads, false ); saved_entry_point_.initialize( num_threads, false ); @@ -51,7 +51,7 @@ nest::SourceTable::initialize() #pragma omp parallel { - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); sources_.at( tid ).resize( 0 ); resize_sources(); compressible_sources_.at( tid ).resize( 0 ); @@ -93,7 +93,7 @@ nest::SourceTablePosition nest::SourceTable::find_maximal_position() const { SourceTablePosition max_position( -1, -1, -1 ); - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { if ( max_position < saved_positions_[ tid ] ) { @@ -152,7 +152,7 @@ nest::SourceTable::clean( const size_t tid ) size_t nest::SourceTable::get_node_id( const size_t tid, const synindex syn_id, const size_t lcid ) const { - if ( 
not kernel().connection_manager.get_keep_source_table() ) + if ( not kernel::manager< ConnectionManager >().get_keep_source_table() ) { throw KernelException( "Cannot use SourceTable::get_node_id when get_keep_source_table is false" ); } @@ -211,7 +211,7 @@ nest::SourceTable::compute_buffer_pos_for_unique_secondary_sources( const size_t // targets on the same process, but different threads for ( size_t syn_id = 0; syn_id < sources_[ tid ].size(); ++syn_id ) { - const ConnectorModel& conn_model = kernel().model_manager.get_connection_model( syn_id, tid ); + const ConnectorModel& conn_model = kernel::manager< ModelManager >().get_connection_model( syn_id, tid ); const bool is_primary = conn_model.has_property( ConnectionModelProperties::IS_PRIMARY ); if ( not is_primary ) @@ -227,23 +227,25 @@ nest::SourceTable::compute_buffer_pos_for_unique_secondary_sources( const size_t } } } - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel().simulation_manager.get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); #pragma omp single { // compute receive buffer positions for all unique pairs of source // node ID and synapse-type id on this MPI rank - std::vector< int > recv_counts_secondary_events_in_int_per_rank( kernel().mpi_manager.get_num_processes(), 0 ); + std::vector< int > recv_counts_secondary_events_in_int_per_rank( + kernel::manager< MPIManager >().get_num_processes(), 0 ); for ( std::set< std::pair< size_t, size_t > >::const_iterator cit = ( *unique_secondary_source_node_id_syn_id ).begin(); cit != ( *unique_secondary_source_node_id_syn_id ).end(); ++cit ) { - const size_t source_rank = kernel().mpi_manager.get_process_id_of_node_id( cit->first ); - const size_t event_size = 
kernel().model_manager.get_secondary_event_prototype( cit->second, tid ).size(); + const size_t source_rank = kernel::manager< MPIManager >().get_process_id_of_node_id( cit->first ); + const size_t event_size = + kernel::manager< ModelManager >().get_secondary_event_prototype( cit->second, tid ).size(); buffer_pos_of_source_node_id_syn_id.insert( std::make_pair( pack_source_node_id_and_syn_id( cit->first, cit->second ), @@ -259,7 +261,7 @@ nest::SourceTable::compute_buffer_pos_for_unique_secondary_sources( const size_t ++recv_count; } - kernel().mpi_manager.set_recv_counts_secondary_events_in_int_per_rank( + kernel::manager< MPIManager >().set_recv_counts_secondary_events_in_int_per_rank( recv_counts_secondary_events_in_int_per_rank ); delete unique_secondary_source_node_id_syn_id; } // of omp single @@ -268,8 +270,9 @@ nest::SourceTable::compute_buffer_pos_for_unique_secondary_sources( const size_t void nest::SourceTable::resize_sources() { - kernel().vp_manager.assert_thread_parallel(); - sources_.at( kernel().vp_manager.get_thread_id() ).resize( kernel().model_manager.get_num_connection_models() ); + kernel::manager< VPManager >().assert_thread_parallel(); + sources_.at( kernel::manager< VPManager >().get_thread_id() ) + .resize( kernel::manager< ModelManager >().get_num_connection_models() ); } bool @@ -277,7 +280,7 @@ nest::SourceTable::source_should_be_processed_( const size_t rank_start, const size_t rank_end, const Source& source ) const { - const size_t source_rank = kernel().mpi_manager.get_process_id_of_node_id( source.get_node_id() ); + const size_t source_rank = kernel::manager< MPIManager >().get_process_id_of_node_id( source.get_node_id() ); return not( source.is_processed() or source.is_disabled() @@ -318,13 +321,14 @@ nest::SourceTable::populate_target_data_fields_( const SourceTablePosition& curr const size_t source_rank, TargetData& next_target_data ) const { - assert( not kernel().connection_manager.use_compressed_spikes() ); // handled 
elsewhere + assert( not kernel::manager< ConnectionManager >().use_compressed_spikes() ); // handled elsewhere const auto node_id = current_source.get_node_id(); // set values of next_target_data - next_target_data.set_source_lid( kernel().vp_manager.node_id_to_lid( node_id ) ); - next_target_data.set_source_tid( kernel().vp_manager.vp_to_thread( kernel().vp_manager.node_id_to_vp( node_id ) ) ); + next_target_data.set_source_lid( kernel::manager< VPManager >().node_id_to_lid( node_id ) ); + next_target_data.set_source_tid( + kernel::manager< VPManager >().vp_to_thread( kernel::manager< VPManager >().node_id_to_vp( node_id ) ) ); next_target_data.reset_marker(); if ( current_source.is_primary() ) // primary connection, i.e., chemical synapses @@ -345,9 +349,9 @@ nest::SourceTable::populate_target_data_fields_( const SourceTablePosition& curr // the source rank will write to the buffer position relative to // the first position from the absolute position in the receive // buffer - const size_t relative_recv_buffer_pos = kernel().connection_manager.get_secondary_recv_buffer_position( + const size_t relative_recv_buffer_pos = kernel::manager< ConnectionManager >().get_secondary_recv_buffer_position( current_position.tid, current_position.syn_id, current_position.lcid ) - - kernel().mpi_manager.get_recv_displacement_secondary_events_in_int( source_rank ); + - kernel::manager< MPIManager >().get_recv_displacement_secondary_events_in_int( source_rank ); SecondaryTargetDataFields& secondary_fields = next_target_data.secondary_data; secondary_fields.set_recv_buffer_pos( relative_recv_buffer_pos ); @@ -392,7 +396,7 @@ nest::SourceTable::get_next_target_data( const size_t tid, // we need to set a marker stating whether the entry following this // entry, if existent, has the same source - kernel().connection_manager.set_source_has_more_targets( current_position.tid, + kernel::manager< ConnectionManager >().set_source_has_more_targets( current_position.tid, 
current_position.syn_id, current_position.lcid, next_entry_has_same_source_( current_position, current_source ) ); @@ -409,7 +413,7 @@ nest::SourceTable::get_next_target_data( const size_t tid, // communicated via MPI, so we prepare to return the relevant data // set the source rank - source_rank = kernel().mpi_manager.get_process_id_of_node_id( current_source.get_node_id() ); + source_rank = kernel::manager< MPIManager >().get_process_id_of_node_id( current_source.get_node_id() ); if ( not populate_target_data_fields_( current_position, current_source, source_rank, next_target_data ) ) { @@ -432,7 +436,7 @@ nest::SourceTable::resize_compressible_sources() { compressible_sources_[ tid ].clear(); compressible_sources_[ tid ].resize( - kernel().model_manager.get_num_connection_models(), std::map< size_t, SpikeData >() ); + kernel::manager< ModelManager >().get_num_connection_models(), std::map< size_t, SpikeData >() ); } } @@ -455,13 +459,13 @@ nest::SourceTable::collect_compressible_sources( const size_t tid ) ++lcid; while ( ( lcid < syn_sources.size() ) and ( syn_sources[ lcid ].get_node_id() == old_source_node_id ) ) { - kernel().connection_manager.set_source_has_more_targets( tid, syn_id, lcid - 1, true ); + kernel::manager< ConnectionManager >().set_source_has_more_targets( tid, syn_id, lcid - 1, true ); ++lcid; } // Mark last connection in sequence as not having successor. This is essential if connections are // delete, e.g., by structural plasticity, because we do not globally reset the more_targets flag. 
assert( lcid - 1 < syn_sources.size() ); - kernel().connection_manager.set_source_has_more_targets( tid, syn_id, lcid - 1, false ); + kernel::manager< ConnectionManager >().set_source_has_more_targets( tid, syn_id, lcid - 1, false ); } } } @@ -474,11 +478,11 @@ nest::SourceTable::dump_sources() const { for ( size_t lcid = 0; lcid < sources_[ tid ][ syn_id ].size(); ++lcid ) { - kernel().write_to_dump( String::compose( "src : r%1 t%2 s%3 tg%4 l%5 tt%6", - kernel().mpi_manager.get_rank(), - kernel().vp_manager.get_thread_id(), + kernel::manager< KernelManager >().write_to_dump( String::compose( "src : r%1 t%2 s%3 tg%4 l%5 tt%6", + kernel::manager< MPIManager >().get_rank(), + kernel::manager< VPManager >().get_thread_id(), sources_[ tid ][ syn_id ][ lcid ].get_node_id(), - kernel().connection_manager.get_target_node_id( tid, syn_id, lcid ), + kernel::manager< ConnectionManager >().get_target_node_id( tid, syn_id, lcid ), lcid, tid ) ); } @@ -495,9 +499,9 @@ nest::SourceTable::dump_compressible_sources() const { for ( const auto& entry : compressible_sources_[ tid ][ syn_id ] ) { - kernel().write_to_dump( String::compose( "csrc : r%1 t%2 s%3 l%4 tt%5", - kernel().mpi_manager.get_rank(), - kernel().vp_manager.get_thread_id(), + kernel::manager< KernelManager >().write_to_dump( String::compose( "csrc : r%1 t%2 s%3 l%4 tt%5", + kernel::manager< MPIManager >().get_rank(), + kernel::manager< VPManager >().get_thread_id(), entry.first, entry.second.get_lcid(), entry.second.get_tid() ) ); @@ -510,7 +514,7 @@ void nest::SourceTable::fill_compressed_spike_data( std::vector< std::vector< std::vector< SpikeData > > >& compressed_spike_data ) { - const size_t num_synapse_models = kernel().model_manager.get_num_connection_models(); + const size_t num_synapse_models = kernel::manager< ModelManager >().get_num_connection_models(); compressed_spike_data.clear(); compressed_spike_data.resize( num_synapse_models ); compressed_spike_data_map_.clear(); @@ -523,7 +527,7 @@ 
nest::SourceTable::fill_compressed_spike_data( // TODO: I believe that at this point compressible_sources_ is ordered by source gid. // Maybe one can exploit that to avoid searching with find() below. - for ( synindex syn_id = 0; syn_id < kernel().model_manager.get_num_connection_models(); ++syn_id ) + for ( synindex syn_id = 0; syn_id < kernel::manager< ModelManager >().get_num_connection_models(); ++syn_id ) { for ( size_t target_thread = 0; target_thread < static_cast< size_t >( compressible_sources_.size() ); ++target_thread ) @@ -537,7 +541,7 @@ nest::SourceTable::fill_compressed_spike_data( // Set up entry for new source const auto new_source_index = compressed_spike_data[ syn_id ].size(); - compressed_spike_data[ syn_id ].emplace_back( kernel().vp_manager.get_num_threads(), + compressed_spike_data[ syn_id ].emplace_back( kernel::manager< VPManager >().get_num_threads(), SpikeData( invalid_targetindex, invalid_synindex, invalid_lcid, 0 ) ); compressed_spike_data_map_[ syn_id ].insert( @@ -567,9 +571,9 @@ nest::SourceTable::dump_compressed_spike_data( : compressed_spike_data_map_ ) { for ( const auto& entry : tab ) { - kernel().write_to_dump( String::compose( "csdm : r%1 t%2 s%3 sx%4 tt%5", - kernel().mpi_manager.get_rank(), - kernel().vp_manager.get_thread_id(), + kernel::manager< KernelManager >().write_to_dump( String::compose( "csdm : r%1 t%2 s%3 sx%4 tt%5", + kernel::manager< MPIManager >().get_rank(), + kernel::manager< VPManager >().get_thread_id(), entry.first, entry.second.get_source_index(), entry.second.get_target_thread() ) ); @@ -582,9 +586,9 @@ nest::SourceTable::dump_compressed_spike_data( { for ( size_t tx = 0; tx < tab[ six ].size(); ++tx ) { - kernel().write_to_dump( String::compose( "csd : r%1 t%2 six%3 tx%4 l%5 tt%6", - kernel().mpi_manager.get_rank(), - kernel().vp_manager.get_thread_id(), + kernel::manager< KernelManager >().write_to_dump( String::compose( "csd : r%1 t%2 six%3 tx%4 l%5 tt%6", + kernel::manager< MPIManager >().get_rank(), 
+ kernel::manager< VPManager >().get_thread_id(), six, tx, tab[ six ][ tx ].get_lcid(), diff --git a/nestkernel/sp_manager.cpp b/nestkernel/sp_manager.cpp index 294fb6ec0c..8522fe7fa7 100644 --- a/nestkernel/sp_manager.cpp +++ b/nestkernel/sp_manager.cpp @@ -101,7 +101,8 @@ SPManager::get_status( DictionaryDatum& d ) sp_synapse = DictionaryDatum( new Dictionary() ); def< std::string >( sp_synapse, names::pre_synaptic_element, ( *i )->get_pre_synaptic_element_name() ); def< std::string >( sp_synapse, names::post_synaptic_element, ( *i )->get_post_synaptic_element_name() ); - const std::string model = kernel().model_manager.get_connection_model( ( *i )->get_synapse_model(), 0 ).get_name(); + const std::string model = + kernel::manager< ModelManager >().get_connection_model( ( *i )->get_synapse_model(), 0 ).get_name(); def< std::string >( sp_synapse, names::synapse_model, model ); def< bool >( sp_synapse, names::allow_autapses, ( *i )->allows_autapses() ); def< bool >( sp_synapse, names::allow_multapses, ( *i )->allows_multapses() ); @@ -162,7 +163,8 @@ SPManager::set_status( const DictionaryDatum& d ) // check that the user defined the min and max delay properly, if the // default delay is not used. 
- if ( not conn_builder->get_default_delay() and not kernel().connection_manager.get_user_set_delay_extrema() ) + if ( not conn_builder->get_default_delay() + and not kernel::manager< ConnectionManager >().get_user_set_delay_extrema() ) { throw BadProperty( "Structural Plasticity: to use different delays for synapses you must " @@ -203,11 +205,11 @@ SPManager::builder_max_delay() const void SPManager::disconnect( const size_t snode_id, Node* target, size_t target_thread, const size_t syn_id ) { - Node* const source = kernel().node_manager.get_node_or_proxy( snode_id ); + Node* const source = kernel::manager< NodeManager >().get_node_or_proxy( snode_id ); // normal nodes and devices with proxies if ( target->has_proxies() ) { - kernel().connection_manager.disconnect( target_thread, syn_id, snode_id, target->get_node_id() ); + kernel::manager< ConnectionManager >().disconnect( target_thread, syn_id, snode_id, target->get_node_id() ); } else if ( target->local_receiver() ) // normal devices { @@ -218,10 +220,10 @@ SPManager::disconnect( const size_t snode_id, Node* target, size_t target_thread if ( ( source->get_thread() != target_thread ) and ( source->has_proxies() ) ) { target_thread = source->get_thread(); - target = kernel().node_manager.get_node_or_proxy( target->get_node_id(), target_thread ); + target = kernel::manager< NodeManager >().get_node_or_proxy( target->get_node_id(), target_thread ); } - kernel().connection_manager.disconnect( target_thread, syn_id, snode_id, target->get_node_id() ); + kernel::manager< ConnectionManager >().disconnect( target_thread, syn_id, snode_id, target->get_node_id() ); } else // globally receiving devices iterate over all target threads { @@ -230,12 +232,12 @@ SPManager::disconnect( const size_t snode_id, Node* target, size_t target_thread { return; } - const size_t n_threads = kernel().vp_manager.get_num_threads(); + const size_t n_threads = kernel::manager< VPManager >().get_num_threads(); for ( size_t t = 0; t < n_threads; 
t++ ) { - target = kernel().node_manager.get_node_or_proxy( target->get_node_id(), t ); + target = kernel::manager< NodeManager >().get_node_or_proxy( target->get_node_id(), t ); target_thread = target->get_thread(); - kernel().connection_manager.disconnect( target_thread, syn_id, snode_id, target->get_node_id() ); + kernel::manager< ConnectionManager >().disconnect( target_thread, syn_id, snode_id, target->get_node_id() ); } } } @@ -246,12 +248,12 @@ SPManager::disconnect( NodeCollectionPTR sources, DictionaryDatum& conn_spec, DictionaryDatum& syn_spec ) { - if ( kernel().connection_manager.connections_have_changed() ) + if ( kernel::manager< ConnectionManager >().connections_have_changed() ) { #pragma omp parallel { - const size_t tid = kernel().vp_manager.get_thread_id(); - kernel().simulation_manager.update_connection_infrastructure( tid ); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); + kernel::manager< SimulationManager >().update_connection_infrastructure( tid ); } } @@ -265,7 +267,7 @@ SPManager::disconnect( NodeCollectionPTR sources, } const std::string rule_name = ( *conn_spec )[ names::rule ]; - if ( not kernel().connection_manager.valid_connection_rule( rule_name ) ) + if ( not kernel::manager< ConnectionManager >().valid_connection_rule( rule_name ) ) { throw BadProperty( "Unknown connectivity rule: " + rule_name ); } @@ -276,9 +278,9 @@ SPManager::disconnect( NodeCollectionPTR sources, for ( std::vector< SPBuilder* >::const_iterator i = sp_conn_builders_.begin(); i != sp_conn_builders_.end(); i++ ) { std::string synModel = getValue< std::string >( syn_spec, names::synapse_model ); - if ( ( *i )->get_synapse_model() == kernel().model_manager.get_synapse_model_id( synModel ) ) + if ( ( *i )->get_synapse_model() == kernel::manager< ModelManager >().get_synapse_model_id( synModel ) ) { - cb = kernel().connection_manager.get_conn_builder( rule_name, + cb = kernel::manager< ConnectionManager >().get_conn_builder( rule_name, sources, 
targets, /* third_out */ nullptr, @@ -291,7 +293,7 @@ SPManager::disconnect( NodeCollectionPTR sources, } else { - cb = kernel().connection_manager.get_conn_builder( rule_name, + cb = kernel::manager< ConnectionManager >().get_conn_builder( rule_name, sources, targets, /* third_out */ nullptr, @@ -305,7 +307,7 @@ SPManager::disconnect( NodeCollectionPTR sources, ALL_ENTRIES_ACCESSED( *syn_spec, "Connect", "Unread dictionary entries: " ); // Set flag before calling cb->disconnect() in case exception is thrown after some connections have been removed. - kernel().connection_manager.set_connections_have_changed(); + kernel::manager< ConnectionManager >().set_connections_have_changed(); cb->disconnect(); delete cb; @@ -347,8 +349,8 @@ SPManager::update_structural_plasticity( SPBuilder* sp_builder ) sp_builder->get_pre_synaptic_element_name(), pre_vacant_id, pre_vacant_n, pre_deleted_id, pre_deleted_n ); // Communicate the number of deleted pre-synaptic elements - kernel().mpi_manager.communicate( pre_deleted_id, pre_deleted_id_global, displacements ); - kernel().mpi_manager.communicate( pre_deleted_n, pre_deleted_n_global, displacements ); + kernel::manager< MPIManager >().communicate( pre_deleted_id, pre_deleted_id_global, displacements ); + kernel::manager< MPIManager >().communicate( pre_deleted_n, pre_deleted_n_global, displacements ); if ( pre_deleted_id_global.size() > 0 ) { @@ -365,8 +367,8 @@ SPManager::update_structural_plasticity( SPBuilder* sp_builder ) get_synaptic_elements( sp_builder->get_post_synaptic_element_name(), post_vacant_id, post_vacant_n, post_deleted_id, post_deleted_n ); // Communicate the number of deleted postsynaptic elements - kernel().mpi_manager.communicate( post_deleted_id, post_deleted_id_global, displacements ); - kernel().mpi_manager.communicate( post_deleted_n, post_deleted_n_global, displacements ); + kernel::manager< MPIManager >().communicate( post_deleted_id, post_deleted_id_global, displacements ); + kernel::manager< MPIManager 
>().communicate( post_deleted_n, post_deleted_n_global, displacements ); if ( post_deleted_id_global.size() > 0 ) { @@ -382,10 +384,10 @@ SPManager::update_structural_plasticity( SPBuilder* sp_builder ) } // Communicate vacant elements - kernel().mpi_manager.communicate( pre_vacant_id, pre_vacant_id_global, displacements ); - kernel().mpi_manager.communicate( pre_vacant_n, pre_vacant_n_global, displacements ); - kernel().mpi_manager.communicate( post_vacant_id, post_vacant_id_global, displacements ); - kernel().mpi_manager.communicate( post_vacant_n, post_vacant_n_global, displacements ); + kernel::manager< MPIManager >().communicate( pre_vacant_id, pre_vacant_id_global, displacements ); + kernel::manager< MPIManager >().communicate( pre_vacant_n, pre_vacant_n_global, displacements ); + kernel::manager< MPIManager >().communicate( post_vacant_id, post_vacant_id_global, displacements ); + kernel::manager< MPIManager >().communicate( post_vacant_n, post_vacant_n_global, displacements ); bool synapses_created = false; if ( pre_vacant_id_global.size() > 0 and post_vacant_id_global.size() > 0 ) @@ -395,7 +397,7 @@ SPManager::update_structural_plasticity( SPBuilder* sp_builder ) } if ( synapses_created or post_deleted_id.size() > 0 or pre_deleted_id.size() > 0 ) { - kernel().connection_manager.set_connections_have_changed(); + kernel::manager< ConnectionManager >().set_connections_have_changed(); } } @@ -455,7 +457,7 @@ SPManager::delete_synapses_from_pre( const std::vector< size_t >& pre_deleted_id std::vector< size_t >::const_iterator id_it; std::vector< int >::iterator n_it; - kernel().connection_manager.get_targets( pre_deleted_id, synapse_model, se_post_name, connectivity ); + kernel::manager< ConnectionManager >().get_targets( pre_deleted_id, synapse_model, se_post_name, connectivity ); id_it = pre_deleted_id.begin(); n_it = pre_deleted_n.begin(); @@ -463,7 +465,7 @@ SPManager::delete_synapses_from_pre( const std::vector< size_t >& pre_deleted_id for ( ; id_it != 
pre_deleted_id.end() and n_it != pre_deleted_n.end(); id_it++, n_it++, connectivity_it++ ) { // Communicate the list of targets - kernel().mpi_manager.communicate( *connectivity_it, global_targets, displacements ); + kernel::manager< MPIManager >().communicate( *connectivity_it, global_targets, displacements ); // shuffle only the first n items, n is the number of deleted synaptic // elements if ( -( *n_it ) > static_cast< int >( global_targets.size() ) ) @@ -487,10 +489,10 @@ SPManager::delete_synapse( const size_t snode_id, const std::string se_post_name ) { // get thread id - const size_t tid = kernel().vp_manager.get_thread_id(); - if ( kernel().node_manager.is_local_node_id( snode_id ) ) + const size_t tid = kernel::manager< VPManager >().get_thread_id(); + if ( kernel::manager< NodeManager >().is_local_node_id( snode_id ) ) { - Node* const source = kernel().node_manager.get_node_or_proxy( snode_id ); + Node* const source = kernel::manager< NodeManager >().get_node_or_proxy( snode_id ); const size_t source_thread = source->get_thread(); if ( tid == source_thread ) { @@ -498,13 +500,13 @@ SPManager::delete_synapse( const size_t snode_id, } } - if ( kernel().node_manager.is_local_node_id( tnode_id ) ) + if ( kernel::manager< NodeManager >().is_local_node_id( tnode_id ) ) { - Node* const target = kernel().node_manager.get_node_or_proxy( tnode_id ); + Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id ); const size_t target_thread = target->get_thread(); if ( tid == target_thread ) { - kernel().connection_manager.disconnect( tid, syn_id, snode_id, tnode_id ); + kernel::manager< ConnectionManager >().disconnect( tid, syn_id, snode_id, tnode_id ); target->connect_synaptic_element( se_post_name, -1 ); } @@ -533,7 +535,7 @@ SPManager::delete_synapses_from_post( std::vector< size_t >& post_deleted_id, std::vector< int >::iterator n_it; // Retrieve the connected sources - kernel().connection_manager.get_sources( post_deleted_id, 
synapse_model, connectivity ); + kernel::manager< ConnectionManager >().get_sources( post_deleted_id, synapse_model, connectivity ); id_it = post_deleted_id.begin(); n_it = post_deleted_n.begin(); @@ -542,7 +544,7 @@ SPManager::delete_synapses_from_post( std::vector< size_t >& post_deleted_id, for ( ; id_it != post_deleted_id.end() and n_it != post_deleted_n.end(); id_it++, n_it++, connectivity_it++ ) { // Communicate the list of sources - kernel().mpi_manager.communicate( *connectivity_it, global_sources, displacements ); + kernel::manager< MPIManager >().communicate( *connectivity_it, global_sources, displacements ); // shuffle only the first n items, n is the number of deleted synaptic // elements if ( -( *n_it ) > static_cast< int >( global_sources.size() ) ) @@ -570,7 +572,7 @@ nest::SPManager::get_synaptic_elements( std::string se_name, size_t n_deleted_id = 0; size_t node_id; int n; - size_t n_nodes = kernel().node_manager.size(); + size_t n_nodes = kernel::manager< NodeManager >().size(); se_vacant_id.clear(); se_vacant_n.clear(); se_deleted_id.clear(); @@ -587,9 +589,9 @@ nest::SPManager::get_synaptic_elements( std::string se_name, std::vector< int >::iterator deleted_n_it = se_deleted_n.begin(); SparseNodeArray::const_iterator node_it; - for ( size_t tid = 0; tid < kernel().vp_manager.get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) { - const SparseNodeArray& local_nodes = kernel().node_manager.get_local_nodes( tid ); + const SparseNodeArray& local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); SparseNodeArray::const_iterator node_it; for ( node_it = local_nodes.begin(); node_it < local_nodes.end(); node_it++ ) { @@ -672,17 +674,17 @@ nest::SPManager::global_shuffle( std::vector< size_t >& v, size_t n ) void nest::SPManager::enable_structural_plasticity() { - if ( kernel().vp_manager.get_num_threads() > 1 ) + if ( kernel::manager< VPManager >().get_num_threads() > 1 ) { 
throw KernelException( "Structural plasticity can not be used with multiple threads" ); } - if ( not kernel().connection_manager.get_keep_source_table() ) + if ( not kernel::manager< ConnectionManager >().get_keep_source_table() ) { throw KernelException( "Structural plasticity can not be enabled if keep_source_table has been " "set to false." ); } - if ( not kernel().connection_manager.use_compressed_spikes() ) + if ( not kernel::manager< ConnectionManager >().use_compressed_spikes() ) { throw KernelException( "Structural plasticity can not be enabled if use_compressed_spikes " diff --git a/nestkernel/sparse_node_array.cpp b/nestkernel/sparse_node_array.cpp index b2385a8471..1b8a459793 100644 --- a/nestkernel/sparse_node_array.cpp +++ b/nestkernel/sparse_node_array.cpp @@ -86,7 +86,8 @@ nest::SparseNodeArray::add_local_node( Node& node ) left_side_has_proxies_ = node.has_proxies(); // we now know which scale applies on which side of the split - const double proxy_scale = 1.0 / static_cast< double >( kernel().vp_manager.get_num_virtual_processes() ); + const double proxy_scale = + 1.0 / static_cast< double >( kernel::manager< VPManager >().get_num_virtual_processes() ); if ( left_side_has_proxies_ ) { left_scale_ = proxy_scale; diff --git a/nestkernel/spatial.cpp b/nestkernel/spatial.cpp index f776ba2216..4dd27c6fbe 100644 --- a/nestkernel/spatial.cpp +++ b/nestkernel/spatial.cpp @@ -92,7 +92,7 @@ get_position( NodeCollectionPTR layer_nc ) { size_t node_id = ( *it ).node_id; - if ( not kernel().node_manager.is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) { throw KernelException( "GetPosition is currently implemented for local nodes only." 
); } @@ -109,12 +109,12 @@ get_position( NodeCollectionPTR layer_nc ) std::vector< double > get_position( const size_t node_id ) { - if ( not kernel().node_manager.is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) { throw KernelException( "GetPosition is currently implemented for local nodes only." ); } - NodeCollectionPTR nc = kernel().node_manager.node_id_to_node_collection( node_id ); + NodeCollectionPTR nc = kernel::manager< NodeManager >().node_id_to_node_collection( node_id ); NodeCollectionMetadataPTR meta = nc->get_metadata(); if ( not meta ) @@ -149,7 +149,7 @@ displacement( NodeCollectionPTR layer_to_nc, NodeCollectionPTR layer_from_nc ) if ( layer_from_nc->size() == 1 ) { size_t node_id = layer_from_nc->operator[]( 0 ); - if ( not kernel().node_manager.is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) { throw KernelException( "Displacement is currently implemented for local nodes only." ); } @@ -168,7 +168,7 @@ displacement( NodeCollectionPTR layer_to_nc, NodeCollectionPTR layer_from_nc ) for ( NodeCollection::const_iterator it = layer_from_nc->begin(); it < layer_from_nc->end(); ++it ) { size_t node_id = ( *it ).node_id; - if ( not kernel().node_manager.is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) { throw KernelException( "Displacement is currently implemented for local nodes only." ); } @@ -203,7 +203,7 @@ displacement( NodeCollectionPTR layer_nc, const ArrayDatum point ) for ( NodeCollection::const_iterator it = layer_nc->begin(); it != layer_nc->end(); ++it ) { size_t node_id = ( *it ).node_id; - if ( not kernel().node_manager.is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) { throw KernelException( "Displacement is currently implemented for local nodes only." 
); } @@ -241,7 +241,7 @@ distance( NodeCollectionPTR layer_to_nc, NodeCollectionPTR layer_from_nc ) if ( layer_from_nc->size() == 1 ) { size_t node_id = layer_from_nc->operator[]( 0 ); - if ( not kernel().node_manager.is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) { throw KernelException( "Distance is currently implemented for local nodes only." ); } @@ -260,7 +260,7 @@ distance( NodeCollectionPTR layer_to_nc, NodeCollectionPTR layer_from_nc ) for ( NodeCollection::const_iterator it = layer_from_nc->begin(); it < layer_from_nc->end(); ++it ) { size_t node_id = ( *it ).node_id; - if ( not kernel().node_manager.is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) { throw KernelException( "Distance is currently implemented for local nodes only." ); } @@ -295,7 +295,7 @@ distance( NodeCollectionPTR layer_nc, const ArrayDatum point ) for ( NodeCollection::const_iterator it = layer_nc->begin(); it < layer_nc->end(); ++it ) { size_t node_id = ( *it ).node_id; - if ( not kernel().node_manager.is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) { throw KernelException( "Distance is currently implemented for local nodes only." ); } @@ -333,13 +333,13 @@ distance( const ArrayDatum conns ) size_t trgt = conn_id.get_target_node_id(); - if ( not kernel().node_manager.is_local_node_id( trgt ) ) + if ( not kernel::manager< NodeManager >().is_local_node_id( trgt ) ) { throw KernelException( "Distance is currently implemented for local nodes only." 
); } - NodeCollectionPTR trgt_nc = kernel().node_manager.node_id_to_node_collection( trgt ); + NodeCollectionPTR trgt_nc = kernel::manager< NodeManager >().node_id_to_node_collection( trgt ); NodeCollectionMetadataPTR meta = trgt_nc->get_metadata(); // distance is NaN if source, target is not spatially distributed @@ -406,7 +406,7 @@ connect_layers( NodeCollectionPTR source_nc, NodeCollectionPTR target_nc, const ALL_ENTRIES_ACCESSED( *connection_dict, "nest::CreateLayers", "Unread dictionary entries: " ); // Set flag before calling source->connect() in case exception is thrown after some connections have been created. - kernel().connection_manager.set_connections_have_changed(); + kernel::manager< ConnectionManager >().set_connections_have_changed(); source->connect( source_nc, target, target_nc, connector ); } diff --git a/nestkernel/stimulation_backend_mpi.cpp b/nestkernel/stimulation_backend_mpi.cpp index 14c1ace62f..7fd4469198 100644 --- a/nestkernel/stimulation_backend_mpi.cpp +++ b/nestkernel/stimulation_backend_mpi.cpp @@ -49,7 +49,7 @@ nest::StimulationBackendMPI::~StimulationBackendMPI() noexcept void nest::StimulationBackendMPI::initialize() { - auto nthreads = kernel().vp_manager.get_num_threads(); + auto nthreads = kernel::manager< VPManager >().get_num_threads(); device_map devices( nthreads ); devices_.swap( devices ); } @@ -117,7 +117,7 @@ nest::StimulationBackendMPI::prepare() } // need to be run only by the master thread : it is the case because this part is not running in parallel - size_t thread_id_master = kernel().vp_manager.get_thread_id(); + size_t thread_id_master = kernel::manager< VPManager >().get_thread_id(); // Create the connection with MPI // 1) take all the ports of the connections. 
Get port and update the list of device only for master for ( auto& it_device : devices_[ thread_id_master ] ) @@ -132,7 +132,7 @@ nest::StimulationBackendMPI::prepare() // it's not a new communicator comm = std::get< 0 >( comm_it->second ); // add the id of the device if there are a connection with the device. - if ( kernel().connection_manager.get_device_connected( + if ( kernel::manager< ConnectionManager >().get_device_connected( thread_id_master, it_device.second.second->get_local_device_id() ) ) { std::get< 1 >( comm_it->second )->push_back( it_device.second.second->get_node_id() ); @@ -146,10 +146,12 @@ nest::StimulationBackendMPI::prepare() // This is because the management of threads here is using MPI_THREAD_FUNNELED (see mpi_manager.cpp:119). comm = new MPI_Comm; auto vector_id_device = new std::vector< int >; // vector of ID device for the rank - int* vector_nb_device_th { new int[ kernel().vp_manager.get_num_threads() ] {} }; // number of device by thread - std::fill_n( vector_nb_device_th, kernel().vp_manager.get_num_threads(), 0 ); + int* vector_nb_device_th { + new int[ kernel::manager< VPManager >().get_num_threads() ] {} + }; // number of device by thread + std::fill_n( vector_nb_device_th, kernel::manager< VPManager >().get_num_threads(), 0 ); // add the id of the device if there is a connection with the device. 
- if ( kernel().connection_manager.get_device_connected( + if ( kernel::manager< ConnectionManager >().get_device_connected( thread_id_master, it_device.second.second->get_local_device_id() ) ) { vector_id_device->push_back( it_device.second.second->get_node_id() ); @@ -163,7 +165,7 @@ nest::StimulationBackendMPI::prepare() } // Add the id of device of the other thread in the vector_id_device and update the count of all device - for ( size_t id_thread = 0; id_thread < kernel().vp_manager.get_num_threads(); id_thread++ ) + for ( size_t id_thread = 0; id_thread < kernel::manager< VPManager >().get_num_threads(); id_thread++ ) { // don't do it again for the master thread if ( id_thread != thread_id_master ) @@ -171,7 +173,7 @@ nest::StimulationBackendMPI::prepare() for ( auto& it_device : devices_[ id_thread ] ) { // add the id of the device if there is a connection with the device. - if ( kernel().connection_manager.get_device_connected( + if ( kernel::manager< ConnectionManager >().get_device_connected( id_thread, it_device.second.second->get_local_device_id() ) ) { std::string port_name; @@ -280,7 +282,7 @@ nest::StimulationBackendMPI::cleanup() } // clear map of devices commMap_.clear(); - size_t thread_id_master = kernel().vp_manager.get_thread_id(); + size_t thread_id_master = kernel::manager< VPManager >().get_thread_id(); for ( auto& it_device : devices_[ thread_id_master ] ) { it_device.second.first = nullptr; @@ -316,12 +318,12 @@ nest::StimulationBackendMPI::get_port( const size_t index_node, const std::strin // (file contains only one line with name of the port) std::ostringstream basename; // get the path from the kernel - const std::string& path = kernel().io_manager.get_data_path(); + const std::string& path = kernel::manager< IOManager >().get_data_path(); if ( not path.empty() ) { basename << path << '/'; } - basename << kernel().io_manager.get_data_prefix(); + basename << kernel::manager< IOManager >().get_data_prefix(); // add the path from the 
label of the device if ( not label.empty() ) @@ -384,7 +386,7 @@ nest::StimulationBackendMPI::update_device( int* array_index, if ( data.first[ 0 ] != 0 ) { // if there are some data - size_t thread_id = kernel().vp_manager.get_thread_id(); + size_t thread_id = kernel::manager< VPManager >().get_thread_id(); int index_id_device = 0; // the index for the array of device in the data // get the first id of the device for the current thread // if the thread_id == 0, the index_id_device equals 0 diff --git a/nestkernel/stimulation_device.cpp b/nestkernel/stimulation_device.cpp index 77a7eb70b8..dcdd771cee 100644 --- a/nestkernel/stimulation_device.cpp +++ b/nestkernel/stimulation_device.cpp @@ -79,7 +79,7 @@ nest::StimulationDevice::pre_run_hook() void nest::StimulationDevice::set_initialized_() { - kernel().io_manager.enroll_stimulator( P_.stimulus_source_, *this, backend_params_ ); + kernel::manager< IOManager >().enroll_stimulator( P_.stimulus_source_, *this, backend_params_ ); } const std::string& @@ -111,7 +111,7 @@ nest::StimulationDevice::Parameters_::set( const DictionaryDatum& d ) if ( updateValue< std::string >( d, names::stimulus_source, stimulus_source ) ) { - if ( not kernel().io_manager.is_valid_stimulation_backend( stimulus_source ) ) + if ( not kernel::manager< IOManager >().is_valid_stimulation_backend( stimulus_source ) ) { std::string msg = String::compose( "Unknown input backend '%1'", stimulus_source ); throw BadProperty( msg ); @@ -155,7 +155,7 @@ nest::StimulationDevice::set_status( const DictionaryDatum& d ) } else { - kernel().io_manager.enroll_stimulator( ptmp.stimulus_source_, *this, d ); + kernel::manager< IOManager >().enroll_stimulator( ptmp.stimulus_source_, *this, d ); } // if we get here, temporaries contain consistent set of properties diff --git a/nestkernel/stopwatch.h b/nestkernel/stopwatch.h index 637bb8ad80..aae04768ad 100644 --- a/nestkernel/stopwatch.h +++ b/nestkernel/stopwatch.h @@ -528,10 +528,10 @@ Stopwatch< detailed_timer, 
std::enable_if_t< use_threaded_timers and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::start() { - kernel().vp_manager.assert_thread_parallel(); + kernel::manager< VPManager >().assert_thread_parallel(); - walltime_timers_[ kernel().vp_manager.get_thread_id() ].start(); - cputime_timers_[ kernel().vp_manager.get_thread_id() ].start(); + walltime_timers_[ kernel::manager< VPManager >().get_thread_id() ].start(); + cputime_timers_[ kernel::manager< VPManager >().get_thread_id() ].start(); } template < StopwatchGranularity detailed_timer > @@ -541,10 +541,10 @@ Stopwatch< detailed_timer, std::enable_if_t< use_threaded_timers and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::stop() { - kernel().vp_manager.assert_thread_parallel(); + kernel::manager< VPManager >().assert_thread_parallel(); - walltime_timers_[ kernel().vp_manager.get_thread_id() ].stop(); - cputime_timers_[ kernel().vp_manager.get_thread_id() ].stop(); + walltime_timers_[ kernel::manager< VPManager >().get_thread_id() ].stop(); + cputime_timers_[ kernel::manager< VPManager >().get_thread_id() ].stop(); } template < StopwatchGranularity detailed_timer > @@ -554,9 +554,9 @@ Stopwatch< detailed_timer, std::enable_if_t< use_threaded_timers and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::is_running_() const { - kernel().vp_manager.assert_thread_parallel(); + kernel::manager< VPManager >().assert_thread_parallel(); - return walltime_timers_[ kernel().vp_manager.get_thread_id() ].is_running_(); + return walltime_timers_[ kernel::manager< VPManager >().get_thread_id() ].is_running_(); } template < StopwatchGranularity detailed_timer > @@ -567,9 +567,9 @@ Stopwatch< detailed_timer, and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::elapsed( timers::timeunit_t timeunit ) const { - kernel().vp_manager.assert_thread_parallel(); + kernel::manager< VPManager 
>().assert_thread_parallel(); - return walltime_timers_[ kernel().vp_manager.get_thread_id() ].elapsed( timeunit ); + return walltime_timers_[ kernel::manager< VPManager >().get_thread_id() ].elapsed( timeunit ); } template < StopwatchGranularity detailed_timer > @@ -581,9 +581,9 @@ Stopwatch< detailed_timer, timers::timeunit_t timeunit, std::ostream& os ) const { - kernel().vp_manager.assert_thread_parallel(); + kernel::manager< VPManager >().assert_thread_parallel(); - walltime_timers_[ kernel().vp_manager.get_thread_id() ].print( msg, timeunit, os ); + walltime_timers_[ kernel::manager< VPManager >().get_thread_id() ].print( msg, timeunit, os ); } template < StopwatchGranularity detailed_timer > @@ -593,9 +593,9 @@ Stopwatch< detailed_timer, std::enable_if_t< use_threaded_timers and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::reset() { - kernel().vp_manager.assert_single_threaded(); + kernel::manager< VPManager >().assert_single_threaded(); - const size_t num_threads = kernel().vp_manager.get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); walltime_timers_.resize( num_threads ); cputime_timers_.resize( num_threads ); for ( size_t i = 0; i < num_threads; ++i ) diff --git a/nestkernel/synaptic_element.cpp b/nestkernel/synaptic_element.cpp index a06e866f4f..7b0e1a730d 100644 --- a/nestkernel/synaptic_element.cpp +++ b/nestkernel/synaptic_element.cpp @@ -55,7 +55,7 @@ nest::SynapticElement::SynapticElement( const SynapticElement& se ) , growth_rate_( se.growth_rate_ ) , tau_vacant_( se.tau_vacant_ ) { - growth_curve_ = kernel().sp_manager.new_growth_curve( se.growth_curve_->get_name() ); + growth_curve_ = kernel::manager< SPManager >().new_growth_curve( se.growth_curve_->get_name() ); assert( growth_curve_ ); DictionaryDatum nc_parameters = DictionaryDatum( new Dictionary ); se.get( nc_parameters ); @@ -68,7 +68,7 @@ nest::SynapticElement::operator=( const SynapticElement& other ) if ( 
this != &other ) { // 1: allocate new memory and copy the elements - GrowthCurve* new_nc = kernel().sp_manager.new_growth_curve( other.growth_curve_->get_name() ); + GrowthCurve* new_nc = kernel::manager< SPManager >().new_growth_curve( other.growth_curve_->get_name() ); DictionaryDatum nc_parameters = DictionaryDatum( new Dictionary ); other.get( nc_parameters ); @@ -123,7 +123,7 @@ nest::SynapticElement::set( const DictionaryDatum& d ) Name growth_curve_name( getValue< std::string >( d, names::growth_curve ) ); if ( not growth_curve_->is( growth_curve_name ) ) { - growth_curve_ = kernel().sp_manager.new_growth_curve( growth_curve_name ); + growth_curve_ = kernel::manager< SPManager >().new_growth_curve( growth_curve_name ); } } growth_curve_->set( d ); diff --git a/nestkernel/target_identifier.h b/nestkernel/target_identifier.h index 94f9bbb88c..a7edf4234c 100644 --- a/nestkernel/target_identifier.h +++ b/nestkernel/target_identifier.h @@ -140,7 +140,7 @@ class TargetIdentifierIndex get_target_ptr( const size_t tid ) const { assert( target_ != invalid_targetindex ); - return kernel().node_manager.thread_lid_to_node( tid, target_ ); + return kernel::manager< NodeManager >().thread_lid_to_node( tid, target_ ); } size_t @@ -170,7 +170,7 @@ class TargetIdentifierIndex inline void TargetIdentifierIndex::set_target( Node* target ) { - kernel().node_manager.ensure_valid_thread_local_ids(); + kernel::manager< NodeManager >().ensure_valid_thread_local_ids(); size_t target_lid = target->get_thread_lid(); if ( target_lid > max_targetindex ) diff --git a/nestkernel/target_table.cpp b/nestkernel/target_table.cpp index 10af1c991e..4fc30fdafc 100644 --- a/nestkernel/target_table.cpp +++ b/nestkernel/target_table.cpp @@ -33,13 +33,13 @@ void nest::TargetTable::initialize() { - const size_t num_threads = kernel().vp_manager.get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); targets_.resize( num_threads ); 
secondary_send_buffer_pos_.resize( num_threads ); #pragma omp parallel { - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); targets_[ tid ] = std::vector< std::vector< Target > >(); secondary_send_buffer_pos_[ tid ] = std::vector< std::vector< std::vector< size_t > > >(); } // of omp parallel @@ -57,7 +57,7 @@ nest::TargetTable::prepare( const size_t tid ) { // add one to max_num_local_nodes to avoid possible overflow in case // of rounding errors - const size_t num_local_nodes = kernel().node_manager.get_max_num_local_nodes() + 1; + const size_t num_local_nodes = kernel::manager< NodeManager >().get_max_num_local_nodes() + 1; targets_[ tid ].resize( num_local_nodes ); @@ -66,7 +66,7 @@ nest::TargetTable::prepare( const size_t tid ) for ( size_t lid = 0; lid < num_local_nodes; ++lid ) { // resize to maximal possible synapse-type index - secondary_send_buffer_pos_[ tid ][ lid ].resize( kernel().model_manager.get_num_connection_models() ); + secondary_send_buffer_pos_[ tid ][ lid ].resize( kernel::manager< ModelManager >().get_num_connection_models() ); } } @@ -104,7 +104,7 @@ nest::TargetTable::add_target( const size_t tid, const size_t target_rank, const { const SecondaryTargetDataFields& secondary_fields = target_data.secondary_data; const size_t send_buffer_pos = secondary_fields.get_recv_buffer_pos() - + kernel().mpi_manager.get_send_displacement_secondary_events_in_int( target_rank ); + + kernel::manager< MPIManager >().get_send_displacement_secondary_events_in_int( target_rank ); const synindex syn_id = secondary_fields.get_syn_id(); assert( syn_id < secondary_send_buffer_pos_[ tid ][ lid ].size() ); diff --git a/nestkernel/target_table_devices.cpp b/nestkernel/target_table_devices.cpp index c162239f00..4aadb09a37 100644 --- a/nestkernel/target_table_devices.cpp +++ b/nestkernel/target_table_devices.cpp @@ -41,7 +41,7 @@ TargetTableDevices::~TargetTableDevices() void 
TargetTableDevices::initialize() { - const size_t num_threads = kernel().vp_manager.get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); target_to_devices_.resize( num_threads ); target_from_devices_.resize( num_threads ); sending_devices_node_ids_.resize( num_threads ); @@ -82,28 +82,30 @@ TargetTableDevices::resize_to_number_of_neurons() { #pragma omp parallel { - const size_t tid = kernel().vp_manager.get_thread_id(); - target_to_devices_[ tid ].resize( kernel().node_manager.get_max_num_local_nodes() + 1 ); - target_from_devices_[ tid ].resize( kernel().node_manager.get_num_thread_local_devices( tid ) + 1 ); - sending_devices_node_ids_[ tid ].resize( kernel().node_manager.get_num_thread_local_devices( tid ) + 1 ); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); + target_to_devices_[ tid ].resize( kernel::manager< NodeManager >().get_max_num_local_nodes() + 1 ); + target_from_devices_[ tid ].resize( kernel::manager< NodeManager >().get_num_thread_local_devices( tid ) + 1 ); + sending_devices_node_ids_[ tid ].resize( kernel::manager< NodeManager >().get_num_thread_local_devices( tid ) + 1 ); } // end omp parallel } void TargetTableDevices::resize_to_number_of_synapse_types() { - kernel().vp_manager.assert_thread_parallel(); + kernel::manager< VPManager >().assert_thread_parallel(); - const size_t tid = kernel().vp_manager.get_thread_id(); + const size_t tid = kernel::manager< VPManager >().get_thread_id(); for ( size_t lid = 0; lid < target_to_devices_.at( tid ).size(); ++lid ) { // make sure this device has support for all synapse types - target_to_devices_.at( tid ).at( lid ).resize( kernel().model_manager.get_num_connection_models(), nullptr ); + target_to_devices_.at( tid ).at( lid ).resize( + kernel::manager< ModelManager >().get_num_connection_models(), nullptr ); } for ( size_t ldid = 0; ldid < target_from_devices_.at( tid ).size(); ++ldid ) { // make sure this device has support for all 
synapse types - target_from_devices_.at( tid ).at( ldid ).resize( kernel().model_manager.get_num_connection_models(), nullptr ); + target_from_devices_.at( tid ).at( ldid ).resize( + kernel::manager< ModelManager >().get_num_connection_models(), nullptr ); } } @@ -117,8 +119,8 @@ TargetTableDevices::get_connections_to_devices_( const size_t requested_source_n { if ( requested_source_node_id != 0 ) { - const size_t lid = kernel().vp_manager.node_id_to_lid( requested_source_node_id ); - if ( kernel().vp_manager.lid_to_node_id( lid ) != requested_source_node_id ) + const size_t lid = kernel::manager< VPManager >().node_id_to_lid( requested_source_node_id ); + if ( kernel::manager< VPManager >().lid_to_node_id( lid ) != requested_source_node_id ) { return; } @@ -143,7 +145,7 @@ TargetTableDevices::get_connections_to_device_for_lid_( const size_t lid, { if ( target_to_devices_[ tid ][ lid ].size() > 0 ) { - const size_t source_node_id = kernel().vp_manager.lid_to_node_id( lid ); + const size_t source_node_id = kernel::manager< VPManager >().lid_to_node_id( lid ); // not the valid connector if ( source_node_id > 0 and target_to_devices_[ tid ][ lid ][ syn_id ] ) { @@ -168,7 +170,7 @@ TargetTableDevices::get_connections_from_devices_( const size_t requested_source const size_t source_node_id = *it; if ( source_node_id > 0 and ( requested_source_node_id == source_node_id or requested_source_node_id == 0 ) ) { - const Node* source = kernel().node_manager.get_node_or_proxy( source_node_id, tid ); + const Node* source = kernel::manager< NodeManager >().get_node_or_proxy( source_node_id, tid ); const size_t ldid = source->get_local_device_id(); if ( target_from_devices_[ tid ][ ldid ].size() > 0 ) @@ -210,12 +212,12 @@ TargetTableDevices::add_connection_to_device( Node& source, const double d, const double w ) { - const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); + const size_t lid = kernel::manager< VPManager >().node_id_to_lid( source_node_id ); 
assert( lid < target_to_devices_[ tid ].size() ); assert( syn_id < target_to_devices_[ tid ][ lid ].size() ); - kernel() - .model_manager.get_connection_model( syn_id, tid ) + kernel::manager< ModelManager >() + .get_connection_model( syn_id, tid ) .add_connection( source, target, target_to_devices_[ tid ][ lid ], syn_id, p, d, w ); } @@ -233,8 +235,8 @@ TargetTableDevices::add_connection_from_device( Node& source, assert( ldid < target_from_devices_[ tid ].size() ); assert( syn_id < target_from_devices_[ tid ][ ldid ].size() ); - kernel() - .model_manager.get_connection_model( syn_id, tid ) + kernel::manager< ModelManager >() + .get_connection_model( syn_id, tid ) .add_connection( source, target, target_from_devices_[ tid ][ ldid ], syn_id, p, d, w ); // store node ID of sending device diff --git a/nestkernel/target_table_devices.h b/nestkernel/target_table_devices.h index 6d72f42ad3..d8feef6367 100644 --- a/nestkernel/target_table_devices.h +++ b/nestkernel/target_table_devices.h @@ -268,7 +268,7 @@ TargetTableDevices::send_to_device( const size_t tid, Event& e, const std::vector< ConnectorModel* >& cm ) { - const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); + const size_t lid = kernel::manager< VPManager >().node_id_to_lid( source_node_id ); for ( std::vector< ConnectorBase* >::iterator it = target_to_devices_[ tid ][ lid ].begin(); it != target_to_devices_[ tid ][ lid ].end(); ++it ) @@ -286,7 +286,7 @@ TargetTableDevices::send_to_device( const size_t tid, SecondaryEvent& e, const std::vector< ConnectorModel* >& cm ) { - const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); + const size_t lid = kernel::manager< VPManager >().node_id_to_lid( source_node_id ); for ( auto& synid : e.get_supported_syn_ids() ) { if ( target_to_devices_[ tid ][ lid ][ synid ] ) @@ -303,7 +303,7 @@ TargetTableDevices::get_synapse_status_to_device( const size_t tid, DictionaryDatum& dict, const size_t lcid ) const { - const size_t lid = 
kernel().vp_manager.node_id_to_lid( source_node_id ); + const size_t lid = kernel::manager< VPManager >().node_id_to_lid( source_node_id ); if ( target_to_devices_[ tid ][ lid ][ syn_id ] ) { target_to_devices_[ tid ][ lid ][ syn_id ]->get_synapse_status( tid, lcid, dict ); @@ -318,7 +318,7 @@ TargetTableDevices::set_synapse_status_to_device( const size_t tid, const DictionaryDatum& dict, const size_t lcid ) { - const size_t lid = kernel().vp_manager.node_id_to_lid( source_node_id ); + const size_t lid = kernel::manager< VPManager >().node_id_to_lid( source_node_id ); if ( target_to_devices_[ tid ][ lid ][ syn_id ] ) { target_to_devices_[ tid ][ lid ][ syn_id ]->set_synapse_status( lcid, dict, cm ); diff --git a/nestkernel/universal_data_logger.h b/nestkernel/universal_data_logger.h index f78bf95be7..4dab40df54 100644 --- a/nestkernel/universal_data_logger.h +++ b/nestkernel/universal_data_logger.h @@ -626,7 +626,7 @@ DynamicUniversalDataLogger< HostNode >::DataLogger_::init() // Next recording step is in current slice or beyond, indicates that // buffer is properly initialized. - if ( next_rec_step_ >= kernel().simulation_manager.get_slice_origin().get_steps() ) + if ( next_rec_step_ >= kernel::manager< SimulationManager >().get_slice_origin().get_steps() ) { return; } @@ -643,14 +643,15 @@ DynamicUniversalDataLogger< HostNode >::DataLogger_::init() // left of update intervals, and we want time stamps at right end of // update interval to be multiples of recording interval. Need to add // +1 because the division result is rounded down. 
- next_rec_step_ = ( kernel().simulation_manager.get_time().get_steps() / rec_int_steps_ + 1 ) * rec_int_steps_ - 1; + next_rec_step_ = + ( kernel::manager< SimulationManager >().get_time().get_steps() / rec_int_steps_ + 1 ) * rec_int_steps_ - 1; // If offset is not 0, adjust next recording step to account for it by first setting next recording // step to be offset and then iterating until the variable is greater than current simulation time. if ( recording_offset_.get_steps() != 0 ) { next_rec_step_ = recording_offset_.get_steps() - 1; // shifted one to left - while ( next_rec_step_ <= kernel().simulation_manager.get_time().get_steps() ) + while ( next_rec_step_ <= kernel::manager< SimulationManager >().get_time().get_steps() ) { next_rec_step_ += rec_int_steps_; } @@ -658,7 +659,7 @@ DynamicUniversalDataLogger< HostNode >::DataLogger_::init() // number of data points per slice const long recs_per_slice = static_cast< long >( - std::ceil( kernel().connection_manager.get_min_delay() / static_cast< double >( rec_int_steps_ ) ) ); + std::ceil( kernel::manager< ConnectionManager >().get_min_delay() / static_cast< double >( rec_int_steps_ ) ) ); data_.resize( 2, DataLoggingReply::Container( recs_per_slice, DataLoggingReply::Item( num_vars_ ) ) ); @@ -675,7 +676,7 @@ DynamicUniversalDataLogger< HostNode >::DataLogger_::record_data( const HostNode return; } - const size_t wt = kernel().event_delivery_manager.write_toggle(); + const size_t wt = kernel::manager< EventDeliveryManager >().write_toggle(); assert( wt < next_rec_.size() ); assert( wt < data_.size() ); @@ -723,13 +724,13 @@ DynamicUniversalDataLogger< HostNode >::DataLogger_::handle( HostNode& host, con assert( data_.size() == 2 ); // get read toggle and start and end of slice - const size_t rt = kernel().event_delivery_manager.read_toggle(); + const size_t rt = kernel::manager< EventDeliveryManager >().read_toggle(); assert( not data_[ rt ].empty() ); // Check if we have valid data, i.e., data with time stamps 
within the // past time slice. This may not be the case if the node has been frozen. // In that case, we still reset the recording marker, to prepare for the next round. - if ( data_[ rt ][ 0 ].timestamp <= kernel().simulation_manager.get_previous_slice_origin() ) + if ( data_[ rt ][ 0 ].timestamp <= kernel::manager< SimulationManager >().get_previous_slice_origin() ) { next_rec_[ rt ] = 0; return; @@ -757,7 +758,7 @@ DynamicUniversalDataLogger< HostNode >::DataLogger_::handle( HostNode& host, con reply.set_port( request.get_port() ); // send it off - kernel().event_delivery_manager.send_to_node( reply ); + kernel::manager< EventDeliveryManager >().send_to_node( reply ); } template < typename HostNode > @@ -827,7 +828,7 @@ UniversalDataLogger< HostNode >::DataLogger_::init() // Next recording step is in current slice or beyond, indicates that // buffer is properly initialized. - if ( next_rec_step_ >= kernel().simulation_manager.get_slice_origin().get_steps() ) + if ( next_rec_step_ >= kernel::manager< SimulationManager >().get_slice_origin().get_steps() ) { return; } @@ -844,14 +845,15 @@ UniversalDataLogger< HostNode >::DataLogger_::init() // left of update intervals, and we want time stamps at right end of // update interval to be multiples of recording interval. Need to add // +1 because the division result is rounded down. - next_rec_step_ = ( kernel().simulation_manager.get_time().get_steps() / rec_int_steps_ + 1 ) * rec_int_steps_ - 1; + next_rec_step_ = + ( kernel::manager< SimulationManager >().get_time().get_steps() / rec_int_steps_ + 1 ) * rec_int_steps_ - 1; // If offset is not 0, adjust next recording step to account for it by first setting next recording // step to be offset and then iterating until the variable is greater than current simulation time. 
if ( recording_offset_.get_steps() != 0 ) { next_rec_step_ = recording_offset_.get_steps() - 1; // shifted one to left - while ( next_rec_step_ <= kernel().simulation_manager.get_time().get_steps() ) + while ( next_rec_step_ <= kernel::manager< SimulationManager >().get_time().get_steps() ) { next_rec_step_ += rec_int_steps_; } @@ -859,7 +861,7 @@ UniversalDataLogger< HostNode >::DataLogger_::init() // number of data points per slice const long recs_per_slice = static_cast< long >( - std::ceil( kernel().connection_manager.get_min_delay() / static_cast< double >( rec_int_steps_ ) ) ); + std::ceil( kernel::manager< ConnectionManager >().get_min_delay() / static_cast< double >( rec_int_steps_ ) ) ); data_.resize( 2, DataLoggingReply::Container( recs_per_slice, DataLoggingReply::Item( num_vars_ ) ) ); @@ -876,7 +878,7 @@ UniversalDataLogger< HostNode >::DataLogger_::record_data( const HostNode& host, return; } - const size_t wt = kernel().event_delivery_manager.write_toggle(); + const size_t wt = kernel::manager< EventDeliveryManager >().write_toggle(); assert( wt < next_rec_.size() ); assert( wt < data_.size() ); @@ -925,13 +927,13 @@ UniversalDataLogger< HostNode >::DataLogger_::handle( HostNode& host, const Data assert( data_.size() == 2 ); // get read toggle and start and end of slice - const size_t rt = kernel().event_delivery_manager.read_toggle(); + const size_t rt = kernel::manager< EventDeliveryManager >().read_toggle(); assert( not data_[ rt ].empty() ); // Check if we have valid data, i.e., data with time stamps within the // past time slice. This may not be the case if the node has been frozen. // In that case, we still reset the recording marker, to prepare for the next round. 
- if ( data_[ rt ][ 0 ].timestamp <= kernel().simulation_manager.get_previous_slice_origin() ) + if ( data_[ rt ][ 0 ].timestamp <= kernel::manager< SimulationManager >().get_previous_slice_origin() ) { next_rec_[ rt ] = 0; return; @@ -959,7 +961,7 @@ UniversalDataLogger< HostNode >::DataLogger_::handle( HostNode& host, const Data reply.set_port( request.get_port() ); // send it off - kernel().event_delivery_manager.send_to_node( reply ); + kernel::manager< EventDeliveryManager >().send_to_node( reply ); } } // namespace nest diff --git a/nestkernel/vp_manager.cpp b/nestkernel/vp_manager.cpp index e9f3b21770..69f09b1a23 100644 --- a/nestkernel/vp_manager.cpp +++ b/nestkernel/vp_manager.cpp @@ -112,11 +112,11 @@ nest::VPManager::set_status( const DictionaryDatum& d ) { if ( not n_threads_updated ) { - n_threads = n_vps / kernel().mpi_manager.get_num_processes(); + n_threads = n_vps / kernel::manager< MPIManager >().get_num_processes(); } - const bool n_threads_conflict = n_vps / kernel().mpi_manager.get_num_processes() != n_threads; - const bool n_procs_conflict = n_vps % kernel().mpi_manager.get_num_processes() != 0; + const bool n_threads_conflict = n_vps / kernel::manager< MPIManager >().get_num_processes() != n_threads; + const bool n_procs_conflict = n_vps % kernel::manager< MPIManager >().get_num_processes() != 0; if ( n_threads_conflict or n_procs_conflict ) { throw BadProperty( @@ -133,23 +133,23 @@ nest::VPManager::set_status( const DictionaryDatum& d ) if ( n_threads_updated or n_vps_updated ) { std::vector< std::string > errors; - if ( kernel().node_manager.size() > 0 ) + if ( kernel::manager< NodeManager >().size() > 0 ) { errors.push_back( "Nodes exist" ); } - if ( kernel().connection_manager.get_user_set_delay_extrema() ) + if ( kernel::manager< ConnectionManager >().get_user_set_delay_extrema() ) { errors.push_back( "Delay extrema have been set" ); } - if ( kernel().simulation_manager.has_been_simulated() ) + if ( kernel::manager< SimulationManager 
>().has_been_simulated() ) { errors.push_back( "Network has been simulated" ); } - if ( kernel().model_manager.are_model_defaults_modified() ) + if ( kernel::manager< ModelManager >().are_model_defaults_modified() ) { errors.push_back( "Model defaults were modified" ); } - if ( kernel().sp_manager.is_structural_plasticity_enabled() and n_threads > 1 ) + if ( kernel::manager< SPManager >().is_structural_plasticity_enabled() and n_threads > 1 ) { errors.push_back( "Structural plasticity enabled: multithreading cannot be enabled" ); } @@ -175,7 +175,7 @@ nest::VPManager::set_status( const DictionaryDatum& d ) LOG( M_WARNING, "VPManager::set_status()", msg ); } - kernel().change_number_of_threads( n_threads ); + kernel::manager< KernelManager >().change_number_of_threads( n_threads ); } } @@ -189,7 +189,7 @@ nest::VPManager::get_status( DictionaryDatum& d ) void nest::VPManager::set_num_threads( size_t n_threads ) { - assert( not( kernel().sp_manager.is_structural_plasticity_enabled() and n_threads > 1 ) ); + assert( not( kernel::manager< SPManager >().is_structural_plasticity_enabled() and n_threads > 1 ) ); n_threads_ = n_threads; #ifdef _OPENMP diff --git a/nestkernel/vp_manager.h b/nestkernel/vp_manager.h index c4227d2596..b7621eac7a 100644 --- a/nestkernel/vp_manager.h +++ b/nestkernel/vp_manager.h @@ -209,7 +209,8 @@ VPManager::assert_thread_parallel() const inline size_t VPManager::get_vp() const { - return kernel().mpi_manager.get_rank() + get_thread_id() * kernel().mpi_manager.get_num_processes(); + return kernel::manager< MPIManager >().get_rank() + + get_thread_id() * kernel::manager< MPIManager >().get_num_processes(); } inline size_t @@ -221,25 +222,25 @@ VPManager::node_id_to_vp( const size_t node_id ) const inline size_t VPManager::vp_to_thread( const size_t vp ) const { - return vp / kernel().mpi_manager.get_num_processes(); + return vp / kernel::manager< MPIManager >().get_num_processes(); } inline size_t VPManager::get_num_virtual_processes() const { 
- return get_num_threads() * kernel().mpi_manager.get_num_processes(); + return get_num_threads() * kernel::manager< MPIManager >().get_num_processes(); } inline bool VPManager::is_local_vp( const size_t vp ) const { - return kernel().mpi_manager.get_process_id_of_vp( vp ) == kernel().mpi_manager.get_rank(); + return kernel::manager< MPIManager >().get_process_id_of_vp( vp ) == kernel::manager< MPIManager >().get_rank(); } inline size_t VPManager::thread_to_vp( const size_t tid ) const { - return tid * kernel().mpi_manager.get_num_processes() + kernel().mpi_manager.get_rank(); + return tid * kernel::manager< MPIManager >().get_num_processes() + kernel::manager< MPIManager >().get_rank(); } inline bool @@ -265,7 +266,7 @@ VPManager::lid_to_node_id( const size_t lid ) const inline size_t VPManager::get_num_assigned_ranks_per_thread() const { - return std::ceil( static_cast< double >( kernel().mpi_manager.get_num_processes() ) / n_threads_ ); + return std::ceil( static_cast< double >( kernel::manager< MPIManager >().get_num_processes() ) / n_threads_ ); } inline size_t @@ -282,9 +283,9 @@ VPManager::get_end_rank_per_thread( const size_t rank_start, const size_t num_as // if we have more threads than ranks, or if ranks can not be // distributed evenly on threads, we need to make sure, that all // threads care only about existing ranks - if ( rank_end > kernel().mpi_manager.get_num_processes() ) + if ( rank_end > kernel::manager< MPIManager >().get_num_processes() ) { - rank_end = std::max( rank_start, kernel().mpi_manager.get_num_processes() ); + rank_end = std::max( rank_start, kernel::manager< MPIManager >().get_num_processes() ); } return rank_end; From 27358c0efa42d97e9853380262749ec8dea97303 Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Wed, 13 Aug 2025 13:06:39 +0200 Subject: [PATCH 09/23] Minimized layer_impl.h --- nestkernel/layer.h | 490 ++++++++++++++++++++++++++++++++++++++++ nestkernel/layer_impl.h | 490 ---------------------------------------- 2 files 
changed, 490 insertions(+), 490 deletions(-) diff --git a/nestkernel/layer.h b/nestkernel/layer.h index 8e425e896d..054f79f35c 100644 --- a/nestkernel/layer.h +++ b/nestkernel/layer.h @@ -527,6 +527,496 @@ class MaskedLayer MaskDatum mask_; }; +inline void +AbstractLayer::set_node_collection( NodeCollectionPTR node_collection ) +{ + node_collection_ = node_collection; +} + + +inline NodeCollectionPTR +AbstractLayer::get_node_collection() +{ + return node_collection_; +} + +template < int D > +inline MaskedLayer< D >::MaskedLayer( Layer< D >& layer, + const MaskDatum& maskd, + bool allow_oversized, + NodeCollectionPTR node_collection ) + : mask_( maskd ) +{ + ntree_ = layer.get_global_positions_ntree( node_collection ); + + check_mask_( layer, allow_oversized ); +} + +template < int D > +inline MaskedLayer< D >::MaskedLayer( Layer< D >& layer, + const MaskDatum& maskd, + bool allow_oversized, + Layer< D >& target, + NodeCollectionPTR node_collection ) + : mask_( maskd ) +{ + ntree_ = layer.get_global_positions_ntree( + target.get_periodic_mask(), target.get_lower_left(), target.get_extent(), node_collection ); + + check_mask_( target, allow_oversized ); + mask_ = new ConverseMask< D >( dynamic_cast< const Mask< D >& >( *mask_ ) ); +} + +template < int D > +inline MaskedLayer< D >::~MaskedLayer() +{ +} + +template < int D > +inline typename Ntree< D, size_t >::masked_iterator +MaskedLayer< D >::begin( const Position< D >& anchor ) +{ + try + { + return ntree_->masked_begin( dynamic_cast< const Mask< D >& >( *mask_ ), anchor ); + } + catch ( std::bad_cast& e ) + { + throw BadProperty( "Mask is incompatible with layer." 
); + } +} + +template < int D > +inline typename Ntree< D, size_t >::masked_iterator +MaskedLayer< D >::end() +{ + return ntree_->masked_end(); +} + +template < int D > +inline Layer< D >::Layer() +{ + // Default center (0,0) and extent (1,1) + for ( int i = 0; i < D; ++i ) + { + lower_left_[ i ] = -0.5; + extent_[ i ] = 1.0; + } +} + +template < int D > +inline Layer< D >::Layer( const Layer& other_layer ) + : AbstractLayer( other_layer ) + , lower_left_( other_layer.lower_left_ ) + , extent_( other_layer.extent_ ) + , periodic_( other_layer.periodic_ ) +{ +} + +template < int D > +inline Layer< D >::~Layer() +{ + if ( cached_ntree_md_ == get_metadata() ) + { + clear_ntree_cache_(); + } + + if ( cached_vector_md_ == get_metadata() ) + { + clear_vector_cache_(); + } +} + +template < int D > +inline Position< D > +Layer< D >::compute_displacement( const Position< D >& from_pos, const size_t to_lid ) const +{ + return compute_displacement( from_pos, get_position( to_lid ) ); +} + +template < int D > +inline std::vector< double > +Layer< D >::compute_displacement( const std::vector< double >& from_pos, const size_t to_lid ) const +{ + return std::vector< double >( compute_displacement( Position< D >( from_pos ), to_lid ).get_vector() ); +} + +template < int D > +inline double +Layer< D >::compute_distance( const Position< D >& from_pos, const size_t lid ) const +{ + return compute_displacement( from_pos, lid ).length(); +} + +template < int D > +inline double +Layer< D >::compute_distance( const std::vector< double >& from_pos, const size_t lid ) const +{ + return compute_displacement( Position< D >( from_pos ), lid ).length(); +} + +template < int D > +inline double +Layer< D >::compute_distance( const std::vector< double >& from_pos, const std::vector< double >& to_pos ) const +{ + double squared_displacement = 0; + for ( unsigned int i = 0; i < D; ++i ) + { + const double displacement = compute_displacement( from_pos, to_pos, i ); + squared_displacement += 
displacement * displacement; + } + return std::sqrt( squared_displacement ); +} + +template < int D > +inline std::vector< double > +Layer< D >::get_position_vector( const size_t sind ) const +{ + return get_position( sind ).get_vector(); +} + +template < int D > +inline void +Layer< D >::clear_ntree_cache_() const +{ + cached_ntree_ = std::shared_ptr< Ntree< D, size_t > >(); + cached_ntree_md_ = NodeCollectionMetadataPTR( nullptr ); +} + +template < int D > +inline void +Layer< D >::clear_vector_cache_() const +{ + if ( cached_vector_ != 0 ) + { + delete cached_vector_; + } + cached_vector_ = 0; + cached_vector_md_ = NodeCollectionMetadataPTR( nullptr ); +} + +template < int D > +std::shared_ptr< Ntree< D, size_t > > Layer< D >::cached_ntree_; + +template < int D > +std::vector< std::pair< Position< D >, size_t > >* Layer< D >::cached_vector_ = 0; + +template < int D > +Position< D > +Layer< D >::compute_displacement( const Position< D >& from_pos, const Position< D >& to_pos ) const +{ + Position< D > displ = to_pos; + for ( int i = 0; i < D; ++i ) + { + displ[ i ] -= from_pos[ i ]; + if ( periodic_[ i ] ) + { + displ[ i ] = -0.5 * extent_[ i ] + std::fmod( displ[ i ] + 0.5 * extent_[ i ], extent_[ i ] ); + if ( displ[ i ] < -0.5 * extent_[ i ] ) + { + displ[ i ] += extent_[ i ]; + } + } + } + return displ; +} + +template < int D > +double +Layer< D >::compute_displacement( const std::vector< double >& from_pos, + const std::vector< double >& to_pos, + const unsigned int dimension ) const +{ + double displacement = to_pos[ dimension ] - from_pos[ dimension ]; + if ( periodic_[ dimension ] ) + { + displacement -= extent_[ dimension ] * std::round( displacement * ( 1 / extent_[ dimension ] ) ); + } + return displacement; +} + +template < int D > +void +Layer< D >::set_status( const DictionaryDatum& d ) +{ + if ( d->known( names::edge_wrap ) ) + { + if ( getValue< bool >( d, names::edge_wrap ) ) + { + periodic_ = ( 1 << D ) - 1; // All dimensions periodic + } + } +} 
+ +template < int D > +void +Layer< D >::get_status( DictionaryDatum& d, NodeCollection const* nc ) const +{ + ( *d )[ names::extent ] = std::vector< double >( extent_.get_vector() ); + ( *d )[ names::center ] = std::vector< double >( ( lower_left_ + extent_ / 2 ).get_vector() ); + + if ( periodic_.none() ) + { + ( *d )[ names::edge_wrap ] = BoolDatum( false ); + } + else if ( periodic_.count() == D ) + { + ( *d )[ names::edge_wrap ] = true; + } + + if ( nc ) + { + // This is for backward compatibility with some tests and scripts + // TODO: Rename parameter + ( *d )[ names::network_size ] = nc->size(); + } +} + +template < int D > +void +Layer< D >::connect( NodeCollectionPTR source_nc, + AbstractLayerPTR target_layer, + NodeCollectionPTR target_nc, + ConnectionCreator& connector ) +{ + // We need to extract the real pointer here to be able to cast to the + // dimension-specific subclass. + AbstractLayer* target_abs = target_layer.get(); + assert( target_abs ); + + try + { + Layer< D >& tgt = dynamic_cast< Layer< D >& >( *target_abs ); + connector.connect( *this, source_nc, tgt, target_nc ); + } + catch ( std::bad_cast& e ) + { + throw BadProperty( "Target layer must have same number of dimensions as source layer." 
); + } +} + +template < int D > +std::shared_ptr< Ntree< D, size_t > > +Layer< D >::get_global_positions_ntree( NodeCollectionPTR node_collection ) +{ + if ( cached_ntree_md_ == node_collection->get_metadata() ) + { + assert( cached_ntree_.get() ); + return cached_ntree_; + } + + clear_ntree_cache_(); + + cached_ntree_ = std::shared_ptr< Ntree< D, size_t > >( + new Ntree< D, size_t >( this->lower_left_, this->extent_, this->periodic_ ) ); + + return do_get_global_positions_ntree_( node_collection ); +} + +template < int D > +std::shared_ptr< Ntree< D, size_t > > +Layer< D >::get_global_positions_ntree( std::bitset< D > periodic, + Position< D > lower_left, + Position< D > extent, + NodeCollectionPTR node_collection ) +{ + clear_ntree_cache_(); + clear_vector_cache_(); + + // Keep layer geometry for non-periodic dimensions + for ( int i = 0; i < D; ++i ) + { + if ( not periodic[ i ] ) + { + extent[ i ] = extent_[ i ]; + lower_left[ i ] = lower_left_[ i ]; + } + } + + cached_ntree_ = + std::shared_ptr< Ntree< D, size_t > >( new Ntree< D, size_t >( this->lower_left_, extent, periodic ) ); + + do_get_global_positions_ntree_( node_collection ); + + // Do not use cache since the periodic bits and extents were altered. 
+ cached_ntree_md_ = NodeCollectionMetadataPTR( nullptr ); + + return cached_ntree_; +} + +template < int D > +std::shared_ptr< Ntree< D, size_t > > +Layer< D >::do_get_global_positions_ntree_( NodeCollectionPTR node_collection ) +{ + if ( cached_vector_md_ == node_collection->get_metadata() ) + { + // Convert from vector to Ntree + + typename std::insert_iterator< Ntree< D, size_t > > to = std::inserter( *cached_ntree_, cached_ntree_->end() ); + + for ( typename std::vector< std::pair< Position< D >, size_t > >::iterator from = cached_vector_->begin(); + from != cached_vector_->end(); + ++from ) + { + *to = *from; + } + } + else + { + + insert_global_positions_ntree_( *cached_ntree_, node_collection ); + } + + clear_vector_cache_(); + + cached_ntree_md_ = node_collection->get_metadata(); + + return cached_ntree_; +} + +template < int D > +std::vector< std::pair< Position< D >, size_t > >* +Layer< D >::get_global_positions_vector( NodeCollectionPTR node_collection ) +{ + if ( cached_vector_md_ == node_collection->get_metadata() ) + { + assert( cached_vector_ ); + return cached_vector_; + } + + clear_vector_cache_(); + + cached_vector_ = new std::vector< std::pair< Position< D >, size_t > >; + + if ( cached_ntree_md_ == node_collection->get_metadata() ) + { + // Convert from NTree to vector + + typename std::back_insert_iterator< std::vector< std::pair< Position< D >, size_t > > > to = + std::back_inserter( *cached_vector_ ); + + for ( typename Ntree< D, size_t >::iterator from = cached_ntree_->begin(); from != cached_ntree_->end(); ++from ) + { + *to = *from; + } + } + else + { + insert_global_positions_vector_( *cached_vector_, node_collection ); + } + + clear_ntree_cache_(); + + cached_vector_md_ = node_collection->get_metadata(); + + return cached_vector_; +} + +template < int D > +std::vector< std::pair< Position< D >, size_t > > +Layer< D >::get_global_positions_vector( const MaskDatum& mask, + const Position< D >& anchor, + bool allow_oversized, + 
NodeCollectionPTR node_collection ) +{ + MaskedLayer< D > masked_layer( *this, mask, allow_oversized, node_collection ); + std::vector< std::pair< Position< D >, size_t > > positions; + + for ( typename Ntree< D, size_t >::masked_iterator iter = masked_layer.begin( anchor ); iter != masked_layer.end(); + ++iter ) + { + positions.push_back( *iter ); + } + + return positions; +} + +template < int D > +std::vector< size_t > +Layer< D >::get_global_nodes( const MaskDatum& mask, + const std::vector< double >& anchor, + bool allow_oversized, + NodeCollectionPTR node_collection ) +{ + MaskedLayer< D > masked_layer( *this, mask, allow_oversized, node_collection ); + std::vector< size_t > nodes; + for ( typename Ntree< D, size_t >::masked_iterator i = masked_layer.begin( anchor ); i != masked_layer.end(); ++i ) + { + nodes.push_back( i->second ); + } + return nodes; +} + +template < int D > +void +Layer< D >::dump_nodes( std::ostream& out ) const +{ + for ( NodeCollection::const_iterator it = this->node_collection_->rank_local_begin(); + it < this->node_collection_->end(); + ++it ) + { + out << ( *it ).node_id << ' '; + get_position( ( *it ).nc_index ).print( out ); + out << std::endl; + } +} + +template < int D > +void +Layer< D >::dump_connections( std::ostream& out, + NodeCollectionPTR node_collection, + AbstractLayerPTR target_layer, + const Token& syn_model ) +{ + // Find all connections for given sources, targets and synapse model + DictionaryDatum conn_filter( new Dictionary ); + def( conn_filter, names::source, NodeCollectionDatum( node_collection ) ); + def( conn_filter, names::target, NodeCollectionDatum( target_layer->get_node_collection() ) ); + def( conn_filter, names::synapse_model, syn_model ); + ArrayDatum connectome = kernel::manager< ConnectionManager >().get_connections( conn_filter ); + + // Get positions of remote nodes + std::vector< std::pair< Position< D >, size_t > >* src_vec = get_global_positions_vector( node_collection ); + + // Iterate over 
connectome and write every connection, looking up source position only if source neuron changes + size_t previous_source_node_id = 0; // dummy initial value, cannot be node_id of any node + Position< D > source_pos; // dummy value + for ( const auto& entry : connectome ) + { + ConnectionDatum conn = getValue< ConnectionDatum >( entry ); + const size_t source_node_id = conn.get_source_node_id(); + + // Search source_pos for source node only if it is a different node + if ( source_node_id != previous_source_node_id ) + { + const auto it = std::find_if( src_vec->begin(), + src_vec->end(), + [ source_node_id ]( const std::pair< Position< D >, size_t >& p ) { return p.second == source_node_id; } ); + assert( it != src_vec->end() ); // internal error if node not found + + source_pos = it->first; + previous_source_node_id = source_node_id; + } + + DictionaryDatum result_dict = kernel::manager< ConnectionManager >().get_synapse_status( source_node_id, + conn.get_target_node_id(), + conn.get_target_thread(), + conn.get_synapse_model_id(), + conn.get_port() ); + const long target_node_id = getValue< long >( result_dict, names::target ); + const double weight = getValue< double >( result_dict, names::weight ); + const double delay = getValue< double >( result_dict, names::delay ); + const Layer< D >* const tgt_layer = dynamic_cast< Layer< D >* >( target_layer.get() ); + const long tnode_lid = tgt_layer->node_collection_->get_nc_index( target_node_id ); + assert( tnode_lid >= 0 ); + + // Print source, target, weight, delay, rports + out << source_node_id << ' ' << target_node_id << ' ' << weight << ' ' << delay << ' '; + tgt_layer->compute_displacement( source_pos, tnode_lid ).print( out ); + out << '\n'; + } +} + } // namespace nest #endif diff --git a/nestkernel/layer_impl.h b/nestkernel/layer_impl.h index 3cbd823bd5..f181e1ec6d 100644 --- a/nestkernel/layer_impl.h +++ b/nestkernel/layer_impl.h @@ -25,496 +25,6 @@ namespace nest { -inline void 
-AbstractLayer::set_node_collection( NodeCollectionPTR node_collection ) -{ - node_collection_ = node_collection; -} - - -inline NodeCollectionPTR -AbstractLayer::get_node_collection() -{ - return node_collection_; -} - -template < int D > -inline MaskedLayer< D >::MaskedLayer( Layer< D >& layer, - const MaskDatum& maskd, - bool allow_oversized, - NodeCollectionPTR node_collection ) - : mask_( maskd ) -{ - ntree_ = layer.get_global_positions_ntree( node_collection ); - - check_mask_( layer, allow_oversized ); -} - -template < int D > -inline MaskedLayer< D >::MaskedLayer( Layer< D >& layer, - const MaskDatum& maskd, - bool allow_oversized, - Layer< D >& target, - NodeCollectionPTR node_collection ) - : mask_( maskd ) -{ - ntree_ = layer.get_global_positions_ntree( - target.get_periodic_mask(), target.get_lower_left(), target.get_extent(), node_collection ); - - check_mask_( target, allow_oversized ); - mask_ = new ConverseMask< D >( dynamic_cast< const Mask< D >& >( *mask_ ) ); -} - -template < int D > -inline MaskedLayer< D >::~MaskedLayer() -{ -} - -template < int D > -inline typename Ntree< D, size_t >::masked_iterator -MaskedLayer< D >::begin( const Position< D >& anchor ) -{ - try - { - return ntree_->masked_begin( dynamic_cast< const Mask< D >& >( *mask_ ), anchor ); - } - catch ( std::bad_cast& e ) - { - throw BadProperty( "Mask is incompatible with layer." 
); - } -} - -template < int D > -inline typename Ntree< D, size_t >::masked_iterator -MaskedLayer< D >::end() -{ - return ntree_->masked_end(); -} - -template < int D > -inline Layer< D >::Layer() -{ - // Default center (0,0) and extent (1,1) - for ( int i = 0; i < D; ++i ) - { - lower_left_[ i ] = -0.5; - extent_[ i ] = 1.0; - } -} - -template < int D > -inline Layer< D >::Layer( const Layer& other_layer ) - : AbstractLayer( other_layer ) - , lower_left_( other_layer.lower_left_ ) - , extent_( other_layer.extent_ ) - , periodic_( other_layer.periodic_ ) -{ -} - -template < int D > -inline Layer< D >::~Layer() -{ - if ( cached_ntree_md_ == get_metadata() ) - { - clear_ntree_cache_(); - } - - if ( cached_vector_md_ == get_metadata() ) - { - clear_vector_cache_(); - } -} - -template < int D > -inline Position< D > -Layer< D >::compute_displacement( const Position< D >& from_pos, const size_t to_lid ) const -{ - return compute_displacement( from_pos, get_position( to_lid ) ); -} - -template < int D > -inline std::vector< double > -Layer< D >::compute_displacement( const std::vector< double >& from_pos, const size_t to_lid ) const -{ - return std::vector< double >( compute_displacement( Position< D >( from_pos ), to_lid ).get_vector() ); -} - -template < int D > -inline double -Layer< D >::compute_distance( const Position< D >& from_pos, const size_t lid ) const -{ - return compute_displacement( from_pos, lid ).length(); -} - -template < int D > -inline double -Layer< D >::compute_distance( const std::vector< double >& from_pos, const size_t lid ) const -{ - return compute_displacement( Position< D >( from_pos ), lid ).length(); -} - -template < int D > -inline double -Layer< D >::compute_distance( const std::vector< double >& from_pos, const std::vector< double >& to_pos ) const -{ - double squared_displacement = 0; - for ( unsigned int i = 0; i < D; ++i ) - { - const double displacement = compute_displacement( from_pos, to_pos, i ); - squared_displacement += 
displacement * displacement; - } - return std::sqrt( squared_displacement ); -} - -template < int D > -inline std::vector< double > -Layer< D >::get_position_vector( const size_t sind ) const -{ - return get_position( sind ).get_vector(); -} - -template < int D > -inline void -Layer< D >::clear_ntree_cache_() const -{ - cached_ntree_ = std::shared_ptr< Ntree< D, size_t > >(); - cached_ntree_md_ = NodeCollectionMetadataPTR( nullptr ); -} - -template < int D > -inline void -Layer< D >::clear_vector_cache_() const -{ - if ( cached_vector_ != 0 ) - { - delete cached_vector_; - } - cached_vector_ = 0; - cached_vector_md_ = NodeCollectionMetadataPTR( nullptr ); -} - -template < int D > -std::shared_ptr< Ntree< D, size_t > > Layer< D >::cached_ntree_; - -template < int D > -std::vector< std::pair< Position< D >, size_t > >* Layer< D >::cached_vector_ = 0; - -template < int D > -Position< D > -Layer< D >::compute_displacement( const Position< D >& from_pos, const Position< D >& to_pos ) const -{ - Position< D > displ = to_pos; - for ( int i = 0; i < D; ++i ) - { - displ[ i ] -= from_pos[ i ]; - if ( periodic_[ i ] ) - { - displ[ i ] = -0.5 * extent_[ i ] + std::fmod( displ[ i ] + 0.5 * extent_[ i ], extent_[ i ] ); - if ( displ[ i ] < -0.5 * extent_[ i ] ) - { - displ[ i ] += extent_[ i ]; - } - } - } - return displ; -} - -template < int D > -double -Layer< D >::compute_displacement( const std::vector< double >& from_pos, - const std::vector< double >& to_pos, - const unsigned int dimension ) const -{ - double displacement = to_pos[ dimension ] - from_pos[ dimension ]; - if ( periodic_[ dimension ] ) - { - displacement -= extent_[ dimension ] * std::round( displacement * ( 1 / extent_[ dimension ] ) ); - } - return displacement; -} - -template < int D > -void -Layer< D >::set_status( const DictionaryDatum& d ) -{ - if ( d->known( names::edge_wrap ) ) - { - if ( getValue< bool >( d, names::edge_wrap ) ) - { - periodic_ = ( 1 << D ) - 1; // All dimensions periodic - } - } -} 
- -template < int D > -void -Layer< D >::get_status( DictionaryDatum& d, NodeCollection const* nc ) const -{ - ( *d )[ names::extent ] = std::vector< double >( extent_.get_vector() ); - ( *d )[ names::center ] = std::vector< double >( ( lower_left_ + extent_ / 2 ).get_vector() ); - - if ( periodic_.none() ) - { - ( *d )[ names::edge_wrap ] = BoolDatum( false ); - } - else if ( periodic_.count() == D ) - { - ( *d )[ names::edge_wrap ] = true; - } - - if ( nc ) - { - // This is for backward compatibility with some tests and scripts - // TODO: Rename parameter - ( *d )[ names::network_size ] = nc->size(); - } -} - -template < int D > -void -Layer< D >::connect( NodeCollectionPTR source_nc, - AbstractLayerPTR target_layer, - NodeCollectionPTR target_nc, - ConnectionCreator& connector ) -{ - // We need to extract the real pointer here to be able to cast to the - // dimension-specific subclass. - AbstractLayer* target_abs = target_layer.get(); - assert( target_abs ); - - try - { - Layer< D >& tgt = dynamic_cast< Layer< D >& >( *target_abs ); - connector.connect( *this, source_nc, tgt, target_nc ); - } - catch ( std::bad_cast& e ) - { - throw BadProperty( "Target layer must have same number of dimensions as source layer." 
); - } -} - -template < int D > -std::shared_ptr< Ntree< D, size_t > > -Layer< D >::get_global_positions_ntree( NodeCollectionPTR node_collection ) -{ - if ( cached_ntree_md_ == node_collection->get_metadata() ) - { - assert( cached_ntree_.get() ); - return cached_ntree_; - } - - clear_ntree_cache_(); - - cached_ntree_ = std::shared_ptr< Ntree< D, size_t > >( - new Ntree< D, size_t >( this->lower_left_, this->extent_, this->periodic_ ) ); - - return do_get_global_positions_ntree_( node_collection ); -} - -template < int D > -std::shared_ptr< Ntree< D, size_t > > -Layer< D >::get_global_positions_ntree( std::bitset< D > periodic, - Position< D > lower_left, - Position< D > extent, - NodeCollectionPTR node_collection ) -{ - clear_ntree_cache_(); - clear_vector_cache_(); - - // Keep layer geometry for non-periodic dimensions - for ( int i = 0; i < D; ++i ) - { - if ( not periodic[ i ] ) - { - extent[ i ] = extent_[ i ]; - lower_left[ i ] = lower_left_[ i ]; - } - } - - cached_ntree_ = - std::shared_ptr< Ntree< D, size_t > >( new Ntree< D, size_t >( this->lower_left_, extent, periodic ) ); - - do_get_global_positions_ntree_( node_collection ); - - // Do not use cache since the periodic bits and extents were altered. 
- cached_ntree_md_ = NodeCollectionMetadataPTR( nullptr ); - - return cached_ntree_; -} - -template < int D > -std::shared_ptr< Ntree< D, size_t > > -Layer< D >::do_get_global_positions_ntree_( NodeCollectionPTR node_collection ) -{ - if ( cached_vector_md_ == node_collection->get_metadata() ) - { - // Convert from vector to Ntree - - typename std::insert_iterator< Ntree< D, size_t > > to = std::inserter( *cached_ntree_, cached_ntree_->end() ); - - for ( typename std::vector< std::pair< Position< D >, size_t > >::iterator from = cached_vector_->begin(); - from != cached_vector_->end(); - ++from ) - { - *to = *from; - } - } - else - { - - insert_global_positions_ntree_( *cached_ntree_, node_collection ); - } - - clear_vector_cache_(); - - cached_ntree_md_ = node_collection->get_metadata(); - - return cached_ntree_; -} - -template < int D > -std::vector< std::pair< Position< D >, size_t > >* -Layer< D >::get_global_positions_vector( NodeCollectionPTR node_collection ) -{ - if ( cached_vector_md_ == node_collection->get_metadata() ) - { - assert( cached_vector_ ); - return cached_vector_; - } - - clear_vector_cache_(); - - cached_vector_ = new std::vector< std::pair< Position< D >, size_t > >; - - if ( cached_ntree_md_ == node_collection->get_metadata() ) - { - // Convert from NTree to vector - - typename std::back_insert_iterator< std::vector< std::pair< Position< D >, size_t > > > to = - std::back_inserter( *cached_vector_ ); - - for ( typename Ntree< D, size_t >::iterator from = cached_ntree_->begin(); from != cached_ntree_->end(); ++from ) - { - *to = *from; - } - } - else - { - insert_global_positions_vector_( *cached_vector_, node_collection ); - } - - clear_ntree_cache_(); - - cached_vector_md_ = node_collection->get_metadata(); - - return cached_vector_; -} - -template < int D > -std::vector< std::pair< Position< D >, size_t > > -Layer< D >::get_global_positions_vector( const MaskDatum& mask, - const Position< D >& anchor, - bool allow_oversized, - 
NodeCollectionPTR node_collection ) -{ - MaskedLayer< D > masked_layer( *this, mask, allow_oversized, node_collection ); - std::vector< std::pair< Position< D >, size_t > > positions; - - for ( typename Ntree< D, size_t >::masked_iterator iter = masked_layer.begin( anchor ); iter != masked_layer.end(); - ++iter ) - { - positions.push_back( *iter ); - } - - return positions; -} - -template < int D > -std::vector< size_t > -Layer< D >::get_global_nodes( const MaskDatum& mask, - const std::vector< double >& anchor, - bool allow_oversized, - NodeCollectionPTR node_collection ) -{ - MaskedLayer< D > masked_layer( *this, mask, allow_oversized, node_collection ); - std::vector< size_t > nodes; - for ( typename Ntree< D, size_t >::masked_iterator i = masked_layer.begin( anchor ); i != masked_layer.end(); ++i ) - { - nodes.push_back( i->second ); - } - return nodes; -} - -template < int D > -void -Layer< D >::dump_nodes( std::ostream& out ) const -{ - for ( NodeCollection::const_iterator it = this->node_collection_->rank_local_begin(); - it < this->node_collection_->end(); - ++it ) - { - out << ( *it ).node_id << ' '; - get_position( ( *it ).nc_index ).print( out ); - out << std::endl; - } -} - -template < int D > -void -Layer< D >::dump_connections( std::ostream& out, - NodeCollectionPTR node_collection, - AbstractLayerPTR target_layer, - const Token& syn_model ) -{ - // Find all connections for given sources, targets and synapse model - DictionaryDatum conn_filter( new Dictionary ); - def( conn_filter, names::source, NodeCollectionDatum( node_collection ) ); - def( conn_filter, names::target, NodeCollectionDatum( target_layer->get_node_collection() ) ); - def( conn_filter, names::synapse_model, syn_model ); - ArrayDatum connectome = kernel::manager< ConnectionManager >().get_connections( conn_filter ); - - // Get positions of remote nodes - std::vector< std::pair< Position< D >, size_t > >* src_vec = get_global_positions_vector( node_collection ); - - // Iterate over 
connectome and write every connection, looking up source position only if source neuron changes - size_t previous_source_node_id = 0; // dummy initial value, cannot be node_id of any node - Position< D > source_pos; // dummy value - for ( const auto& entry : connectome ) - { - ConnectionDatum conn = getValue< ConnectionDatum >( entry ); - const size_t source_node_id = conn.get_source_node_id(); - - // Search source_pos for source node only if it is a different node - if ( source_node_id != previous_source_node_id ) - { - const auto it = std::find_if( src_vec->begin(), - src_vec->end(), - [ source_node_id ]( const std::pair< Position< D >, size_t >& p ) { return p.second == source_node_id; } ); - assert( it != src_vec->end() ); // internal error if node not found - - source_pos = it->first; - previous_source_node_id = source_node_id; - } - - DictionaryDatum result_dict = kernel::manager< ConnectionManager >().get_synapse_status( source_node_id, - conn.get_target_node_id(), - conn.get_target_thread(), - conn.get_synapse_model_id(), - conn.get_port() ); - const long target_node_id = getValue< long >( result_dict, names::target ); - const double weight = getValue< double >( result_dict, names::weight ); - const double delay = getValue< double >( result_dict, names::delay ); - const Layer< D >* const tgt_layer = dynamic_cast< Layer< D >* >( target_layer.get() ); - const long tnode_lid = tgt_layer->node_collection_->get_nc_index( target_node_id ); - assert( tnode_lid >= 0 ); - - // Print source, target, weight, delay, rports - out << source_node_id << ' ' << target_node_id << ' ' << weight << ' ' << delay << ' '; - tgt_layer->compute_displacement( source_pos, tnode_lid ).print( out ); - out << '\n'; - } -} - template < int D > void MaskedLayer< D >::check_mask_( Layer< D >& layer, bool allow_oversized ) From d85b30e55aa6627214f6340b20b4932dce2fedef Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Wed, 13 Aug 2025 13:06:39 +0200 Subject: [PATCH 10/23] Minimized 
layer_impl.h --- nestkernel/layer.h | 490 ++++++++++++++++++++++++++++++++++++++++ nestkernel/layer_impl.h | 490 ---------------------------------------- 2 files changed, 490 insertions(+), 490 deletions(-) diff --git a/nestkernel/layer.h b/nestkernel/layer.h index 8e425e896d..054f79f35c 100644 --- a/nestkernel/layer.h +++ b/nestkernel/layer.h @@ -527,6 +527,496 @@ class MaskedLayer MaskDatum mask_; }; +inline void +AbstractLayer::set_node_collection( NodeCollectionPTR node_collection ) +{ + node_collection_ = node_collection; +} + + +inline NodeCollectionPTR +AbstractLayer::get_node_collection() +{ + return node_collection_; +} + +template < int D > +inline MaskedLayer< D >::MaskedLayer( Layer< D >& layer, + const MaskDatum& maskd, + bool allow_oversized, + NodeCollectionPTR node_collection ) + : mask_( maskd ) +{ + ntree_ = layer.get_global_positions_ntree( node_collection ); + + check_mask_( layer, allow_oversized ); +} + +template < int D > +inline MaskedLayer< D >::MaskedLayer( Layer< D >& layer, + const MaskDatum& maskd, + bool allow_oversized, + Layer< D >& target, + NodeCollectionPTR node_collection ) + : mask_( maskd ) +{ + ntree_ = layer.get_global_positions_ntree( + target.get_periodic_mask(), target.get_lower_left(), target.get_extent(), node_collection ); + + check_mask_( target, allow_oversized ); + mask_ = new ConverseMask< D >( dynamic_cast< const Mask< D >& >( *mask_ ) ); +} + +template < int D > +inline MaskedLayer< D >::~MaskedLayer() +{ +} + +template < int D > +inline typename Ntree< D, size_t >::masked_iterator +MaskedLayer< D >::begin( const Position< D >& anchor ) +{ + try + { + return ntree_->masked_begin( dynamic_cast< const Mask< D >& >( *mask_ ), anchor ); + } + catch ( std::bad_cast& e ) + { + throw BadProperty( "Mask is incompatible with layer." 
); + } +} + +template < int D > +inline typename Ntree< D, size_t >::masked_iterator +MaskedLayer< D >::end() +{ + return ntree_->masked_end(); +} + +template < int D > +inline Layer< D >::Layer() +{ + // Default center (0,0) and extent (1,1) + for ( int i = 0; i < D; ++i ) + { + lower_left_[ i ] = -0.5; + extent_[ i ] = 1.0; + } +} + +template < int D > +inline Layer< D >::Layer( const Layer& other_layer ) + : AbstractLayer( other_layer ) + , lower_left_( other_layer.lower_left_ ) + , extent_( other_layer.extent_ ) + , periodic_( other_layer.periodic_ ) +{ +} + +template < int D > +inline Layer< D >::~Layer() +{ + if ( cached_ntree_md_ == get_metadata() ) + { + clear_ntree_cache_(); + } + + if ( cached_vector_md_ == get_metadata() ) + { + clear_vector_cache_(); + } +} + +template < int D > +inline Position< D > +Layer< D >::compute_displacement( const Position< D >& from_pos, const size_t to_lid ) const +{ + return compute_displacement( from_pos, get_position( to_lid ) ); +} + +template < int D > +inline std::vector< double > +Layer< D >::compute_displacement( const std::vector< double >& from_pos, const size_t to_lid ) const +{ + return std::vector< double >( compute_displacement( Position< D >( from_pos ), to_lid ).get_vector() ); +} + +template < int D > +inline double +Layer< D >::compute_distance( const Position< D >& from_pos, const size_t lid ) const +{ + return compute_displacement( from_pos, lid ).length(); +} + +template < int D > +inline double +Layer< D >::compute_distance( const std::vector< double >& from_pos, const size_t lid ) const +{ + return compute_displacement( Position< D >( from_pos ), lid ).length(); +} + +template < int D > +inline double +Layer< D >::compute_distance( const std::vector< double >& from_pos, const std::vector< double >& to_pos ) const +{ + double squared_displacement = 0; + for ( unsigned int i = 0; i < D; ++i ) + { + const double displacement = compute_displacement( from_pos, to_pos, i ); + squared_displacement += 
displacement * displacement; + } + return std::sqrt( squared_displacement ); +} + +template < int D > +inline std::vector< double > +Layer< D >::get_position_vector( const size_t sind ) const +{ + return get_position( sind ).get_vector(); +} + +template < int D > +inline void +Layer< D >::clear_ntree_cache_() const +{ + cached_ntree_ = std::shared_ptr< Ntree< D, size_t > >(); + cached_ntree_md_ = NodeCollectionMetadataPTR( nullptr ); +} + +template < int D > +inline void +Layer< D >::clear_vector_cache_() const +{ + if ( cached_vector_ != 0 ) + { + delete cached_vector_; + } + cached_vector_ = 0; + cached_vector_md_ = NodeCollectionMetadataPTR( nullptr ); +} + +template < int D > +std::shared_ptr< Ntree< D, size_t > > Layer< D >::cached_ntree_; + +template < int D > +std::vector< std::pair< Position< D >, size_t > >* Layer< D >::cached_vector_ = 0; + +template < int D > +Position< D > +Layer< D >::compute_displacement( const Position< D >& from_pos, const Position< D >& to_pos ) const +{ + Position< D > displ = to_pos; + for ( int i = 0; i < D; ++i ) + { + displ[ i ] -= from_pos[ i ]; + if ( periodic_[ i ] ) + { + displ[ i ] = -0.5 * extent_[ i ] + std::fmod( displ[ i ] + 0.5 * extent_[ i ], extent_[ i ] ); + if ( displ[ i ] < -0.5 * extent_[ i ] ) + { + displ[ i ] += extent_[ i ]; + } + } + } + return displ; +} + +template < int D > +double +Layer< D >::compute_displacement( const std::vector< double >& from_pos, + const std::vector< double >& to_pos, + const unsigned int dimension ) const +{ + double displacement = to_pos[ dimension ] - from_pos[ dimension ]; + if ( periodic_[ dimension ] ) + { + displacement -= extent_[ dimension ] * std::round( displacement * ( 1 / extent_[ dimension ] ) ); + } + return displacement; +} + +template < int D > +void +Layer< D >::set_status( const DictionaryDatum& d ) +{ + if ( d->known( names::edge_wrap ) ) + { + if ( getValue< bool >( d, names::edge_wrap ) ) + { + periodic_ = ( 1 << D ) - 1; // All dimensions periodic + } + } +} 
+ +template < int D > +void +Layer< D >::get_status( DictionaryDatum& d, NodeCollection const* nc ) const +{ + ( *d )[ names::extent ] = std::vector< double >( extent_.get_vector() ); + ( *d )[ names::center ] = std::vector< double >( ( lower_left_ + extent_ / 2 ).get_vector() ); + + if ( periodic_.none() ) + { + ( *d )[ names::edge_wrap ] = BoolDatum( false ); + } + else if ( periodic_.count() == D ) + { + ( *d )[ names::edge_wrap ] = true; + } + + if ( nc ) + { + // This is for backward compatibility with some tests and scripts + // TODO: Rename parameter + ( *d )[ names::network_size ] = nc->size(); + } +} + +template < int D > +void +Layer< D >::connect( NodeCollectionPTR source_nc, + AbstractLayerPTR target_layer, + NodeCollectionPTR target_nc, + ConnectionCreator& connector ) +{ + // We need to extract the real pointer here to be able to cast to the + // dimension-specific subclass. + AbstractLayer* target_abs = target_layer.get(); + assert( target_abs ); + + try + { + Layer< D >& tgt = dynamic_cast< Layer< D >& >( *target_abs ); + connector.connect( *this, source_nc, tgt, target_nc ); + } + catch ( std::bad_cast& e ) + { + throw BadProperty( "Target layer must have same number of dimensions as source layer." 
); + } +} + +template < int D > +std::shared_ptr< Ntree< D, size_t > > +Layer< D >::get_global_positions_ntree( NodeCollectionPTR node_collection ) +{ + if ( cached_ntree_md_ == node_collection->get_metadata() ) + { + assert( cached_ntree_.get() ); + return cached_ntree_; + } + + clear_ntree_cache_(); + + cached_ntree_ = std::shared_ptr< Ntree< D, size_t > >( + new Ntree< D, size_t >( this->lower_left_, this->extent_, this->periodic_ ) ); + + return do_get_global_positions_ntree_( node_collection ); +} + +template < int D > +std::shared_ptr< Ntree< D, size_t > > +Layer< D >::get_global_positions_ntree( std::bitset< D > periodic, + Position< D > lower_left, + Position< D > extent, + NodeCollectionPTR node_collection ) +{ + clear_ntree_cache_(); + clear_vector_cache_(); + + // Keep layer geometry for non-periodic dimensions + for ( int i = 0; i < D; ++i ) + { + if ( not periodic[ i ] ) + { + extent[ i ] = extent_[ i ]; + lower_left[ i ] = lower_left_[ i ]; + } + } + + cached_ntree_ = + std::shared_ptr< Ntree< D, size_t > >( new Ntree< D, size_t >( this->lower_left_, extent, periodic ) ); + + do_get_global_positions_ntree_( node_collection ); + + // Do not use cache since the periodic bits and extents were altered. 
+ cached_ntree_md_ = NodeCollectionMetadataPTR( nullptr ); + + return cached_ntree_; +} + +template < int D > +std::shared_ptr< Ntree< D, size_t > > +Layer< D >::do_get_global_positions_ntree_( NodeCollectionPTR node_collection ) +{ + if ( cached_vector_md_ == node_collection->get_metadata() ) + { + // Convert from vector to Ntree + + typename std::insert_iterator< Ntree< D, size_t > > to = std::inserter( *cached_ntree_, cached_ntree_->end() ); + + for ( typename std::vector< std::pair< Position< D >, size_t > >::iterator from = cached_vector_->begin(); + from != cached_vector_->end(); + ++from ) + { + *to = *from; + } + } + else + { + + insert_global_positions_ntree_( *cached_ntree_, node_collection ); + } + + clear_vector_cache_(); + + cached_ntree_md_ = node_collection->get_metadata(); + + return cached_ntree_; +} + +template < int D > +std::vector< std::pair< Position< D >, size_t > >* +Layer< D >::get_global_positions_vector( NodeCollectionPTR node_collection ) +{ + if ( cached_vector_md_ == node_collection->get_metadata() ) + { + assert( cached_vector_ ); + return cached_vector_; + } + + clear_vector_cache_(); + + cached_vector_ = new std::vector< std::pair< Position< D >, size_t > >; + + if ( cached_ntree_md_ == node_collection->get_metadata() ) + { + // Convert from NTree to vector + + typename std::back_insert_iterator< std::vector< std::pair< Position< D >, size_t > > > to = + std::back_inserter( *cached_vector_ ); + + for ( typename Ntree< D, size_t >::iterator from = cached_ntree_->begin(); from != cached_ntree_->end(); ++from ) + { + *to = *from; + } + } + else + { + insert_global_positions_vector_( *cached_vector_, node_collection ); + } + + clear_ntree_cache_(); + + cached_vector_md_ = node_collection->get_metadata(); + + return cached_vector_; +} + +template < int D > +std::vector< std::pair< Position< D >, size_t > > +Layer< D >::get_global_positions_vector( const MaskDatum& mask, + const Position< D >& anchor, + bool allow_oversized, + 
NodeCollectionPTR node_collection ) +{ + MaskedLayer< D > masked_layer( *this, mask, allow_oversized, node_collection ); + std::vector< std::pair< Position< D >, size_t > > positions; + + for ( typename Ntree< D, size_t >::masked_iterator iter = masked_layer.begin( anchor ); iter != masked_layer.end(); + ++iter ) + { + positions.push_back( *iter ); + } + + return positions; +} + +template < int D > +std::vector< size_t > +Layer< D >::get_global_nodes( const MaskDatum& mask, + const std::vector< double >& anchor, + bool allow_oversized, + NodeCollectionPTR node_collection ) +{ + MaskedLayer< D > masked_layer( *this, mask, allow_oversized, node_collection ); + std::vector< size_t > nodes; + for ( typename Ntree< D, size_t >::masked_iterator i = masked_layer.begin( anchor ); i != masked_layer.end(); ++i ) + { + nodes.push_back( i->second ); + } + return nodes; +} + +template < int D > +void +Layer< D >::dump_nodes( std::ostream& out ) const +{ + for ( NodeCollection::const_iterator it = this->node_collection_->rank_local_begin(); + it < this->node_collection_->end(); + ++it ) + { + out << ( *it ).node_id << ' '; + get_position( ( *it ).nc_index ).print( out ); + out << std::endl; + } +} + +template < int D > +void +Layer< D >::dump_connections( std::ostream& out, + NodeCollectionPTR node_collection, + AbstractLayerPTR target_layer, + const Token& syn_model ) +{ + // Find all connections for given sources, targets and synapse model + DictionaryDatum conn_filter( new Dictionary ); + def( conn_filter, names::source, NodeCollectionDatum( node_collection ) ); + def( conn_filter, names::target, NodeCollectionDatum( target_layer->get_node_collection() ) ); + def( conn_filter, names::synapse_model, syn_model ); + ArrayDatum connectome = kernel::manager< ConnectionManager >().get_connections( conn_filter ); + + // Get positions of remote nodes + std::vector< std::pair< Position< D >, size_t > >* src_vec = get_global_positions_vector( node_collection ); + + // Iterate over 
connectome and write every connection, looking up source position only if source neuron changes + size_t previous_source_node_id = 0; // dummy initial value, cannot be node_id of any node + Position< D > source_pos; // dummy value + for ( const auto& entry : connectome ) + { + ConnectionDatum conn = getValue< ConnectionDatum >( entry ); + const size_t source_node_id = conn.get_source_node_id(); + + // Search source_pos for source node only if it is a different node + if ( source_node_id != previous_source_node_id ) + { + const auto it = std::find_if( src_vec->begin(), + src_vec->end(), + [ source_node_id ]( const std::pair< Position< D >, size_t >& p ) { return p.second == source_node_id; } ); + assert( it != src_vec->end() ); // internal error if node not found + + source_pos = it->first; + previous_source_node_id = source_node_id; + } + + DictionaryDatum result_dict = kernel::manager< ConnectionManager >().get_synapse_status( source_node_id, + conn.get_target_node_id(), + conn.get_target_thread(), + conn.get_synapse_model_id(), + conn.get_port() ); + const long target_node_id = getValue< long >( result_dict, names::target ); + const double weight = getValue< double >( result_dict, names::weight ); + const double delay = getValue< double >( result_dict, names::delay ); + const Layer< D >* const tgt_layer = dynamic_cast< Layer< D >* >( target_layer.get() ); + const long tnode_lid = tgt_layer->node_collection_->get_nc_index( target_node_id ); + assert( tnode_lid >= 0 ); + + // Print source, target, weight, delay, rports + out << source_node_id << ' ' << target_node_id << ' ' << weight << ' ' << delay << ' '; + tgt_layer->compute_displacement( source_pos, tnode_lid ).print( out ); + out << '\n'; + } +} + } // namespace nest #endif diff --git a/nestkernel/layer_impl.h b/nestkernel/layer_impl.h index 7f96d0e44b..f181e1ec6d 100644 --- a/nestkernel/layer_impl.h +++ b/nestkernel/layer_impl.h @@ -25,496 +25,6 @@ namespace nest { -inline void 
-AbstractLayer::set_node_collection( NodeCollectionPTR node_collection ) -{ - node_collection_ = node_collection; -} - - -inline NodeCollectionPTR -AbstractLayer::get_node_collection() -{ - return node_collection_; -} - -template < int D > -inline MaskedLayer< D >::MaskedLayer( Layer< D >& layer, - const MaskDatum& maskd, - bool allow_oversized, - NodeCollectionPTR node_collection ) - : mask_( maskd ) -{ - ntree_ = layer.get_global_positions_ntree( node_collection ); - - check_mask_( layer, allow_oversized ); -} - -template < int D > -inline MaskedLayer< D >::MaskedLayer( Layer< D >& layer, - const MaskDatum& maskd, - bool allow_oversized, - Layer< D >& target, - NodeCollectionPTR node_collection ) - : mask_( maskd ) -{ - ntree_ = layer.get_global_positions_ntree( - target.get_periodic_mask(), target.get_lower_left(), target.get_extent(), node_collection ); - - check_mask_( target, allow_oversized ); - mask_ = new ConverseMask< D >( dynamic_cast< const Mask< D >& >( *mask_ ) ); -} - -template < int D > -inline MaskedLayer< D >::~MaskedLayer() -{ -} - -template < int D > -inline typename Ntree< D, size_t >::masked_iterator -MaskedLayer< D >::begin( const Position< D >& anchor ) -{ - try - { - return ntree_->masked_begin( dynamic_cast< const Mask< D >& >( *mask_ ), anchor ); - } - catch ( std::bad_cast& e ) - { - throw BadProperty( "Mask is incompatible with layer." 
); - } -} - -template < int D > -inline typename Ntree< D, size_t >::masked_iterator -MaskedLayer< D >::end() -{ - return ntree_->masked_end(); -} - -template < int D > -inline Layer< D >::Layer() -{ - // Default center (0,0) and extent (1,1) - for ( int i = 0; i < D; ++i ) - { - lower_left_[ i ] = -0.5; - extent_[ i ] = 1.0; - } -} - -template < int D > -inline Layer< D >::Layer( const Layer& other_layer ) - : AbstractLayer( other_layer ) - , lower_left_( other_layer.lower_left_ ) - , extent_( other_layer.extent_ ) - , periodic_( other_layer.periodic_ ) -{ -} - -template < int D > -inline Layer< D >::~Layer() -{ - if ( cached_ntree_md_ == get_metadata() ) - { - clear_ntree_cache_(); - } - - if ( cached_vector_md_ == get_metadata() ) - { - clear_vector_cache_(); - } -} - -template < int D > -inline Position< D > -Layer< D >::compute_displacement( const Position< D >& from_pos, const size_t to_lid ) const -{ - return compute_displacement( from_pos, get_position( to_lid ) ); -} - -template < int D > -inline std::vector< double > -Layer< D >::compute_displacement( const std::vector< double >& from_pos, const size_t to_lid ) const -{ - return std::vector< double >( compute_displacement( Position< D >( from_pos ), to_lid ).get_vector() ); -} - -template < int D > -inline double -Layer< D >::compute_distance( const Position< D >& from_pos, const size_t lid ) const -{ - return compute_displacement( from_pos, lid ).length(); -} - -template < int D > -inline double -Layer< D >::compute_distance( const std::vector< double >& from_pos, const size_t lid ) const -{ - return compute_displacement( Position< D >( from_pos ), lid ).length(); -} - -template < int D > -inline double -Layer< D >::compute_distance( const std::vector< double >& from_pos, const std::vector< double >& to_pos ) const -{ - double squared_displacement = 0; - for ( unsigned int i = 0; i < D; ++i ) - { - const double displacement = compute_displacement( from_pos, to_pos, i ); - squared_displacement += 
displacement * displacement; - } - return std::sqrt( squared_displacement ); -} - -template < int D > -inline std::vector< double > -Layer< D >::get_position_vector( const size_t sind ) const -{ - return get_position( sind ).get_vector(); -} - -template < int D > -inline void -Layer< D >::clear_ntree_cache_() const -{ - cached_ntree_ = std::shared_ptr< Ntree< D, size_t > >(); - cached_ntree_md_ = NodeCollectionMetadataPTR( nullptr ); -} - -template < int D > -inline void -Layer< D >::clear_vector_cache_() const -{ - if ( cached_vector_ != 0 ) - { - delete cached_vector_; - } - cached_vector_ = 0; - cached_vector_md_ = NodeCollectionMetadataPTR( nullptr ); -} - -template < int D > -std::shared_ptr< Ntree< D, size_t > > Layer< D >::cached_ntree_; - -template < int D > -std::vector< std::pair< Position< D >, size_t > >* Layer< D >::cached_vector_ = 0; - -template < int D > -Position< D > -Layer< D >::compute_displacement( const Position< D >& from_pos, const Position< D >& to_pos ) const -{ - Position< D > displ = to_pos; - for ( int i = 0; i < D; ++i ) - { - displ[ i ] -= from_pos[ i ]; - if ( periodic_[ i ] ) - { - displ[ i ] = -0.5 * extent_[ i ] + std::fmod( displ[ i ] + 0.5 * extent_[ i ], extent_[ i ] ); - if ( displ[ i ] < -0.5 * extent_[ i ] ) - { - displ[ i ] += extent_[ i ]; - } - } - } - return displ; -} - -template < int D > -double -Layer< D >::compute_displacement( const std::vector< double >& from_pos, - const std::vector< double >& to_pos, - const unsigned int dimension ) const -{ - double displacement = to_pos[ dimension ] - from_pos[ dimension ]; - if ( periodic_[ dimension ] ) - { - displacement -= extent_[ dimension ] * std::round( displacement * ( 1 / extent_[ dimension ] ) ); - } - return displacement; -} - -template < int D > -void -Layer< D >::set_status( const DictionaryDatum& d ) -{ - if ( d->known( names::edge_wrap ) ) - { - if ( getValue< bool >( d, names::edge_wrap ) ) - { - periodic_ = ( 1 << D ) - 1; // All dimensions periodic - } - } -} 
- -template < int D > -void -Layer< D >::get_status( DictionaryDatum& d, NodeCollection const* nc ) const -{ - ( *d )[ names::extent ] = std::vector< double >( extent_.get_vector() ); - ( *d )[ names::center ] = std::vector< double >( ( lower_left_ + extent_ / 2 ).get_vector() ); - - if ( periodic_.none() ) - { - ( *d )[ names::edge_wrap ] = BoolDatum( false ); - } - else if ( periodic_.count() == D ) - { - ( *d )[ names::edge_wrap ] = true; - } - - if ( nc ) - { - // This is for backward compatibility with some tests and scripts - // TODO: Rename parameter - ( *d )[ names::network_size ] = nc->size(); - } -} - -template < int D > -void -Layer< D >::connect( NodeCollectionPTR source_nc, - AbstractLayerPTR target_layer, - NodeCollectionPTR target_nc, - ConnectionCreator& connector ) -{ - // We need to extract the real pointer here to be able to cast to the - // dimension-specific subclass. - AbstractLayer* target_abs = target_layer.get(); - assert( target_abs ); - - try - { - Layer< D >& tgt = dynamic_cast< Layer< D >& >( *target_abs ); - connector.connect( *this, source_nc, tgt, target_nc ); - } - catch ( std::bad_cast& e ) - { - throw BadProperty( "Target layer must have same number of dimensions as source layer." 
); - } -} - -template < int D > -std::shared_ptr< Ntree< D, size_t > > -Layer< D >::get_global_positions_ntree( NodeCollectionPTR node_collection ) -{ - if ( cached_ntree_md_ == node_collection->get_metadata() ) - { - assert( cached_ntree_.get() ); - return cached_ntree_; - } - - clear_ntree_cache_(); - - cached_ntree_ = std::shared_ptr< Ntree< D, size_t > >( - new Ntree< D, size_t >( this->lower_left_, this->extent_, this->periodic_ ) ); - - return do_get_global_positions_ntree_( node_collection ); -} - -template < int D > -std::shared_ptr< Ntree< D, size_t > > -Layer< D >::get_global_positions_ntree( std::bitset< D > periodic, - Position< D > lower_left, - Position< D > extent, - NodeCollectionPTR node_collection ) -{ - clear_ntree_cache_(); - clear_vector_cache_(); - - // Keep layer geometry for non-periodic dimensions - for ( int i = 0; i < D; ++i ) - { - if ( not periodic[ i ] ) - { - extent[ i ] = extent_[ i ]; - lower_left[ i ] = lower_left_[ i ]; - } - } - - cached_ntree_ = - std::shared_ptr< Ntree< D, size_t > >( new Ntree< D, size_t >( this->lower_left_, extent, periodic ) ); - - do_get_global_positions_ntree_( node_collection ); - - // Do not use cache since the periodic bits and extents were altered. 
- cached_ntree_md_ = NodeCollectionMetadataPTR( nullptr ); - - return cached_ntree_; -} - -template < int D > -std::shared_ptr< Ntree< D, size_t > > -Layer< D >::do_get_global_positions_ntree_( NodeCollectionPTR node_collection ) -{ - if ( cached_vector_md_ == node_collection->get_metadata() ) - { - // Convert from vector to Ntree - - typename std::insert_iterator< Ntree< D, size_t > > to = std::inserter( *cached_ntree_, cached_ntree_->end() ); - - for ( typename std::vector< std::pair< Position< D >, size_t > >::iterator from = cached_vector_->begin(); - from != cached_vector_->end(); - ++from ) - { - *to = *from; - } - } - else - { - - insert_global_positions_ntree_( *cached_ntree_, node_collection ); - } - - clear_vector_cache_(); - - cached_ntree_md_ = node_collection->get_metadata(); - - return cached_ntree_; -} - -template < int D > -std::vector< std::pair< Position< D >, size_t > >* -Layer< D >::get_global_positions_vector( NodeCollectionPTR node_collection ) -{ - if ( cached_vector_md_ == node_collection->get_metadata() ) - { - assert( cached_vector_ ); - return cached_vector_; - } - - clear_vector_cache_(); - - cached_vector_ = new std::vector< std::pair< Position< D >, size_t > >; - - if ( cached_ntree_md_ == node_collection->get_metadata() ) - { - // Convert from NTree to vector - - typename std::back_insert_iterator< std::vector< std::pair< Position< D >, size_t > > > to = - std::back_inserter( *cached_vector_ ); - - for ( typename Ntree< D, size_t >::iterator from = cached_ntree_->begin(); from != cached_ntree_->end(); ++from ) - { - *to = *from; - } - } - else - { - insert_global_positions_vector_( *cached_vector_, node_collection ); - } - - clear_ntree_cache_(); - - cached_vector_md_ = node_collection->get_metadata(); - - return cached_vector_; -} - -template < int D > -std::vector< std::pair< Position< D >, size_t > > -Layer< D >::get_global_positions_vector( const MaskDatum& mask, - const Position< D >& anchor, - bool allow_oversized, - 
NodeCollectionPTR node_collection ) -{ - MaskedLayer< D > masked_layer( *this, mask, allow_oversized, node_collection ); - std::vector< std::pair< Position< D >, size_t > > positions; - - for ( typename Ntree< D, size_t >::masked_iterator iter = masked_layer.begin( anchor ); iter != masked_layer.end(); - ++iter ) - { - positions.push_back( *iter ); - } - - return positions; -} - -template < int D > -std::vector< size_t > -Layer< D >::get_global_nodes( const MaskDatum& mask, - const std::vector< double >& anchor, - bool allow_oversized, - NodeCollectionPTR node_collection ) -{ - MaskedLayer< D > masked_layer( *this, mask, allow_oversized, node_collection ); - std::vector< size_t > nodes; - for ( typename Ntree< D, size_t >::masked_iterator i = masked_layer.begin( anchor ); i != masked_layer.end(); ++i ) - { - nodes.push_back( i->second ); - } - return nodes; -} - -template < int D > -void -Layer< D >::dump_nodes( std::ostream& out ) const -{ - for ( NodeCollection::const_iterator it = this->node_collection_->rank_local_begin(); - it < this->node_collection_->end(); - ++it ) - { - out << ( *it ).node_id << ' '; - get_position( ( *it ).nc_index ).print( out ); - out << std::endl; - } -} - -template < int D > -void -Layer< D >::dump_connections( std::ostream& out, - NodeCollectionPTR node_collection, - AbstractLayerPTR target_layer, - const Token& syn_model ) -{ - // Find all connections for given sources, targets and synapse model - DictionaryDatum conn_filter( new Dictionary ); - def( conn_filter, names::source, NodeCollectionDatum( node_collection ) ); - def( conn_filter, names::target, NodeCollectionDatum( target_layer->get_node_collection() ) ); - def( conn_filter, names::synapse_model, syn_model ); - ArrayDatum connectome = kernel().connection_manager.get_connections( conn_filter ); - - // Get positions of remote nodes - std::vector< std::pair< Position< D >, size_t > >* src_vec = get_global_positions_vector( node_collection ); - - // Iterate over connectome and 
write every connection, looking up source position only if source neuron changes - size_t previous_source_node_id = 0; // dummy initial value, cannot be node_id of any node - Position< D > source_pos; // dummy value - for ( const auto& entry : connectome ) - { - ConnectionDatum conn = getValue< ConnectionDatum >( entry ); - const size_t source_node_id = conn.get_source_node_id(); - - // Search source_pos for source node only if it is a different node - if ( source_node_id != previous_source_node_id ) - { - const auto it = std::find_if( src_vec->begin(), - src_vec->end(), - [ source_node_id ]( const std::pair< Position< D >, size_t >& p ) { return p.second == source_node_id; } ); - assert( it != src_vec->end() ); // internal error if node not found - - source_pos = it->first; - previous_source_node_id = source_node_id; - } - - DictionaryDatum result_dict = kernel().connection_manager.get_synapse_status( source_node_id, - conn.get_target_node_id(), - conn.get_target_thread(), - conn.get_synapse_model_id(), - conn.get_port() ); - const long target_node_id = getValue< long >( result_dict, names::target ); - const double weight = getValue< double >( result_dict, names::weight ); - const double delay = getValue< double >( result_dict, names::delay ); - const Layer< D >* const tgt_layer = dynamic_cast< Layer< D >* >( target_layer.get() ); - const long tnode_lid = tgt_layer->node_collection_->get_nc_index( target_node_id ); - assert( tnode_lid >= 0 ); - - // Print source, target, weight, delay, rports - out << source_node_id << ' ' << target_node_id << ' ' << weight << ' ' << delay << ' '; - tgt_layer->compute_displacement( source_pos, tnode_lid ).print( out ); - out << '\n'; - } -} - template < int D > void MaskedLayer< D >::check_mask_( Layer< D >& layer, bool allow_oversized ) From cd6271f9d6667daf8032071a4af3079386fa8902 Mon Sep 17 00:00:00 2001 From: Jan Vogelsang <47158055+JanVogelsang@users.noreply.github.com> Date: Wed, 13 Aug 2025 13:11:11 +0200 Subject: [PATCH 
11/23] Update models/CMakeLists.txt Co-authored-by: Dennis Terhorst --- models/CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/models/CMakeLists.txt b/models/CMakeLists.txt index 3746c98cf0..5da8995e4e 100644 --- a/models/CMakeLists.txt +++ b/models/CMakeLists.txt @@ -24,9 +24,9 @@ set(models_sources cm_compartmentcurrents.h cm_compartmentcurrents.cpp cm_tree.h cm_tree.cpp rate_neuron_ipn.h - rate_neuron_opn.h - rate_transformer_node.h - weight_optimizer.h weight_optimizer.cpp + rate_neuron_opn.h + rate_transformer_node.h + weight_optimizer.h weight_optimizer.cpp ${MODELS_SOURCES_GENERATED} ) From 6ebf55fe08ed74df855b7868fdbfd5c78f796ef5 Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Wed, 13 Aug 2025 13:25:30 +0200 Subject: [PATCH 12/23] Fixed cherry-pick issue --- nestkernel/layer.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nestkernel/layer.h b/nestkernel/layer.h index 054f79f35c..2f14812ff5 100644 --- a/nestkernel/layer.h +++ b/nestkernel/layer.h @@ -973,7 +973,7 @@ Layer< D >::dump_connections( std::ostream& out, def( conn_filter, names::source, NodeCollectionDatum( node_collection ) ); def( conn_filter, names::target, NodeCollectionDatum( target_layer->get_node_collection() ) ); def( conn_filter, names::synapse_model, syn_model ); - ArrayDatum connectome = kernel::manager< ConnectionManager >().get_connections( conn_filter ); + ArrayDatum connectome = kernel().connection_manager.get_connections( conn_filter ); // Get positions of remote nodes std::vector< std::pair< Position< D >, size_t > >* src_vec = get_global_positions_vector( node_collection ); @@ -998,7 +998,7 @@ Layer< D >::dump_connections( std::ostream& out, previous_source_node_id = source_node_id; } - DictionaryDatum result_dict = kernel::manager< ConnectionManager >().get_synapse_status( source_node_id, + DictionaryDatum result_dict = kernel().connection_manager.get_synapse_status( source_node_id, conn.get_target_node_id(), 
conn.get_target_thread(), conn.get_synapse_model_id(), From 9884718aa5414989724d3bc9d7941e69daaae1a4 Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Wed, 13 Aug 2025 13:25:46 +0200 Subject: [PATCH 13/23] Fixed non-MPI version --- nestkernel/io_manager.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nestkernel/io_manager.cpp b/nestkernel/io_manager.cpp index 4633285782..5a1f9dd75d 100644 --- a/nestkernel/io_manager.cpp +++ b/nestkernel/io_manager.cpp @@ -47,6 +47,8 @@ #ifdef HAVE_MPI #include "recording_backend_mpi.h" #include "stimulation_backend_mpi.h" +#else +#include "stimulation_backend.h" #endif #ifdef HAVE_SIONLIB #include "recording_backend_sionlib.h" From 691c39b1111f1cbeda94ff4114d88d39fd1edf8d Mon Sep 17 00:00:00 2001 From: Jan Vogelsang <47158055+JanVogelsang@users.noreply.github.com> Date: Wed, 13 Aug 2025 13:11:11 +0200 Subject: [PATCH 14/23] Update models/CMakeLists.txt Co-authored-by: Dennis Terhorst (cherry picked from commit cd6271f9d6667daf8032071a4af3079386fa8902) --- models/CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/models/CMakeLists.txt b/models/CMakeLists.txt index 3746c98cf0..5da8995e4e 100644 --- a/models/CMakeLists.txt +++ b/models/CMakeLists.txt @@ -24,9 +24,9 @@ set(models_sources cm_compartmentcurrents.h cm_compartmentcurrents.cpp cm_tree.h cm_tree.cpp rate_neuron_ipn.h - rate_neuron_opn.h - rate_transformer_node.h - weight_optimizer.h weight_optimizer.cpp + rate_neuron_opn.h + rate_transformer_node.h + weight_optimizer.h weight_optimizer.cpp ${MODELS_SOURCES_GENERATED} ) From 4f102e1a28b30bc2c9d90d74a4282f5c70ae786c Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Wed, 13 Aug 2025 13:25:46 +0200 Subject: [PATCH 15/23] Fixed non-MPI version (cherry picked from commit 9884718aa5414989724d3bc9d7941e69daaae1a4) --- nestkernel/io_manager.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nestkernel/io_manager.cpp b/nestkernel/io_manager.cpp index 4633285782..5a1f9dd75d 100644 --- 
a/nestkernel/io_manager.cpp +++ b/nestkernel/io_manager.cpp @@ -47,6 +47,8 @@ #ifdef HAVE_MPI #include "recording_backend_mpi.h" #include "stimulation_backend_mpi.h" +#else +#include "stimulation_backend.h" #endif #ifdef HAVE_SIONLIB #include "recording_backend_sionlib.h" From 076e11a7ef1fb418824f8d99aee7c8cb71d73ffa Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Wed, 13 Aug 2025 13:52:03 +0200 Subject: [PATCH 16/23] Fixed conngen --- nestkernel/conn_builder_conngen.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nestkernel/conn_builder_conngen.cpp b/nestkernel/conn_builder_conngen.cpp index 7c0547ab34..f6699cc501 100644 --- a/nestkernel/conn_builder_conngen.cpp +++ b/nestkernel/conn_builder_conngen.cpp @@ -26,6 +26,8 @@ // Includes from nestkernel: #include "kernel_manager.h" +#include "nest.h" +#include "node_manager.h" // Includes from sli: #include "dictutils.h" From 0313d499cae5645caa035d43473fabe38c154ca8 Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Wed, 13 Aug 2025 14:37:16 +0200 Subject: [PATCH 17/23] Fixed sonata --- nestkernel/sonata_connector.cpp | 2 ++ nestkernel/sonata_connector.h | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nestkernel/sonata_connector.cpp b/nestkernel/sonata_connector.cpp index 59b7583c66..3a1d205623 100644 --- a/nestkernel/sonata_connector.cpp +++ b/nestkernel/sonata_connector.cpp @@ -30,6 +30,8 @@ // Includes from nestkernel: #include "kernel_manager.h" +#include "nest.h" +#include "node_manager.h" // Includes from sli: #include "dictutils.h" diff --git a/nestkernel/sonata_connector.h b/nestkernel/sonata_connector.h index 4420346b0e..2f2e0395b0 100644 --- a/nestkernel/sonata_connector.h +++ b/nestkernel/sonata_connector.h @@ -29,11 +29,11 @@ // C++ includes: #include +#include #include // Includes from nestkernel: #include "conn_parameter.h" -#include "kernel_manager.h" #include "nest_datums.h" #include "H5Cpp.h" From e2ae1f300081b00ff210a85f6d71a3b6cda6b1d3 Mon Sep 17 00:00:00 2001 From: Jan 
Vogelsang Date: Wed, 13 Aug 2025 13:52:03 +0200 Subject: [PATCH 18/23] Fixed conngen (cherry picked from commit 076e11a7ef1fb418824f8d99aee7c8cb71d73ffa) --- nestkernel/conn_builder_conngen.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nestkernel/conn_builder_conngen.cpp b/nestkernel/conn_builder_conngen.cpp index cbe075029e..fdb94a81c8 100644 --- a/nestkernel/conn_builder_conngen.cpp +++ b/nestkernel/conn_builder_conngen.cpp @@ -26,6 +26,8 @@ // Includes from nestkernel: #include "kernel_manager.h" +#include "nest.h" +#include "node_manager.h" // Includes from sli: #include "dictutils.h" From e02482fbac83593f74422f5d37cf9b3ca5c40861 Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Wed, 13 Aug 2025 14:37:16 +0200 Subject: [PATCH 19/23] Fixed sonata (cherry picked from commit 0313d499cae5645caa035d43473fabe38c154ca8) --- nestkernel/sonata_connector.cpp | 2 ++ nestkernel/sonata_connector.h | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nestkernel/sonata_connector.cpp b/nestkernel/sonata_connector.cpp index c6b2c27cd6..8d7e4b7c97 100644 --- a/nestkernel/sonata_connector.cpp +++ b/nestkernel/sonata_connector.cpp @@ -30,6 +30,8 @@ // Includes from nestkernel: #include "kernel_manager.h" +#include "nest.h" +#include "node_manager.h" // Includes from sli: #include "dictutils.h" diff --git a/nestkernel/sonata_connector.h b/nestkernel/sonata_connector.h index 4420346b0e..2f2e0395b0 100644 --- a/nestkernel/sonata_connector.h +++ b/nestkernel/sonata_connector.h @@ -29,11 +29,11 @@ // C++ includes: #include +#include #include // Includes from nestkernel: #include "conn_parameter.h" -#include "kernel_manager.h" #include "nest_datums.h" #include "H5Cpp.h" From 5c5411900f83b00c40dac79e2782965b5ad6448f Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Wed, 13 Aug 2025 15:13:06 +0200 Subject: [PATCH 20/23] Fixed sionlib --- nestkernel/recording_backend_sionlib.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git 
a/nestkernel/recording_backend_sionlib.cpp b/nestkernel/recording_backend_sionlib.cpp index 2131cd3fc9..38c544e31a 100644 --- a/nestkernel/recording_backend_sionlib.cpp +++ b/nestkernel/recording_backend_sionlib.cpp @@ -32,12 +32,13 @@ // Includes from libnestutil: #include "compose.hpp" -// Includes from nest: -#include "../nest/neststartup.h" - // Includes from nestkernel: +#include "io_manager.h" +#include "logging.h" +#include "logging_manager.h" #include "recording_backend_sionlib.h" #include "recording_device.h" +#include "simulation_manager.h" const unsigned int nest::RecordingBackendSIONlib::SIONLIB_REC_BACKEND_VERSION = 2; const unsigned int nest::RecordingBackendSIONlib::DEV_NAME_BUFFERSIZE = 32; From 508e349aa4e6bce3c7fc3f58653941689197f5af Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Wed, 13 Aug 2025 15:13:06 +0200 Subject: [PATCH 21/23] Fixed sionlib --- nestkernel/recording_backend_sionlib.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nestkernel/recording_backend_sionlib.cpp b/nestkernel/recording_backend_sionlib.cpp index 33753117e5..d24706eee9 100644 --- a/nestkernel/recording_backend_sionlib.cpp +++ b/nestkernel/recording_backend_sionlib.cpp @@ -32,12 +32,13 @@ // Includes from libnestutil: #include "compose.hpp" -// Includes from nest: -#include "../nest/neststartup.h" - // Includes from nestkernel: +#include "io_manager.h" +#include "logging.h" +#include "logging_manager.h" #include "recording_backend_sionlib.h" #include "recording_device.h" +#include "simulation_manager.h" const unsigned int nest::RecordingBackendSIONlib::SIONLIB_REC_BACKEND_VERSION = 2; const unsigned int nest::RecordingBackendSIONlib::DEV_NAME_BUFFERSIZE = 32; From ba7cfc68d79c4383da6015ff6b25441aa886b4a0 Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Wed, 13 Aug 2025 15:49:13 +0200 Subject: [PATCH 22/23] Attempting to fix macos issue --- nestkernel/recording_backend_mpi.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/nestkernel/recording_backend_mpi.cpp b/nestkernel/recording_backend_mpi.cpp index 127adb1647..2056bf7857 100644 --- a/nestkernel/recording_backend_mpi.cpp +++ b/nestkernel/recording_backend_mpi.cpp @@ -22,7 +22,7 @@ // C++ includes: #include - +#include // Includes from nestkernel: #include "exceptions.h" From 67f510f8ba3cec841840666d15bc3f03179db409 Mon Sep 17 00:00:00 2001 From: Jan Vogelsang Date: Thu, 21 Aug 2025 12:03:13 +0200 Subject: [PATCH 23/23] Changed manager access syntax --- CMakeLists.txt | 3 + doc/htmldoc/sg_execution_times.rst | 355 ++++++++++++++++++++ libnestutil/dict_util.h | 4 +- libnestutil/logging.h | 8 +- models/ac_generator.cpp | 4 +- models/aeif_cond_alpha.cpp | 9 +- models/aeif_cond_alpha_astro.cpp | 11 +- models/aeif_cond_alpha_multisynapse.cpp | 7 +- models/aeif_cond_beta_multisynapse.cpp | 7 +- models/aeif_cond_exp.cpp | 9 +- models/aeif_psc_alpha.cpp | 9 +- models/aeif_psc_delta.cpp | 7 +- models/aeif_psc_delta_clopath.cpp | 7 +- models/aeif_psc_exp.cpp | 9 +- models/amat2_psc_exp.cpp | 9 +- models/astrocyte_lr_1994.cpp | 9 +- models/binary_neuron.h | 11 +- models/cm_default.cpp | 6 +- models/cont_delay_synapse.h | 4 +- models/correlomatrix_detector.cpp | 2 +- models/correlospinmatrix_detector.cpp | 2 +- models/dc_generator.cpp | 2 +- models/eprop_iaf.cpp | 6 +- models/eprop_iaf_adapt.cpp | 6 +- models/eprop_iaf_adapt_bsshslm_2020.cpp | 14 +- models/eprop_iaf_bsshslm_2020.cpp | 14 +- models/eprop_iaf_psc_delta.cpp | 6 +- models/eprop_iaf_psc_delta_adapt.cpp | 6 +- models/eprop_readout.cpp | 8 +- models/eprop_readout.h | 2 +- models/eprop_readout_bsshslm_2020.cpp | 18 +- models/eprop_readout_bsshslm_2020.h | 2 +- models/eprop_synapse.cpp | 2 +- models/eprop_synapse_bsshslm_2020.cpp | 2 +- models/eprop_synapse_bsshslm_2020.h | 4 +- models/gamma_sup_generator.cpp | 2 +- models/gif_cond_exp.cpp | 9 +- models/gif_cond_exp_multisynapse.cpp | 7 +- models/gif_pop_psc_exp.cpp | 9 +- models/gif_psc_exp.cpp | 9 +- 
models/gif_psc_exp_multisynapse.cpp | 7 +- models/glif_cond.cpp | 6 +- models/glif_psc.cpp | 2 +- models/glif_psc.h | 4 +- models/glif_psc_double_alpha.cpp | 6 +- models/hh_cond_beta_gap_traub.cpp | 31 +- models/hh_cond_exp_traub.cpp | 9 +- models/hh_psc_alpha.cpp | 9 +- models/hh_psc_alpha_clopath.cpp | 9 +- models/hh_psc_alpha_gap.cpp | 31 +- models/ht_neuron.cpp | 7 +- models/iaf_bw_2001.cpp | 4 +- models/iaf_bw_2001.h | 4 +- models/iaf_bw_2001_exact.cpp | 2 +- models/iaf_bw_2001_exact.h | 4 +- models/iaf_chs_2007.cpp | 4 +- models/iaf_chxk_2008.cpp | 8 +- models/iaf_cond_alpha.cpp | 8 +- models/iaf_cond_alpha_mc.cpp | 6 +- models/iaf_cond_beta.cpp | 8 +- models/iaf_cond_exp.cpp | 9 +- models/iaf_cond_exp_sfa_rr.cpp | 9 +- models/iaf_psc_alpha.cpp | 12 +- models/iaf_psc_alpha_multisynapse.cpp | 7 +- models/iaf_psc_alpha_ps.cpp | 8 +- models/iaf_psc_delta.cpp | 7 +- models/iaf_psc_delta_ps.cpp | 9 +- models/iaf_psc_exp.cpp | 12 +- models/iaf_psc_exp_htum.cpp | 9 +- models/iaf_psc_exp_multisynapse.cpp | 7 +- models/iaf_psc_exp_ps.cpp | 8 +- models/iaf_psc_exp_ps_lossless.cpp | 8 +- models/iaf_tum_2000.cpp | 12 +- models/ignore_and_fire.cpp | 10 +- models/inhomogeneous_poisson_generator.cpp | 4 +- models/izhikevich.cpp | 7 +- models/jonke_synapse.h | 2 +- models/mat2_psc_exp.cpp | 9 +- models/mip_generator.cpp | 2 +- models/multimeter.cpp | 2 +- models/multimeter.h | 2 +- models/music_cont_in_proxy.cpp | 2 +- models/music_cont_out_proxy.cpp | 12 +- models/music_event_in_proxy.cpp | 14 +- models/music_event_out_proxy.cpp | 2 +- models/music_message_in_proxy.cpp | 2 +- models/music_rate_in_proxy.cpp | 12 +- models/music_rate_out_proxy.cpp | 2 +- models/noise_generator.cpp | 4 +- models/noise_generator.h | 2 +- models/parrot_neuron.cpp | 4 +- models/parrot_neuron_ps.cpp | 4 +- models/poisson_generator.cpp | 2 +- models/poisson_generator_ps.cpp | 2 +- models/pp_cond_exp_mc_urbanczik.cpp | 6 +- models/pp_psc_delta.cpp | 7 +- models/ppd_sup_generator.cpp | 2 +- 
models/pulsepacket_generator.cpp | 4 +- models/rate_neuron_ipn.h | 16 +- models/rate_neuron_opn.h | 16 +- models/rate_transformer_node.h | 16 +- models/siegert_neuron.cpp | 12 +- models/sinusoidal_gamma_generator.cpp | 8 +- models/sinusoidal_poisson_generator.cpp | 6 +- models/spike_dilutor.cpp | 6 +- models/spike_generator.cpp | 2 +- models/spike_generator.h | 2 +- models/spike_recorder.cpp | 2 +- models/spike_train_injector.cpp | 4 +- models/spike_train_injector.h | 2 +- models/spin_detector.cpp | 2 +- models/stdp_dopamine_synapse.cpp | 4 +- models/stdp_dopamine_synapse.h | 8 +- models/stdp_facetshw_synapse_hom.h | 2 +- models/stdp_nn_pre_centered_synapse.h | 2 +- models/stdp_nn_restr_synapse.h | 2 +- models/stdp_nn_symm_synapse.h | 2 +- models/stdp_pl_synapse_hom.h | 2 +- models/stdp_synapse.h | 2 +- models/stdp_synapse_hom.h | 2 +- models/stdp_triplet_synapse.h | 2 +- models/step_current_generator.cpp | 2 +- models/step_rate_generator.cpp | 4 +- models/vogels_sprekeler_synapse.h | 2 +- models/volume_transmitter.cpp | 14 +- models/weight_recorder.cpp | 2 +- nest/neststartup.cpp | 6 +- nestkernel/archiving_node.cpp | 14 +- nestkernel/buffer_resize_log.cpp | 2 +- nestkernel/clopath_archiving_node.cpp | 4 +- nestkernel/common_synapse_properties.cpp | 4 +- nestkernel/conn_builder.cpp | 208 ++++++------ nestkernel/conn_builder.h | 2 +- nestkernel/conn_builder_conngen.cpp | 10 +- nestkernel/connection.h | 2 +- nestkernel/connection_creator.cpp | 14 +- nestkernel/connection_creator.h | 62 ++-- nestkernel/connection_manager.cpp | 207 ++++++------ nestkernel/connector_base.cpp | 3 +- nestkernel/connector_model.cpp | 2 +- nestkernel/connector_model_impl.h | 18 +- nestkernel/delay_checker.cpp | 14 +- nestkernel/eprop_archiving_node.h | 2 +- nestkernel/eprop_archiving_node_recurrent.h | 2 +- nestkernel/event.cpp | 2 +- nestkernel/event_delivery_manager.cpp | 196 ++++++----- nestkernel/event_delivery_manager.h | 30 +- nestkernel/free_layer.h | 12 +- 
nestkernel/kernel_manager.cpp | 40 +-- nestkernel/kernel_manager.h | 25 +- nestkernel/layer.cpp | 4 +- nestkernel/layer.h | 4 +- nestkernel/model.cpp | 4 +- nestkernel/model_manager.cpp | 36 +- nestkernel/model_manager.h | 10 +- nestkernel/modelrange_manager.cpp | 2 +- nestkernel/module_manager.cpp | 2 +- nestkernel/mpi_manager.cpp | 15 +- nestkernel/music_event_handler.cpp | 2 +- nestkernel/music_rate_in_handler.cpp | 4 +- nestkernel/nest.cpp | 76 ++--- nestkernel/nest.h | 4 +- nestkernel/nestmodule.cpp | 83 +++-- nestkernel/node.cpp | 6 +- nestkernel/node_collection.cpp | 114 +++---- nestkernel/node_manager.cpp | 116 +++---- nestkernel/parameter.cpp | 14 +- nestkernel/per_thread_bool_indicator.cpp | 18 +- nestkernel/proxynode.cpp | 18 +- nestkernel/random_manager.cpp | 14 +- nestkernel/recording_backend_ascii.cpp | 14 +- nestkernel/recording_backend_memory.cpp | 4 +- nestkernel/recording_backend_mpi.cpp | 18 +- nestkernel/recording_backend_screen.cpp | 2 +- nestkernel/recording_backend_sionlib.cpp | 30 +- nestkernel/recording_device.cpp | 19 +- nestkernel/ring_buffer.cpp | 12 +- nestkernel/ring_buffer.h | 18 +- nestkernel/secondary_event.h | 4 +- nestkernel/send_buffer_position.cpp | 10 +- nestkernel/simulation_manager.cpp | 188 +++++------ nestkernel/slice_ring_buffer.cpp | 10 +- nestkernel/slice_ring_buffer.h | 2 +- nestkernel/sonata_connector.cpp | 22 +- nestkernel/source_table.cpp | 82 ++--- nestkernel/sp_manager.cpp | 84 ++--- nestkernel/sparse_node_array.cpp | 3 +- nestkernel/spatial.cpp | 24 +- nestkernel/stimulation_backend_mpi.cpp | 24 +- nestkernel/stimulation_device.cpp | 6 +- nestkernel/stopwatch.h | 28 +- nestkernel/synaptic_element.cpp | 6 +- nestkernel/target_identifier.h | 4 +- nestkernel/target_table.cpp | 10 +- nestkernel/target_table_devices.cpp | 36 +- nestkernel/target_table_devices.h | 8 +- nestkernel/universal_data_logger.h | 32 +- nestkernel/vp_manager.cpp | 20 +- nestkernel/vp_manager.h | 18 +- 199 files changed, 1815 insertions(+), 1523 
deletions(-) create mode 100644 doc/htmldoc/sg_execution_times.rst diff --git a/CMakeLists.txt b/CMakeLists.txt index 137a952507..d486f04cb4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -118,6 +118,9 @@ include( CheckExtraCompilerFeatures ) include( ConfigureSummary ) include( GetTriple ) +# enable link-time optimizations +set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE) + # get triples arch-vendor-os get_host_triple( NEST_HOST_TRIPLE NEST_HOST_ARCH NEST_HOST_VENDOR NEST_HOST_OS ) get_target_triple( NEST_TARGET_TRIPLE NEST_TARGET_ARCH NEST_TARGET_VENDOR NEST_TARGET_OS ) diff --git a/doc/htmldoc/sg_execution_times.rst b/doc/htmldoc/sg_execution_times.rst new file mode 100644 index 0000000000..a414a4e7b4 --- /dev/null +++ b/doc/htmldoc/sg_execution_times.rst @@ -0,0 +1,355 @@ + +:orphan: + +.. _sphx_glr_sg_execution_times: + + +Computation times +================= +**00:00.000** total execution time for 107 files **from all galleries**: + +.. container:: + + .. raw:: html + + + + + + + + .. 
list-table:: + :header-rows: 1 + :class: table table-striped sg-datatable + + * - Example + - Time + - Mem (MB) + * - :ref:`sphx_glr_auto_examples_BrodyHopfield.py` (``../../pynest/examples/BrodyHopfield.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_CampbellSiegert.py` (``../../pynest/examples/CampbellSiegert.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_EI_clustered_network_helper.py` (``../../pynest/examples/EI_clustered_network/helper.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_EI_clustered_network_network.py` (``../../pynest/examples/EI_clustered_network/network.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_EI_clustered_network_network_params.py` (``../../pynest/examples/EI_clustered_network/network_params.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_EI_clustered_network_run_simulation.py` (``../../pynest/examples/EI_clustered_network/run_simulation.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_EI_clustered_network_sim_params.py` (``../../pynest/examples/EI_clustered_network/sim_params.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_EI_clustered_network_stimulus_params.py` (``../../pynest/examples/EI_clustered_network/stimulus_params.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_Potjans_2014_helpers.py` (``../../pynest/examples/Potjans_2014/helpers.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_Potjans_2014_network.py` (``../../pynest/examples/Potjans_2014/network.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_Potjans_2014_network_params.py` (``../../pynest/examples/Potjans_2014/network_params.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_Potjans_2014_run_microcircuit.py` (``../../pynest/examples/Potjans_2014/run_microcircuit.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_Potjans_2014_sim_params.py` (``../../pynest/examples/Potjans_2014/sim_params.py``) + - 
00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_Potjans_2014_stimulus_params.py` (``../../pynest/examples/Potjans_2014/stimulus_params.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_aeif_cond_beta_multisynapse.py` (``../../pynest/examples/aeif_cond_beta_multisynapse.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_astrocytes_astrocyte_brunel_bernoulli.py` (``../../pynest/examples/astrocytes/astrocyte_brunel_bernoulli.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_astrocytes_astrocyte_brunel_fixed_indegree.py` (``../../pynest/examples/astrocytes/astrocyte_brunel_fixed_indegree.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_astrocytes_astrocyte_interaction.py` (``../../pynest/examples/astrocytes/astrocyte_interaction.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_astrocytes_astrocyte_single.py` (``../../pynest/examples/astrocytes/astrocyte_single.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_astrocytes_astrocyte_small_network.py` (``../../pynest/examples/astrocytes/astrocyte_small_network.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_balancedneuron.py` (``../../pynest/examples/balancedneuron.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_brette_gerstner_fig_2c.py` (``../../pynest/examples/brette_gerstner_fig_2c.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_brette_gerstner_fig_3d.py` (``../../pynest/examples/brette_gerstner_fig_3d.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_brunel_alpha_evolution_strategies.py` (``../../pynest/examples/brunel_alpha_evolution_strategies.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_brunel_alpha_nest.py` (``../../pynest/examples/brunel_alpha_nest.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_brunel_delta_nest.py` (``../../pynest/examples/brunel_delta_nest.py``) + - 00:00.000 + - 0.0 + * - 
:ref:`sphx_glr_auto_examples_brunel_exp_multisynapse_nest.py` (``../../pynest/examples/brunel_exp_multisynapse_nest.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_brunel_siegert_nest.py` (``../../pynest/examples/brunel_siegert_nest.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_clopath_synapse_small_network.py` (``../../pynest/examples/clopath_synapse_small_network.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_clopath_synapse_spike_pairing.py` (``../../pynest/examples/clopath_synapse_spike_pairing.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_compartmental_model_receptors_and_current.py` (``../../pynest/examples/compartmental_model/receptors_and_current.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_compartmental_model_two_comps.py` (``../../pynest/examples/compartmental_model/two_comps.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_correlospinmatrix_detector_two_neuron.py` (``../../pynest/examples/correlospinmatrix_detector_two_neuron.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_cross_check_mip_corrdet.py` (``../../pynest/examples/cross_check_mip_corrdet.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_csa_example.py` (``../../pynest/examples/csa_example.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_csa_spatial_example.py` (``../../pynest/examples/csa_spatial_example.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_eprop_plasticity_eprop_supervised_classification_evidence-accumulation_bsshslm_2020.py` (``../../pynest/examples/eprop_plasticity/eprop_supervised_classification_evidence-accumulation_bsshslm_2020.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_eprop_plasticity_eprop_supervised_classification_neuromorphic_mnist.py` (``../../pynest/examples/eprop_plasticity/eprop_supervised_classification_neuromorphic_mnist.py``) + - 00:00.000 + - 0.0 + * - 
:ref:`sphx_glr_auto_examples_eprop_plasticity_eprop_supervised_regression_handwriting_bsshslm_2020.py` (``../../pynest/examples/eprop_plasticity/eprop_supervised_regression_handwriting_bsshslm_2020.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_eprop_plasticity_eprop_supervised_regression_lemniscate_bsshslm_2020.py` (``../../pynest/examples/eprop_plasticity/eprop_supervised_regression_lemniscate_bsshslm_2020.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_eprop_plasticity_eprop_supervised_regression_sine-waves.py` (``../../pynest/examples/eprop_plasticity/eprop_supervised_regression_sine-waves.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_eprop_plasticity_eprop_supervised_regression_sine-waves_bsshslm_2020.py` (``../../pynest/examples/eprop_plasticity/eprop_supervised_regression_sine-waves_bsshslm_2020.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_evaluate_quantal_stp_synapse.py` (``../../pynest/examples/evaluate_quantal_stp_synapse.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_evaluate_tsodyks2_synapse.py` (``../../pynest/examples/evaluate_tsodyks2_synapse.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_gap_junctions_inhibitory_network.py` (``../../pynest/examples/gap_junctions_inhibitory_network.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_gap_junctions_two_neurons.py` (``../../pynest/examples/gap_junctions_two_neurons.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_gif_cond_exp_multisynapse.py` (``../../pynest/examples/gif_cond_exp_multisynapse.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_gif_pop_psc_exp.py` (``../../pynest/examples/gif_pop_psc_exp.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_gif_population.py` (``../../pynest/examples/gif_population.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_glif_cond_neuron.py` (``../../pynest/examples/glif_cond_neuron.py``) + - 00:00.000 + - 
0.0 + * - :ref:`sphx_glr_auto_examples_glif_psc_double_alpha_neuron.py` (``../../pynest/examples/glif_psc_double_alpha_neuron.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_glif_psc_neuron.py` (``../../pynest/examples/glif_psc_neuron.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_hh_phaseplane.py` (``../../pynest/examples/hh_phaseplane.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_hh_psc_alpha.py` (``../../pynest/examples/hh_psc_alpha.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_hpc_benchmark.py` (``../../pynest/examples/hpc_benchmark.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_iaf_tum_2000_short_term_depression.py` (``../../pynest/examples/iaf_tum_2000_short_term_depression.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_iaf_tum_2000_short_term_facilitation.py` (``../../pynest/examples/iaf_tum_2000_short_term_facilitation.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_if_curve.py` (``../../pynest/examples/if_curve.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_intrinsic_currents_spiking.py` (``../../pynest/examples/intrinsic_currents_spiking.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_intrinsic_currents_subthreshold.py` (``../../pynest/examples/intrinsic_currents_subthreshold.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_lin_rate_ipn_network.py` (``../../pynest/examples/lin_rate_ipn_network.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_mc_neuron.py` (``../../pynest/examples/mc_neuron.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_multimeter_file.py` (``../../pynest/examples/multimeter_file.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_music_cont_out_proxy_example_nest_script.py` (``../../pynest/examples/music_cont_out_proxy_example/nest_script.py``) + - 00:00.000 + - 0.0 + * - 
:ref:`sphx_glr_auto_examples_music_cont_out_proxy_example_receiver_script.py` (``../../pynest/examples/music_cont_out_proxy_example/receiver_script.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_one_neuron.py` (``../../pynest/examples/one_neuron.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_one_neuron_with_noise.py` (``../../pynest/examples/one_neuron_with_noise.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_plot_weight_matrices.py` (``../../pynest/examples/plot_weight_matrices.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_pong_generate_gif.py` (``../../pynest/examples/pong/generate_gif.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_pong_networks.py` (``../../pynest/examples/pong/networks.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_pong_pong.py` (``../../pynest/examples/pong/pong.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_pong_run_simulations.py` (``../../pynest/examples/pong/run_simulations.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_precise_spiking.py` (``../../pynest/examples/precise_spiking.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_pulsepacket.py` (``../../pynest/examples/pulsepacket.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_rate_neuron_dm.py` (``../../pynest/examples/rate_neuron_dm.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_recording_demo.py` (``../../pynest/examples/recording_demo.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_repeated_stimulation.py` (``../../pynest/examples/repeated_stimulation.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_sensitivity_to_perturbation.py` (``../../pynest/examples/sensitivity_to_perturbation.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_sinusoidal_gamma_generator.py` (``../../pynest/examples/sinusoidal_gamma_generator.py``) + - 00:00.000 + - 0.0 + * - 
:ref:`sphx_glr_auto_examples_sinusoidal_poisson_generator.py` (``../../pynest/examples/sinusoidal_poisson_generator.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_sonata_example_sonata_network.py` (``../../pynest/examples/sonata_example/sonata_network.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_spatial_conncomp.py` (``../../pynest/examples/spatial/conncomp.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_spatial_conncon_sources.py` (``../../pynest/examples/spatial/conncon_sources.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_spatial_conncon_targets.py` (``../../pynest/examples/spatial/conncon_targets.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_spatial_connex.py` (``../../pynest/examples/spatial/connex.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_spatial_connex_ew.py` (``../../pynest/examples/spatial/connex_ew.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_spatial_ctx_2n.py` (``../../pynest/examples/spatial/ctx_2n.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_spatial_gaussex.py` (``../../pynest/examples/spatial/gaussex.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_spatial_grid_iaf.py` (``../../pynest/examples/spatial/grid_iaf.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_spatial_grid_iaf_irr.py` (``../../pynest/examples/spatial/grid_iaf_irr.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_spatial_grid_iaf_oc.py` (``../../pynest/examples/spatial/grid_iaf_oc.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_spatial_nodes_source_target.py` (``../../pynest/examples/spatial/nodes_source_target.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_spatial_test_3d.py` (``../../pynest/examples/spatial/test_3d.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_spatial_test_3d_exp.py` (``../../pynest/examples/spatial/test_3d_exp.py``) + - 00:00.000 + - 0.0 + * - 
:ref:`sphx_glr_auto_examples_spatial_test_3d_gauss.py` (``../../pynest/examples/spatial/test_3d_gauss.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_store_restore_network.py` (``../../pynest/examples/store_restore_network.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_structural_plasticity.py` (``../../pynest/examples/structural_plasticity.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_sudoku_helpers_sudoku.py` (``../../pynest/examples/sudoku/helpers_sudoku.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_sudoku_plot_progress.py` (``../../pynest/examples/sudoku/plot_progress.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_sudoku_sudoku_net.py` (``../../pynest/examples/sudoku/sudoku_net.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_sudoku_sudoku_solver.py` (``../../pynest/examples/sudoku/sudoku_solver.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_synapsecollection.py` (``../../pynest/examples/synapsecollection.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_testiaf.py` (``../../pynest/examples/testiaf.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_twoneurons.py` (``../../pynest/examples/twoneurons.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_urbanczik_synapse_example.py` (``../../pynest/examples/urbanczik_synapse_example.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_vinit_example.py` (``../../pynest/examples/vinit_example.py``) + - 00:00.000 + - 0.0 + * - :ref:`sphx_glr_auto_examples_wang_decision_making.py` (``../../pynest/examples/wang_decision_making.py``) + - 00:00.000 + - 0.0 diff --git a/libnestutil/dict_util.h b/libnestutil/dict_util.h index 398e99fe6b..715bb54c84 100644 --- a/libnestutil/dict_util.h +++ b/libnestutil/dict_util.h @@ -53,8 +53,8 @@ updateValueParam( DictionaryDatum const& d, Name const n, VT& value, nest::Node* { throw BadParameter( "Cannot use Parameter with this 
model." ); } - auto vp = kernel::manager< VPManager >().node_id_to_vp( node->get_node_id() ); - auto tid = kernel::manager< VPManager >().vp_to_thread( vp ); + auto vp = kernel::manager< VPManager >.node_id_to_vp( node->get_node_id() ); + auto tid = kernel::manager< VPManager >.vp_to_thread( vp ); auto rng = get_vp_specific_rng( tid ); value = pd->get()->value( rng, node ); return true; diff --git a/libnestutil/logging.h b/libnestutil/logging.h index 858723a24d..38820a34ca 100644 --- a/libnestutil/logging.h +++ b/libnestutil/logging.h @@ -27,19 +27,19 @@ * */ #define LOG( s, fctn, msg ) \ - nest::kernel::manager< LoggingManager >().publish_log( ( s ), ( fctn ), ( msg ), __FILE__, __LINE__ ) + nest::kernel::manager< LoggingManager >.publish_log( ( s ), ( fctn ), ( msg ), __FILE__, __LINE__ ) /** * */ #define ALL_ENTRIES_ACCESSED( d, fctn, msg ) \ - nest::kernel::manager< LoggingManager >().all_entries_accessed( ( d ), ( fctn ), ( msg ), __FILE__, __LINE__ ) + nest::kernel::manager< LoggingManager >.all_entries_accessed( ( d ), ( fctn ), ( msg ), __FILE__, __LINE__ ) /** * */ -#define ALL_ENTRIES_ACCESSED2( d, fctn, msg1, msg2 ) \ - nest::kernel::manager< LoggingManager >().all_entries_accessed( \ +#define ALL_ENTRIES_ACCESSED2( d, fctn, msg1, msg2 ) \ + nest::kernel::manager< LoggingManager >.all_entries_accessed( \ ( d ), ( fctn ), ( msg1 ), ( msg2 ), __FILE__, __LINE__ ) namespace nest diff --git a/models/ac_generator.cpp b/models/ac_generator.cpp index 4b4ec53cec..c9b9ae5042 100644 --- a/models/ac_generator.cpp +++ b/models/ac_generator.cpp @@ -186,7 +186,7 @@ nest::ac_generator::pre_run_hook() StimulationDevice::pre_run_hook(); const double h = Time::get_resolution().get_ms(); - const double t = kernel::manager< SimulationManager >().get_time().get_ms(); + const double t = kernel::manager< SimulationManager >.get_time().get_ms(); // scale Hz to ms const double omega = 2.0 * numerics::pi * P_.freq_ / 1000.0; @@ -222,7 +222,7 @@ nest::ac_generator::update( Time 
const& origin, const long from, const long to ) { S_.I_ = S_.y_1_ + P_.offset_; ce.set_current( S_.I_ ); - kernel::manager< EventDeliveryManager >().send( *this, ce, lag ); + kernel::manager< EventDeliveryManager >.send( *this, ce, lag ); } B_.logger_.record_data( origin.get_steps() + lag ); } diff --git a/models/aeif_cond_alpha.cpp b/models/aeif_cond_alpha.cpp index 84f1e180e8..689c3a2a91 100644 --- a/models/aeif_cond_alpha.cpp +++ b/models/aeif_cond_alpha.cpp @@ -510,7 +510,7 @@ nest::aeif_cond_alpha::update( Time const& origin, const long from, const long t set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } @@ -539,12 +539,12 @@ nest::aeif_cond_alpha::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -557,8 +557,7 @@ nest::aeif_cond_alpha::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/aeif_cond_alpha_astro.cpp b/models/aeif_cond_alpha_astro.cpp index 321a94db37..f27e58c1ca 100644 --- a/models/aeif_cond_alpha_astro.cpp +++ 
b/models/aeif_cond_alpha_astro.cpp @@ -513,7 +513,7 @@ nest::aeif_cond_alpha_astro::update( Time const& origin, const long from, const set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } @@ -543,12 +543,12 @@ nest::aeif_cond_alpha_astro::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -561,15 +561,14 @@ nest::aeif_cond_alpha_astro::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void nest::aeif_cond_alpha_astro::handle( SICEvent& e ) { const double weight = e.get_weight(); - const long delay = e.get_delay_steps() - kernel::manager< ConnectionManager >().get_min_delay(); + const long delay = e.get_delay_steps() - kernel::manager< ConnectionManager >.get_min_delay(); size_t i = 0; std::vector< unsigned int >::iterator it = e.begin(); diff --git a/models/aeif_cond_alpha_multisynapse.cpp b/models/aeif_cond_alpha_multisynapse.cpp index 48f611904a..017458b703 100644 --- a/models/aeif_cond_alpha_multisynapse.cpp +++ b/models/aeif_cond_alpha_multisynapse.cpp @@ -549,7 +549,7 @@ 
aeif_cond_alpha_multisynapse::update( Time const& origin, const long from, const set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } @@ -596,7 +596,7 @@ aeif_cond_alpha_multisynapse::handle( SpikeEvent& e ) assert( ( e.get_rport() > 0 ) and ( ( size_t ) e.get_rport() <= P_.n_receptors() ) ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -609,8 +609,7 @@ aeif_cond_alpha_multisynapse::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * I ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * I ); } void diff --git a/models/aeif_cond_beta_multisynapse.cpp b/models/aeif_cond_beta_multisynapse.cpp index 1b4efdb6b5..ab3d66c0e7 100644 --- a/models/aeif_cond_beta_multisynapse.cpp +++ b/models/aeif_cond_beta_multisynapse.cpp @@ -558,7 +558,7 @@ aeif_cond_beta_multisynapse::update( Time const& origin, const long from, const set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } @@ -605,7 +605,7 @@ aeif_cond_beta_multisynapse::handle( SpikeEvent& e ) assert( ( e.get_rport() > 0 ) and ( ( size_t ) e.get_rport() <= P_.n_receptors() ) ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< 
SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -618,8 +618,7 @@ aeif_cond_beta_multisynapse::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * I ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * I ); } void diff --git a/models/aeif_cond_exp.cpp b/models/aeif_cond_exp.cpp index 3d925c078c..4bf769848e 100644 --- a/models/aeif_cond_exp.cpp +++ b/models/aeif_cond_exp.cpp @@ -500,7 +500,7 @@ nest::aeif_cond_exp::update( const Time& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } @@ -529,12 +529,12 @@ nest::aeif_cond_exp::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -547,8 +547,7 @@ nest::aeif_cond_exp::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff 
--git a/models/aeif_psc_alpha.cpp b/models/aeif_psc_alpha.cpp index bf0e1cf95c..872354b466 100644 --- a/models/aeif_psc_alpha.cpp +++ b/models/aeif_psc_alpha.cpp @@ -500,7 +500,7 @@ nest::aeif_psc_alpha::update( Time const& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } @@ -529,12 +529,12 @@ nest::aeif_psc_alpha::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -547,8 +547,7 @@ nest::aeif_psc_alpha::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/aeif_psc_delta.cpp b/models/aeif_psc_delta.cpp index 906fa72bca..de6d3164b0 100644 --- a/models/aeif_psc_delta.cpp +++ b/models/aeif_psc_delta.cpp @@ -500,7 +500,7 @@ nest::aeif_psc_delta::update( const Time& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } @@ 
-522,7 +522,7 @@ nest::aeif_psc_delta::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -535,8 +535,7 @@ nest::aeif_psc_delta::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/aeif_psc_delta_clopath.cpp b/models/aeif_psc_delta_clopath.cpp index 1989def896..300b2cd939 100644 --- a/models/aeif_psc_delta_clopath.cpp +++ b/models/aeif_psc_delta_clopath.cpp @@ -548,7 +548,7 @@ nest::aeif_psc_delta_clopath::update( const Time& origin, const long from, const set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } else if ( S_.clamp_r_ == 1 ) { @@ -601,7 +601,7 @@ nest::aeif_psc_delta_clopath::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -614,8 +614,7 @@ nest::aeif_psc_delta_clopath::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( 
kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/aeif_psc_exp.cpp b/models/aeif_psc_exp.cpp index c158ef8b86..728e07dd25 100644 --- a/models/aeif_psc_exp.cpp +++ b/models/aeif_psc_exp.cpp @@ -490,7 +490,7 @@ nest::aeif_psc_exp::update( const Time& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } @@ -518,12 +518,12 @@ nest::aeif_psc_exp::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -536,8 +536,7 @@ nest::aeif_psc_exp::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/amat2_psc_exp.cpp b/models/amat2_psc_exp.cpp index 5c724a8baa..c1ed99a101 100644 --- a/models/amat2_psc_exp.cpp +++ b/models/amat2_psc_exp.cpp @@ -406,7 +406,7 @@ nest::amat2_psc_exp::update( Time const& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + 
kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } else @@ -431,12 +431,12 @@ nest::amat2_psc_exp::handle( SpikeEvent& e ) if ( e.get_weight() >= 0.0 ) { - B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } @@ -450,8 +450,7 @@ nest::amat2_psc_exp::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/astrocyte_lr_1994.cpp b/models/astrocyte_lr_1994.cpp index 838b10a741..74dd52405c 100644 --- a/models/astrocyte_lr_1994.cpp +++ b/models/astrocyte_lr_1994.cpp @@ -369,7 +369,7 @@ nest::astrocyte_lr_1994::init_buffers_() B_.spike_exc_.clear(); // includes resize B_.currents_.clear(); B_.sic_values.resize( - kernel::manager< ConnectionManager >().get_min_delay(), 0.0 ); // set size of SIC buffer according to min_delay + kernel::manager< ConnectionManager >.get_min_delay(), 0.0 ); // set size of SIC buffer according to min_delay B_.logger_.reset(); @@ -485,7 +485,7 @@ nest::astrocyte_lr_1994::update( Time const& origin, const long from, const long // send SIC event SICEvent sic; sic.set_coeffarray( B_.sic_values ); - kernel::manager< EventDeliveryManager >().send_secondary( *this, sic ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, sic ); } 
void @@ -495,7 +495,7 @@ nest::astrocyte_lr_1994::handle( SpikeEvent& e ) if ( e.get_weight() >= 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else @@ -512,8 +512,7 @@ nest::astrocyte_lr_1994::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/binary_neuron.h b/models/binary_neuron.h index b0eabd2db4..0ace3c5aae 100644 --- a/models/binary_neuron.h +++ b/models/binary_neuron.h @@ -480,7 +480,7 @@ binary_neuron< TGainfunction >::update( Time const& origin, const long from, con // use multiplicity 2 to signal transition to 1 state // use multiplicity 1 to signal transition to 0 state se.set_multiplicity( new_y ? 
2 : 1 ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); // As multiplicity is used only to signal internal information // to other binary neurons, we only set spiketime once, independent @@ -536,21 +536,21 @@ binary_neuron< TGainfunction >::handle( SpikeEvent& e ) // received twice the same node ID, so transition 0->1 // take double weight to compensate for subtracting first event B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), 2.0 * e.get_weight() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), 2.0 * e.get_weight() ); } else { // count this event negatively, assuming it comes as single event // transition 1->0 B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), -e.get_weight() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() ); } } else if ( m == 2 ) { // count this event positively, transition 0->1 B_.spikes_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), e.get_weight() ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() ); } S_.last_in_node_id_ = node_id; @@ -569,8 +569,7 @@ binary_neuron< TGainfunction >::handle( CurrentEvent& e ) // we use the spike buffer to receive the binary events // but also to handle the incoming current events added // both contributions are directly added to the variable h - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } diff --git a/models/cm_default.cpp b/models/cm_default.cpp index 9ecd89c654..c7137b77d2 100644 --- 
a/models/cm_default.cpp +++ b/models/cm_default.cpp @@ -331,7 +331,7 @@ nest::cm_default::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } logger_.record_data( origin.get_steps() + lag ); @@ -350,7 +350,7 @@ nest::cm_default::handle( SpikeEvent& e ) assert( e.get_rport() < syn_buffers_.size() ); syn_buffers_[ e.get_rport() ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -364,7 +364,7 @@ nest::cm_default::handle( CurrentEvent& e ) Compartment* compartment = c_tree_.get_compartment_opt( e.get_rport() ); compartment->currents.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/cont_delay_synapse.h b/models/cont_delay_synapse.h index 3fa7086dd1..5ccc04c4d4 100644 --- a/models/cont_delay_synapse.h +++ b/models/cont_delay_synapse.h @@ -288,14 +288,14 @@ cont_delay_synapse< targetidentifierT >::set_status( const DictionaryDatum& d, C if ( frac_delay == 0 ) { - kernel::manager< ConnectionManager >().get_delay_checker().assert_valid_delay_ms( delay ); + kernel::manager< ConnectionManager >.get_delay_checker().assert_valid_delay_ms( delay ); set_delay_steps( Time::delay_ms_to_steps( delay ) ); delay_offset_ = 0.0; } else { const long lowerbound = static_cast< long >( int_delay ); - kernel::manager< ConnectionManager >().get_delay_checker().assert_two_valid_delays_steps( + kernel::manager< ConnectionManager >.get_delay_checker().assert_two_valid_delays_steps( lowerbound, lowerbound + 1 ); 
set_delay_steps( lowerbound + 1 ); delay_offset_ = h * ( 1.0 - frac_delay ); diff --git a/models/correlomatrix_detector.cpp b/models/correlomatrix_detector.cpp index 4ada5dd4a1..3f4aa557e8 100644 --- a/models/correlomatrix_detector.cpp +++ b/models/correlomatrix_detector.cpp @@ -325,7 +325,7 @@ nest::correlomatrix_detector::handle( SpikeEvent& e ) // throw away all spikes which are too old to // enter the correlation window - const long min_delay = kernel::manager< ConnectionManager >().get_min_delay(); + const long min_delay = kernel::manager< ConnectionManager >.get_min_delay(); while ( not otherSpikes.empty() and ( spike_i - otherSpikes.front().timestep_ ) >= tau_edge + min_delay ) { otherSpikes.pop_front(); diff --git a/models/correlospinmatrix_detector.cpp b/models/correlospinmatrix_detector.cpp index 761171fc8b..119df6a805 100644 --- a/models/correlospinmatrix_detector.cpp +++ b/models/correlospinmatrix_detector.cpp @@ -402,7 +402,7 @@ nest::correlospinmatrix_detector::handle( SpikeEvent& e ) } const double tau_edge = P_.tau_max_.get_steps() + P_.delta_tau_.get_steps(); - const long min_delay = kernel::manager< ConnectionManager >().get_min_delay(); + const long min_delay = kernel::manager< ConnectionManager >.get_min_delay(); while ( not otherPulses.empty() and ( t_min_on - otherPulses.front().t_off_ ) >= tau_edge + min_delay ) { otherPulses.pop_front(); diff --git a/models/dc_generator.cpp b/models/dc_generator.cpp index e3bbec73da..3a784e9961 100644 --- a/models/dc_generator.cpp +++ b/models/dc_generator.cpp @@ -175,7 +175,7 @@ nest::dc_generator::update( Time const& origin, const long from, const long to ) if ( StimulationDevice::is_active( Time::step( start + offs ) ) ) { S_.I_ = P_.amp_; - kernel::manager< EventDeliveryManager >().send( *this, ce, offs ); + kernel::manager< EventDeliveryManager >.send( *this, ce, offs ); } B_.logger_.record_data( origin.get_steps() + offs ); } diff --git a/models/eprop_iaf.cpp b/models/eprop_iaf.cpp index 
fbac3c950a..e254b01d08 100644 --- a/models/eprop_iaf.cpp +++ b/models/eprop_iaf.cpp @@ -306,7 +306,7 @@ eprop_iaf::update( Time const& origin, const long from, const long to ) if ( S_.v_m_ >= P_.V_th_ and S_.r_ == 0 ) { SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); S_.z_ = 1.0; S_.v_m_ -= P_.V_th_ * S_.z_; @@ -334,7 +334,7 @@ eprop_iaf::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -343,7 +343,7 @@ eprop_iaf::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/eprop_iaf_adapt.cpp b/models/eprop_iaf_adapt.cpp index 8399b981dd..2c2a0a3cce 100644 --- a/models/eprop_iaf_adapt.cpp +++ b/models/eprop_iaf_adapt.cpp @@ -345,7 +345,7 @@ eprop_iaf_adapt::update( Time const& origin, const long from, const long to ) if ( S_.v_m_ >= S_.v_th_adapt_ and S_.r_ == 0 ) { SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); S_.z_ = 1.0; S_.v_m_ -= P_.V_th_ * S_.z_; @@ -373,7 +373,7 @@ eprop_iaf_adapt::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ 
-382,7 +382,7 @@ eprop_iaf_adapt::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/eprop_iaf_adapt_bsshslm_2020.cpp b/models/eprop_iaf_adapt_bsshslm_2020.cpp index f100867005..f2c04f539f 100644 --- a/models/eprop_iaf_adapt_bsshslm_2020.cpp +++ b/models/eprop_iaf_adapt_bsshslm_2020.cpp @@ -299,8 +299,8 @@ eprop_iaf_adapt_bsshslm_2020::pre_run_hook() void eprop_iaf_adapt_bsshslm_2020::update( Time const& origin, const long from, const long to ) { - const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); - const bool with_reset = kernel::manager< SimulationManager >().get_eprop_reset_neurons_on_update(); + const long update_interval = kernel::manager< SimulationManager >.get_eprop_update_interval().get_steps(); + const bool with_reset = kernel::manager< SimulationManager >.get_eprop_reset_neurons_on_update(); const long shift = get_shift(); for ( long lag = from; lag < to; ++lag ) @@ -346,7 +346,7 @@ eprop_iaf_adapt_bsshslm_2020::update( Time const& origin, const long from, const count_spike(); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); S_.z_ = 1.0; S_.r_ = V_.RefractoryCounts_; @@ -378,7 +378,7 @@ eprop_iaf_adapt_bsshslm_2020::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -387,7 +387,7 @@ eprop_iaf_adapt_bsshslm_2020::handle( CurrentEvent& e ) { 
assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } @@ -454,8 +454,8 @@ eprop_iaf_adapt_bsshslm_2020::compute_gradient( std::vector< long >& presyn_isis } presyn_isis.clear(); - const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); - const long learning_window = kernel::manager< SimulationManager >().get_eprop_learning_window().get_steps(); + const long update_interval = kernel::manager< SimulationManager >.get_eprop_update_interval().get_steps(); + const long learning_window = kernel::manager< SimulationManager >.get_eprop_learning_window().get_steps(); const auto firing_rate_reg = get_firing_rate_reg_history( t_previous_update + get_shift() + update_interval ); grad += firing_rate_reg * sum_e; diff --git a/models/eprop_iaf_bsshslm_2020.cpp b/models/eprop_iaf_bsshslm_2020.cpp index 3f6dc1d378..f0645d664d 100644 --- a/models/eprop_iaf_bsshslm_2020.cpp +++ b/models/eprop_iaf_bsshslm_2020.cpp @@ -264,8 +264,8 @@ eprop_iaf_bsshslm_2020::pre_run_hook() void eprop_iaf_bsshslm_2020::update( Time const& origin, const long from, const long to ) { - const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); - const bool with_reset = kernel::manager< SimulationManager >().get_eprop_reset_neurons_on_update(); + const long update_interval = kernel::manager< SimulationManager >.get_eprop_update_interval().get_steps(); + const bool with_reset = kernel::manager< SimulationManager >.get_eprop_reset_neurons_on_update(); const long shift = get_shift(); for ( long lag = from; lag < to; ++lag ) @@ -306,7 +306,7 @@ eprop_iaf_bsshslm_2020::update( Time const& origin, const long from, const long count_spike(); SpikeEvent se; - kernel::manager< 
EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); S_.z_ = 1.0; S_.r_ = V_.RefractoryCounts_; @@ -338,7 +338,7 @@ eprop_iaf_bsshslm_2020::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -347,7 +347,7 @@ eprop_iaf_bsshslm_2020::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } @@ -412,8 +412,8 @@ eprop_iaf_bsshslm_2020::compute_gradient( std::vector< long >& presyn_isis, } presyn_isis.clear(); - const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); - const long learning_window = kernel::manager< SimulationManager >().get_eprop_learning_window().get_steps(); + const long update_interval = kernel::manager< SimulationManager >.get_eprop_update_interval().get_steps(); + const long learning_window = kernel::manager< SimulationManager >.get_eprop_learning_window().get_steps(); const auto firing_rate_reg = get_firing_rate_reg_history( t_previous_update + get_shift() + update_interval ); grad += firing_rate_reg * sum_e; diff --git a/models/eprop_iaf_psc_delta.cpp b/models/eprop_iaf_psc_delta.cpp index e9a19b86ef..846e5df0b1 100644 --- a/models/eprop_iaf_psc_delta.cpp +++ b/models/eprop_iaf_psc_delta.cpp @@ -341,7 +341,7 @@ eprop_iaf_psc_delta::update( Time const& origin, const long from, const long to S_.v_m_ = P_.V_reset_; SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + 
kernel::manager< EventDeliveryManager >.send( *this, se, lag ); z = 1.0; } @@ -367,7 +367,7 @@ eprop_iaf_psc_delta::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -376,7 +376,7 @@ eprop_iaf_psc_delta::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/eprop_iaf_psc_delta_adapt.cpp b/models/eprop_iaf_psc_delta_adapt.cpp index 13b43acbe4..b0fe277aa4 100644 --- a/models/eprop_iaf_psc_delta_adapt.cpp +++ b/models/eprop_iaf_psc_delta_adapt.cpp @@ -380,7 +380,7 @@ eprop_iaf_psc_delta_adapt::update( Time const& origin, const long from, const lo S_.v_m_ = P_.V_reset_; SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); S_.z_ = 1.0; } @@ -406,7 +406,7 @@ eprop_iaf_psc_delta_adapt::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -415,7 +415,7 @@ eprop_iaf_psc_delta_adapt::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager 
>.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/eprop_readout.cpp b/models/eprop_readout.cpp index eb946125f6..2ed54e3bcf 100644 --- a/models/eprop_readout.cpp +++ b/models/eprop_readout.cpp @@ -215,7 +215,7 @@ eprop_readout::pre_run_hook() void eprop_readout::update( Time const& origin, const long from, const long to ) { - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay(); std::vector< double > error_signal_buffer( buffer_size, 0.0 ); @@ -247,7 +247,7 @@ eprop_readout::update( Time const& origin, const long from, const long to ) LearningSignalConnectionEvent error_signal_event; error_signal_event.set_coeffarray( error_signal_buffer ); - kernel::manager< EventDeliveryManager >().send_secondary( *this, error_signal_event ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, error_signal_event ); return; } @@ -283,7 +283,7 @@ eprop_readout::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -292,7 +292,7 @@ eprop_readout::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/eprop_readout.h b/models/eprop_readout.h index dce5951585..566010591c 100644 --- a/models/eprop_readout.h +++ b/models/eprop_readout.h @@ -517,7 +517,7 @@ eprop_readout::handles_test_event( CurrentEvent&, size_t receptor_type ) inline size_t eprop_readout::handles_test_event( 
DelayedRateConnectionEvent& e, size_t receptor_type ) { - size_t step_rate_model_id = kernel::manager< ModelManager >().get_node_model_id( "step_rate_generator" ); + size_t step_rate_model_id = kernel::manager< ModelManager >.get_node_model_id( "step_rate_generator" ); size_t model_id = e.get_sender().get_model_id(); if ( step_rate_model_id == model_id and receptor_type != TARGET_SIG and receptor_type != LEARNING_WINDOW_SIG ) diff --git a/models/eprop_readout_bsshslm_2020.cpp b/models/eprop_readout_bsshslm_2020.cpp index 7aa5a5994b..ca0489fb2b 100644 --- a/models/eprop_readout_bsshslm_2020.cpp +++ b/models/eprop_readout_bsshslm_2020.cpp @@ -231,12 +231,12 @@ eprop_readout_bsshslm_2020::pre_run_hook() void eprop_readout_bsshslm_2020::update( Time const& origin, const long from, const long to ) { - const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); - const long learning_window = kernel::manager< SimulationManager >().get_eprop_learning_window().get_steps(); - const bool with_reset = kernel::manager< SimulationManager >().get_eprop_reset_neurons_on_update(); + const long update_interval = kernel::manager< SimulationManager >.get_eprop_update_interval().get_steps(); + const long learning_window = kernel::manager< SimulationManager >.get_eprop_learning_window().get_steps(); + const bool with_reset = kernel::manager< SimulationManager >.get_eprop_reset_neurons_on_update(); const long shift = get_shift(); - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay(); std::vector< double > error_signal_buffer( buffer_size, 0.0 ); std::vector< double > readout_signal_unnorm_buffer( buffer_size, 0.0 ); @@ -290,7 +290,7 @@ eprop_readout_bsshslm_2020::update( Time const& origin, const long from, const l LearningSignalConnectionEvent error_signal_event; error_signal_event.set_coeffarray( error_signal_buffer ); - 
kernel::manager< EventDeliveryManager >().send_secondary( *this, error_signal_event ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, error_signal_event ); if ( V_.signal_to_other_readouts_ ) { @@ -299,7 +299,7 @@ eprop_readout_bsshslm_2020::update( Time const& origin, const long from, const l // in the next times step for computing the normalized readout signal DelayedRateConnectionEvent readout_signal_unnorm_event; readout_signal_unnorm_event.set_coeffarray( readout_signal_unnorm_buffer ); - kernel::manager< EventDeliveryManager >().send_secondary( *this, readout_signal_unnorm_event ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, readout_signal_unnorm_event ); } return; } @@ -356,7 +356,7 @@ eprop_readout_bsshslm_2020::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -365,7 +365,7 @@ eprop_readout_bsshslm_2020::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } @@ -408,7 +408,7 @@ eprop_readout_bsshslm_2020::compute_gradient( std::vector< long >& presyn_isis, } presyn_isis.clear(); - const long learning_window = kernel::manager< SimulationManager >().get_eprop_learning_window().get_steps(); + const long learning_window = kernel::manager< SimulationManager >.get_eprop_learning_window().get_steps(); if ( average_gradient ) { grad /= learning_window; diff --git a/models/eprop_readout_bsshslm_2020.h b/models/eprop_readout_bsshslm_2020.h index 677ff85529..34d96ecbeb 100644 --- 
a/models/eprop_readout_bsshslm_2020.h +++ b/models/eprop_readout_bsshslm_2020.h @@ -525,7 +525,7 @@ eprop_readout_bsshslm_2020::handles_test_event( CurrentEvent&, size_t receptor_t inline size_t eprop_readout_bsshslm_2020::handles_test_event( DelayedRateConnectionEvent& e, size_t receptor_type ) { - size_t step_rate_model_id = kernel::manager< ModelManager >().get_node_model_id( "step_rate_generator" ); + size_t step_rate_model_id = kernel::manager< ModelManager >.get_node_model_id( "step_rate_generator" ); size_t model_id = e.get_sender().get_model_id(); if ( step_rate_model_id == model_id and receptor_type != TARGET_SIG ) diff --git a/models/eprop_synapse.cpp b/models/eprop_synapse.cpp index 8b5d87db4d..7ecb3dc071 100644 --- a/models/eprop_synapse.cpp +++ b/models/eprop_synapse.cpp @@ -73,7 +73,7 @@ EpropSynapseCommonProperties::set_status( const DictionaryDatum& d, ConnectorMod const bool set_optimizer = updateValue< std::string >( optimizer_dict, names::type, new_optimizer ); if ( set_optimizer and new_optimizer != optimizer_cp_->get_name() ) { - if ( kernel::manager< ConnectionManager >().get_num_connections( cm.get_syn_id() ) > 0 ) + if ( kernel::manager< ConnectionManager >.get_num_connections( cm.get_syn_id() ) > 0 ) { throw BadParameter( "The optimizer cannot be changed because synapses have been created." 
); } diff --git a/models/eprop_synapse_bsshslm_2020.cpp b/models/eprop_synapse_bsshslm_2020.cpp index 1f7e41a4dd..e32ba0d827 100644 --- a/models/eprop_synapse_bsshslm_2020.cpp +++ b/models/eprop_synapse_bsshslm_2020.cpp @@ -78,7 +78,7 @@ EpropSynapseBSSHSLM2020CommonProperties::set_status( const DictionaryDatum& d, C const bool set_optimizer = updateValue< std::string >( optimizer_dict, names::type, new_optimizer ); if ( set_optimizer and new_optimizer != optimizer_cp_->get_name() ) { - if ( kernel::manager< ConnectionManager >().get_num_connections( cm.get_syn_id() ) > 0 ) + if ( kernel::manager< ConnectionManager >.get_num_connections( cm.get_syn_id() ) > 0 ) { throw BadParameter( "The optimizer cannot be changed because synapses have been created." ); } diff --git a/models/eprop_synapse_bsshslm_2020.h b/models/eprop_synapse_bsshslm_2020.h index 5d404cd0f6..1ecf264b8d 100644 --- a/models/eprop_synapse_bsshslm_2020.h +++ b/models/eprop_synapse_bsshslm_2020.h @@ -408,7 +408,7 @@ eprop_synapse_bsshslm_2020< targetidentifierT >::eprop_synapse_bsshslm_2020( con , weight_( es.weight_ ) , t_spike_previous_( 0 ) , t_previous_update_( 0 ) - , t_next_update_( kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps() ) + , t_next_update_( kernel::manager< SimulationManager >.get_eprop_update_interval().get_steps() ) , t_previous_trigger_spike_( 0 ) , tau_m_readout_( es.tau_m_readout_ ) , kappa_( std::exp( -Time::get_resolution().get_ms() / tau_m_readout_ ) ) @@ -524,7 +524,7 @@ eprop_synapse_bsshslm_2020< targetidentifierT >::send( Event& e, assert( target ); const long t_spike = e.get_stamp().get_steps(); - const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); + const long update_interval = kernel::manager< SimulationManager >.get_eprop_update_interval().get_steps(); const long shift = target->get_shift(); const long interval_step = ( t_spike - shift ) % update_interval; diff --git 
a/models/gamma_sup_generator.cpp b/models/gamma_sup_generator.cpp index fef9caf2bf..46b19fb549 100644 --- a/models/gamma_sup_generator.cpp +++ b/models/gamma_sup_generator.cpp @@ -246,7 +246,7 @@ nest::gamma_sup_generator::update( Time const& T, const long from, const long to } DSSpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } diff --git a/models/gif_cond_exp.cpp b/models/gif_cond_exp.cpp index 92a29fa9bd..72ed2c0240 100644 --- a/models/gif_cond_exp.cpp +++ b/models/gif_cond_exp.cpp @@ -556,7 +556,7 @@ nest::gif_cond_exp::update( Time const& origin, const long from, const long to ) // And send the spike event set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } } @@ -585,12 +585,12 @@ nest::gif_cond_exp::handle( SpikeEvent& e ) // is clumsy and should be improved. 
if ( e.get_weight() >= 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } // keep conductance positive } @@ -604,8 +604,7 @@ nest::gif_cond_exp::handle( CurrentEvent& e ) const double w = e.get_weight(); // Add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/gif_cond_exp_multisynapse.cpp b/models/gif_cond_exp_multisynapse.cpp index 14bb0fba8c..9eb2151a80 100644 --- a/models/gif_cond_exp_multisynapse.cpp +++ b/models/gif_cond_exp_multisynapse.cpp @@ -563,7 +563,7 @@ nest::gif_cond_exp_multisynapse::update( Time const& origin, const long from, co // And send the spike event set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } } @@ -595,7 +595,7 @@ nest::gif_cond_exp_multisynapse::handle( SpikeEvent& e ) assert( ( e.get_rport() > 0 ) and ( ( size_t ) e.get_rport() <= P_.n_receptors() ) ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -608,8 +608,7 @@ 
nest::gif_cond_exp_multisynapse::handle( CurrentEvent& e ) const double w = e.get_weight(); // Add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * I ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * I ); } void diff --git a/models/gif_pop_psc_exp.cpp b/models/gif_pop_psc_exp.cpp index 25ef1ffa81..d797167efb 100644 --- a/models/gif_pop_psc_exp.cpp +++ b/models/gif_pop_psc_exp.cpp @@ -640,7 +640,7 @@ nest::gif_pop_psc_exp::update( Time const& origin, const long from, const long t SpikeEvent* se; se = new SpikeEvent; se->set_multiplicity( S_.n_spikes_ ); - kernel::manager< EventDeliveryManager >().send( *this, *se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, *se, lag ); } } } @@ -654,11 +654,11 @@ gif_pop_psc_exp::handle( SpikeEvent& e ) if ( s > 0.0 ) { - B_.ex_spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), s ); + B_.ex_spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), s ); } else { - B_.in_spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), s ); + B_.in_spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), s ); } } @@ -671,8 +671,7 @@ nest::gif_pop_psc_exp::handle( CurrentEvent& e ) const double w = e.get_weight(); // Add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/gif_psc_exp.cpp b/models/gif_psc_exp.cpp index 880d51733d..97613fb54d 100644 --- a/models/gif_psc_exp.cpp +++ b/models/gif_psc_exp.cpp @@ -378,7 
+378,7 @@ nest::gif_psc_exp::update( Time const& origin, const long from, const long to ) // And send the spike event set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } } @@ -408,12 +408,12 @@ nest::gif_psc_exp::handle( SpikeEvent& e ) // is clumsy and should be improved. if ( e.get_weight() >= 0.0 ) { - B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } @@ -427,8 +427,7 @@ nest::gif_psc_exp::handle( CurrentEvent& e ) const double w = e.get_weight(); // Add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/gif_psc_exp_multisynapse.cpp b/models/gif_psc_exp_multisynapse.cpp index 0b9894ba13..368a30b145 100644 --- a/models/gif_psc_exp_multisynapse.cpp +++ b/models/gif_psc_exp_multisynapse.cpp @@ -405,7 +405,7 @@ nest::gif_psc_exp_multisynapse::update( Time const& origin, const long from, con // And send the spike event set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } } @@ -431,7 +431,7 @@ 
gif_psc_exp_multisynapse::handle( SpikeEvent& e ) assert( ( e.get_rport() > 0 ) and ( ( size_t ) e.get_rport() <= P_.n_receptors_() ) ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -444,8 +444,7 @@ nest::gif_psc_exp_multisynapse::handle( CurrentEvent& e ) const double w = e.get_weight(); // Add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/glif_cond.cpp b/models/glif_cond.cpp index 6532b611f6..fe308d3dab 100644 --- a/models/glif_cond.cpp +++ b/models/glif_cond.cpp @@ -737,7 +737,7 @@ nest::glif_cond::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } else @@ -788,7 +788,7 @@ nest::glif_cond::handle( SpikeEvent& e ) assert( e.get_delay_steps() > 0 ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -797,7 +797,7 @@ nest::glif_cond::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git 
a/models/glif_psc.cpp b/models/glif_psc.cpp index 4595481a2a..3308389605 100644 --- a/models/glif_psc.cpp +++ b/models/glif_psc.cpp @@ -561,7 +561,7 @@ glif_psc::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } else diff --git a/models/glif_psc.h b/models/glif_psc.h index a5ede89930..d0d54a0001 100644 --- a/models/glif_psc.h +++ b/models/glif_psc.h @@ -485,7 +485,7 @@ glif_psc::handle( SpikeEvent& e ) assert( e.get_delay_steps() > 0 ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -494,7 +494,7 @@ glif_psc::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/glif_psc_double_alpha.cpp b/models/glif_psc_double_alpha.cpp index 08114d2b59..a520e8f598 100644 --- a/models/glif_psc_double_alpha.cpp +++ b/models/glif_psc_double_alpha.cpp @@ -626,7 +626,7 @@ nest::glif_psc_double_alpha::update( Time const& origin, const long from, const set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } else @@ -684,7 +684,7 @@ nest::glif_psc_double_alpha::handle( SpikeEvent& e ) assert( e.get_delay_steps() > 0 ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel::manager< 
SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -693,6 +693,6 @@ nest::glif_psc_double_alpha::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/hh_cond_beta_gap_traub.cpp b/models/hh_cond_beta_gap_traub.cpp index 85fc85adf4..b46f4630ad 100644 --- a/models/hh_cond_beta_gap_traub.cpp +++ b/models/hh_cond_beta_gap_traub.cpp @@ -104,7 +104,7 @@ hh_cond_beta_gap_traub_dynamics( double time, const double y[], double f[], void const double t = time / node.B_.step_; - switch ( kernel::manager< SimulationManager >().get_wfr_interpolation_order() ) + switch ( kernel::manager< SimulationManager >.get_wfr_interpolation_order() ) { case 0: gap = -node.B_.sumj_g_ij_ * y[ S::V_M ] + node.B_.interpolation_coefficients[ node.B_.lag_ ]; @@ -342,7 +342,7 @@ nest::hh_cond_beta_gap_traub::hh_cond_beta_gap_traub() , B_( *this ) { recordablesMap_.create(); - Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >.use_wfr() ); } nest::hh_cond_beta_gap_traub::hh_cond_beta_gap_traub( const hh_cond_beta_gap_traub& n ) @@ -351,7 +351,7 @@ nest::hh_cond_beta_gap_traub::hh_cond_beta_gap_traub( const hh_cond_beta_gap_tra , S_( n.S_ ) , B_( n.B_, *this ) { - Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >.use_wfr() ); } nest::hh_cond_beta_gap_traub::~hh_cond_beta_gap_traub() @@ -392,12 +392,12 @@ nest::hh_cond_beta_gap_traub::init_buffers_() // per min_delay step) // resize interpolation_coefficients 
depending on interpolation order - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay() - * ( kernel::manager< SimulationManager >().get_wfr_interpolation_order() + 1 ); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay() + * ( kernel::manager< SimulationManager >.get_wfr_interpolation_order() + 1 ); B_.interpolation_coefficients.resize( buffer_size, 0.0 ); - B_.last_y_values.resize( kernel::manager< ConnectionManager >().get_min_delay(), 0.0 ); + B_.last_y_values.resize( kernel::manager< ConnectionManager >.get_min_delay(), 0.0 ); B_.sumj_g_ij_ = 0.0; @@ -475,13 +475,13 @@ nest::hh_cond_beta_gap_traub::update_( Time const& origin, const long to, const bool called_from_wfr_update ) { - const size_t interpolation_order = kernel::manager< SimulationManager >().get_wfr_interpolation_order(); - const double wfr_tol = kernel::manager< SimulationManager >().get_wfr_tol(); + const size_t interpolation_order = kernel::manager< SimulationManager >.get_wfr_interpolation_order(); + const double wfr_tol = kernel::manager< SimulationManager >.get_wfr_tol(); bool wfr_tol_exceeded = false; // allocate memory to store the new interpolation coefficients // to be sent by gap event - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay() * ( interpolation_order + 1 ); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay() * ( interpolation_order + 1 ); std::vector< double > new_coefficients( buffer_size, 0.0 ); // parameters needed for piecewise interpolation @@ -554,7 +554,7 @@ nest::hh_cond_beta_gap_traub::update_( Time const& origin, set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // log state data @@ -614,13 +614,13 @@ nest::hh_cond_beta_gap_traub::update_( Time const& origin, new_coefficients[ temp * ( 
interpolation_order + 1 ) + 0 ] = S_.y_[ State_::V_M ]; } - std::vector< double >( kernel::manager< ConnectionManager >().get_min_delay(), 0.0 ).swap( B_.last_y_values ); + std::vector< double >( kernel::manager< ConnectionManager >.get_min_delay(), 0.0 ).swap( B_.last_y_values ); } // Send gap-event GapJunctionEvent ge; ge.set_coeffarray( new_coefficients ); - kernel::manager< EventDeliveryManager >().send_secondary( *this, ge ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, ge ); // Reset variables B_.sumj_g_ij_ = 0.0; @@ -636,14 +636,14 @@ nest::hh_cond_beta_gap_traub::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { // add with negative weight, ie positive value, since we are changing a // conductance - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -657,8 +657,7 @@ nest::hh_cond_beta_gap_traub::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/hh_cond_exp_traub.cpp b/models/hh_cond_exp_traub.cpp index 2b354b7e1b..bf6234f2c7 100644 --- a/models/hh_cond_exp_traub.cpp +++ b/models/hh_cond_exp_traub.cpp @@ -428,7 +428,7 @@ nest::hh_cond_exp_traub::update( Time const& origin, const long from, const long set_spiketime( Time::step( 
origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } @@ -447,14 +447,14 @@ nest::hh_cond_exp_traub::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { // add with negative weight, ie positive value, since we are changing a // conductance - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -468,8 +468,7 @@ nest::hh_cond_exp_traub::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/hh_psc_alpha.cpp b/models/hh_psc_alpha.cpp index 21c1d1c9a1..67891295d2 100644 --- a/models/hh_psc_alpha.cpp +++ b/models/hh_psc_alpha.cpp @@ -437,7 +437,7 @@ nest::hh_psc_alpha::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // log state data @@ -455,12 +455,12 @@ nest::hh_psc_alpha::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager 
>().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } @@ -473,8 +473,7 @@ nest::hh_psc_alpha::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/hh_psc_alpha_clopath.cpp b/models/hh_psc_alpha_clopath.cpp index b0cff794c6..2869733464 100644 --- a/models/hh_psc_alpha_clopath.cpp +++ b/models/hh_psc_alpha_clopath.cpp @@ -474,7 +474,7 @@ nest::hh_psc_alpha_clopath::update( Time const& origin, const long from, const l set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // log state data @@ -492,12 +492,12 @@ nest::hh_psc_alpha_clopath::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * 
e.get_multiplicity() ); } // current input, keep negative weight } @@ -511,8 +511,7 @@ nest::hh_psc_alpha_clopath::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/hh_psc_alpha_gap.cpp b/models/hh_psc_alpha_gap.cpp index be62091213..5e9d07dd2f 100644 --- a/models/hh_psc_alpha_gap.cpp +++ b/models/hh_psc_alpha_gap.cpp @@ -112,7 +112,7 @@ hh_psc_alpha_gap_dynamics( double time, const double y[], double f[], void* pnod const double t = time / node.B_.step_; - switch ( kernel::manager< SimulationManager >().get_wfr_interpolation_order() ) + switch ( kernel::manager< SimulationManager >.get_wfr_interpolation_order() ) { case 0: gap = -node.B_.sumj_g_ij_ * V + node.B_.interpolation_coefficients[ node.B_.lag_ ]; @@ -333,7 +333,7 @@ nest::hh_psc_alpha_gap::hh_psc_alpha_gap() , B_( *this ) { recordablesMap_.create(); - Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >.use_wfr() ); } nest::hh_psc_alpha_gap::hh_psc_alpha_gap( const hh_psc_alpha_gap& n ) @@ -342,7 +342,7 @@ nest::hh_psc_alpha_gap::hh_psc_alpha_gap( const hh_psc_alpha_gap& n ) , S_( n.S_ ) , B_( n.B_, *this ) { - Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >.use_wfr() ); } nest::hh_psc_alpha_gap::~hh_psc_alpha_gap() @@ -383,12 +383,12 @@ nest::hh_psc_alpha_gap::init_buffers_() // per min_delay step) // resize interpolation_coefficients depending on interpolation order - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay() - * ( kernel::manager< SimulationManager 
>().get_wfr_interpolation_order() + 1 ); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay() + * ( kernel::manager< SimulationManager >.get_wfr_interpolation_order() + 1 ); B_.interpolation_coefficients.resize( buffer_size, 0.0 ); - B_.last_y_values.resize( kernel::manager< ConnectionManager >().get_min_delay(), 0.0 ); + B_.last_y_values.resize( kernel::manager< ConnectionManager >.get_min_delay(), 0.0 ); B_.sumj_g_ij_ = 0.0; @@ -454,13 +454,13 @@ nest::hh_psc_alpha_gap::pre_run_hook() bool nest::hh_psc_alpha_gap::update_( Time const& origin, const long from, const long to, const bool called_from_wfr_update ) { - const size_t interpolation_order = kernel::manager< SimulationManager >().get_wfr_interpolation_order(); - const double wfr_tol = kernel::manager< SimulationManager >().get_wfr_tol(); + const size_t interpolation_order = kernel::manager< SimulationManager >.get_wfr_interpolation_order(); + const double wfr_tol = kernel::manager< SimulationManager >.get_wfr_tol(); bool wfr_tol_exceeded = false; // allocate memory to store the new interpolation coefficients // to be sent by gap event - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay() * ( interpolation_order + 1 ); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay() * ( interpolation_order + 1 ); std::vector< double > new_coefficients( buffer_size, 0.0 ); // parameters needed for piecewise interpolation @@ -535,7 +535,7 @@ nest::hh_psc_alpha_gap::update_( Time const& origin, const long from, const long set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // log state data @@ -595,13 +595,13 @@ nest::hh_psc_alpha_gap::update_( Time const& origin, const long from, const long new_coefficients[ temp * ( interpolation_order + 1 ) + 0 ] = S_.y_[ State_::V_M ]; } - 
std::vector< double >( kernel::manager< ConnectionManager >().get_min_delay(), 0.0 ).swap( B_.last_y_values ); + std::vector< double >( kernel::manager< ConnectionManager >.get_min_delay(), 0.0 ).swap( B_.last_y_values ); } // Send gap-event GapJunctionEvent ge; ge.set_coeffarray( new_coefficients ); - kernel::manager< EventDeliveryManager >().send_secondary( *this, ge ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, ge ); // Reset variables B_.sumj_g_ij_ = 0.0; @@ -617,12 +617,12 @@ nest::hh_psc_alpha_gap::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } @@ -635,8 +635,7 @@ nest::hh_psc_alpha_gap::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/ht_neuron.cpp b/models/ht_neuron.cpp index 2bc3461c6b..b8e558cf47 100644 --- a/models/ht_neuron.cpp +++ b/models/ht_neuron.cpp @@ -822,7 +822,7 @@ ht_neuron::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } @@ -855,7 +855,7 @@ 
nest::ht_neuron::handle( SpikeEvent& e ) assert( e.get_rport() < B_.spike_inputs_.size() ); B_.spike_inputs_[ e.get_rport() ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -868,8 +868,7 @@ nest::ht_neuron::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * I ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * I ); } void diff --git a/models/iaf_bw_2001.cpp b/models/iaf_bw_2001.cpp index f1f97f45e8..c86c49c6f5 100644 --- a/models/iaf_bw_2001.cpp +++ b/models/iaf_bw_2001.cpp @@ -407,7 +407,7 @@ nest::iaf_bw_2001::pre_run_hook() void nest::iaf_bw_2001::update( Time const& origin, const long from, const long to ) { - std::vector< double > s_vals( kernel::manager< ConnectionManager >().get_min_delay(), 0.0 ); + std::vector< double > s_vals( kernel::manager< ConnectionManager >.get_min_delay(), 0.0 ); for ( long lag = from; lag < to; ++lag ) { double t = 0.0; @@ -473,7 +473,7 @@ nest::iaf_bw_2001::update( Time const& origin, const long from, const long to ) SpikeEvent se; se.set_offset( s_NMDA_delta ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // set new input current diff --git a/models/iaf_bw_2001.h b/models/iaf_bw_2001.h index 380ce95d05..9a2cd670e2 100644 --- a/models/iaf_bw_2001.h +++ b/models/iaf_bw_2001.h @@ -535,7 +535,7 @@ iaf_bw_2001::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - const double steps = e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ); + const double steps = e.get_rel_delivery_steps( 
kernel::manager< SimulationManager >.get_slice_origin() ); const auto rport = e.get_rport(); @@ -554,7 +554,7 @@ iaf_bw_2001::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/iaf_bw_2001_exact.cpp b/models/iaf_bw_2001_exact.cpp index 5759956951..1e37dc99fe 100644 --- a/models/iaf_bw_2001_exact.cpp +++ b/models/iaf_bw_2001_exact.cpp @@ -504,7 +504,7 @@ nest::iaf_bw_2001_exact::update( Time const& origin, const long from, const long set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // set new input current diff --git a/models/iaf_bw_2001_exact.h b/models/iaf_bw_2001_exact.h index a149f0be7c..8f9c6cf964 100644 --- a/models/iaf_bw_2001_exact.h +++ b/models/iaf_bw_2001_exact.h @@ -553,7 +553,7 @@ nest::iaf_bw_2001_exact::handle( SpikeEvent& e ) assert( e.get_delay_steps() > 0 ); assert( e.get_rport() <= static_cast< int >( B_.spikes_.size() ) ); - const double steps = e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ); + const double steps = e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ); const auto rport = e.get_rport(); if ( rport < NMDA ) @@ -584,7 +584,7 @@ nest::iaf_bw_2001_exact::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/iaf_chs_2007.cpp 
b/models/iaf_chs_2007.cpp index 95cd2089af..2d5ab7b3eb 100644 --- a/models/iaf_chs_2007.cpp +++ b/models/iaf_chs_2007.cpp @@ -251,7 +251,7 @@ nest::iaf_chs_2007::update( const Time& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // log state data @@ -266,7 +266,7 @@ nest::iaf_chs_2007::handle( SpikeEvent& e ) if ( e.get_weight() >= 0.0 ) { - B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } diff --git a/models/iaf_chxk_2008.cpp b/models/iaf_chxk_2008.cpp index e671d7e8d2..e3448c5e4e 100644 --- a/models/iaf_chxk_2008.cpp +++ b/models/iaf_chxk_2008.cpp @@ -424,7 +424,7 @@ nest::iaf_chxk_2008::update( Time const& origin, const long from, const long to SpikeEvent se; se.set_offset( dt ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // add incoming spikes @@ -446,12 +446,12 @@ nest::iaf_chxk_2008::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -461,7 +461,7 @@ nest::iaf_chxk_2008::handle( CurrentEvent& e ) { assert( 
e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/iaf_cond_alpha.cpp b/models/iaf_cond_alpha.cpp index 5ed389f3a4..abd1449f49 100644 --- a/models/iaf_cond_alpha.cpp +++ b/models/iaf_cond_alpha.cpp @@ -424,7 +424,7 @@ nest::iaf_cond_alpha::update( Time const& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // add incoming spikes @@ -446,12 +446,12 @@ nest::iaf_cond_alpha::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -461,7 +461,7 @@ nest::iaf_cond_alpha::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/iaf_cond_alpha_mc.cpp b/models/iaf_cond_alpha_mc.cpp index be8debf186..a1c26d2b23 100644 --- a/models/iaf_cond_alpha_mc.cpp +++ b/models/iaf_cond_alpha_mc.cpp @@ -638,7 
+638,7 @@ nest::iaf_cond_alpha_mc::update( Time const& origin, const long from, const long set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // set new input currents @@ -659,7 +659,7 @@ nest::iaf_cond_alpha_mc::handle( SpikeEvent& e ) assert( e.get_rport() < 2 * NCOMP ); B_.spikes_[ e.get_rport() ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -672,7 +672,7 @@ nest::iaf_cond_alpha_mc::handle( CurrentEvent& e ) // add weighted current; HEP 2002-10-04 B_.currents_[ e.get_rport() ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/iaf_cond_beta.cpp b/models/iaf_cond_beta.cpp index 0e266b6e34..a44fa3b445 100644 --- a/models/iaf_cond_beta.cpp +++ b/models/iaf_cond_beta.cpp @@ -437,7 +437,7 @@ nest::iaf_cond_beta::update( Time const& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // add incoming spikes @@ -459,12 +459,12 @@ nest::iaf_cond_beta::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< 
SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } // ensure conductance is positive } @@ -475,7 +475,7 @@ nest::iaf_cond_beta::handle( CurrentEvent& e ) assert( e.get_delay_steps() > 0 ); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/iaf_cond_exp.cpp b/models/iaf_cond_exp.cpp index 5035cd23ab..6ad1841321 100644 --- a/models/iaf_cond_exp.cpp +++ b/models/iaf_cond_exp.cpp @@ -401,7 +401,7 @@ nest::iaf_cond_exp::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // set new input current @@ -419,12 +419,12 @@ nest::iaf_cond_exp::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -437,8 +437,7 @@ nest::iaf_cond_exp::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< 
SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/iaf_cond_exp_sfa_rr.cpp b/models/iaf_cond_exp_sfa_rr.cpp index ad2f91f68a..bcbc88e398 100644 --- a/models/iaf_cond_exp_sfa_rr.cpp +++ b/models/iaf_cond_exp_sfa_rr.cpp @@ -440,7 +440,7 @@ nest::iaf_cond_exp_sfa_rr::update( Time const& origin, const long from, const lo S_.y_[ State_::G_RR ] += P_.q_rr; SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // set new input current @@ -458,12 +458,12 @@ nest::iaf_cond_exp_sfa_rr::handle( SpikeEvent& e ) if ( e.get_weight() > 0.0 ) { - B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_exc_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spike_inh_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), -e.get_weight() * e.get_multiplicity() ); } } @@ -476,8 +476,7 @@ nest::iaf_cond_exp_sfa_rr::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/iaf_psc_alpha.cpp b/models/iaf_psc_alpha.cpp index 6a5df69b85..93c8a9cc02 100644 --- a/models/iaf_psc_alpha.cpp +++ b/models/iaf_psc_alpha.cpp @@ -325,7 +325,7 @@ iaf_psc_alpha::update( Time const& origin, const long from, const long to ) S_.dI_ex_ *= 
V_.P11_ex_; // get read access to the correct input-buffer slot - const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( lag ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >.get_modulo( lag ); auto& input = B_.input_buffer_.get_values_all_channels( input_buffer_slot ); // Apply spikes delivered in this step; spikes arriving at T+1 have @@ -354,7 +354,7 @@ iaf_psc_alpha::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // set new input current @@ -373,8 +373,8 @@ iaf_psc_alpha::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >.get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ) ); const double s = e.get_weight() * e.get_multiplicity(); @@ -387,8 +387,8 @@ iaf_psc_alpha::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >.get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ) ); const double I = e.get_current(); const double w = e.get_weight(); diff --git a/models/iaf_psc_alpha_multisynapse.cpp b/models/iaf_psc_alpha_multisynapse.cpp index 8d258ff3b8..98b7b29ed2 100644 --- a/models/iaf_psc_alpha_multisynapse.cpp +++ b/models/iaf_psc_alpha_multisynapse.cpp @@ -369,7 +369,7 @@ 
iaf_psc_alpha_multisynapse::update( Time const& origin, const long from, const l set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // set new input current @@ -398,7 +398,7 @@ iaf_psc_alpha_multisynapse::handle( SpikeEvent& e ) assert( e.get_delay_steps() > 0 ); B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -411,8 +411,7 @@ iaf_psc_alpha_multisynapse::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * I ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * I ); } void diff --git a/models/iaf_psc_alpha_ps.cpp b/models/iaf_psc_alpha_ps.cpp index 2a203ebd53..555984799a 100644 --- a/models/iaf_psc_alpha_ps.cpp +++ b/models/iaf_psc_alpha_ps.cpp @@ -472,7 +472,7 @@ nest::iaf_psc_alpha_ps::handle( SpikeEvent& e ) */ const long Tdeliver = e.get_stamp().get_steps() + e.get_delay_steps() - 1; - B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), + B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >.get_slice_origin() ), Tdeliver, e.get_offset(), e.get_weight() * e.get_multiplicity() ); @@ -488,7 +488,7 @@ nest::iaf_psc_alpha_ps::handle( CurrentEvent& e ) // add weighted current; HEP 2002-10-04 B_.currents_.add_value( - e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + e.get_rel_delivery_steps( nest::kernel::manager< 
SimulationManager >.get_slice_origin() ), w * c ); } void @@ -553,7 +553,7 @@ nest::iaf_psc_alpha_ps::emit_spike_( Time const& origin, const long lag, const d set_spiketime( Time::step( S_.last_spike_step_ ), S_.last_spike_offset_ ); SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); return; } @@ -575,7 +575,7 @@ nest::iaf_psc_alpha_ps::emit_instant_spike_( Time const& origin, const long lag, set_spiketime( Time::step( S_.last_spike_step_ ), S_.last_spike_offset_ ); SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); return; } diff --git a/models/iaf_psc_delta.cpp b/models/iaf_psc_delta.cpp index 2e2aa63ba3..c86805ae52 100644 --- a/models/iaf_psc_delta.cpp +++ b/models/iaf_psc_delta.cpp @@ -318,7 +318,7 @@ nest::iaf_psc_delta::update( Time const& origin, const long from, const long to set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // set new input current @@ -338,7 +338,7 @@ nest::iaf_psc_delta::handle( SpikeEvent& e ) // explicity, since it depends on delay and offset within // the update cycle. The way it is done here works, but // is clumsy and should be improved. 
- B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -351,8 +351,7 @@ nest::iaf_psc_delta::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/iaf_psc_delta_ps.cpp b/models/iaf_psc_delta_ps.cpp index f9de090d64..1952be9743 100644 --- a/models/iaf_psc_delta_ps.cpp +++ b/models/iaf_psc_delta_ps.cpp @@ -479,7 +479,7 @@ nest::iaf_psc_delta_ps::emit_spike_( Time const& origin, const long lag, const d set_spiketime( Time::step( S_.last_spike_step_ ), S_.last_spike_offset_ ); SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } void @@ -499,7 +499,7 @@ nest::iaf_psc_delta_ps::emit_instant_spike_( Time const& origin, const long lag, set_spiketime( Time::step( S_.last_spike_step_ ), S_.last_spike_offset_ ); SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } void @@ -512,7 +512,7 @@ iaf_psc_delta_ps::handle( SpikeEvent& e ) in the queue. The time is computed according to Time Memo, Rule 3. 
*/ const long Tdeliver = e.get_stamp().get_steps() + e.get_delay_steps() - 1; - B_.events_.add_spike( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.events_.add_spike( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), Tdeliver, e.get_offset(), e.get_weight() * e.get_multiplicity() ); @@ -527,8 +527,7 @@ iaf_psc_delta_ps::handle( CurrentEvent& e ) const double w = e.get_weight(); // add stepwise constant current; MH 2009-10-14 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } diff --git a/models/iaf_psc_exp.cpp b/models/iaf_psc_exp.cpp index 73b9c998fa..afbbda2c03 100644 --- a/models/iaf_psc_exp.cpp +++ b/models/iaf_psc_exp.cpp @@ -311,7 +311,7 @@ nest::iaf_psc_exp::update( const Time& origin, const long from, const long to ) S_.i_syn_ex_ += ( 1. 
- V_.P11ex_ ) * S_.i_1_; // get read access to the correct input-buffer slot - const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( lag ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >.get_modulo( lag ); auto& input = B_.input_buffer_.get_values_all_channels( input_buffer_slot ); // the spikes arriving at T+1 have an immediate effect on the state of the @@ -332,7 +332,7 @@ nest::iaf_psc_exp::update( const Time& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // set new input current @@ -352,8 +352,8 @@ nest::iaf_psc_exp::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >.get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ) ); const double s = e.get_weight() * e.get_multiplicity(); @@ -369,8 +369,8 @@ nest::iaf_psc_exp::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >.get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ) ); if ( 0 == e.get_rport() ) { diff --git a/models/iaf_psc_exp_htum.cpp b/models/iaf_psc_exp_htum.cpp index 106262c382..4a90e1cd25 100644 --- a/models/iaf_psc_exp_htum.cpp +++ b/models/iaf_psc_exp_htum.cpp @@ -330,7 +330,7 @@ 
nest::iaf_psc_exp_htum::update( Time const& origin, const long from, const long set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } else @@ -355,12 +355,12 @@ nest::iaf_psc_exp_htum::handle( SpikeEvent& e ) if ( e.get_weight() >= 0.0 ) { - B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } @@ -374,8 +374,7 @@ nest::iaf_psc_exp_htum::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/iaf_psc_exp_multisynapse.cpp b/models/iaf_psc_exp_multisynapse.cpp index 166c230b97..6e74eda8a7 100644 --- a/models/iaf_psc_exp_multisynapse.cpp +++ b/models/iaf_psc_exp_multisynapse.cpp @@ -335,7 +335,7 @@ iaf_psc_exp_multisynapse::update( const Time& origin, const long from, const lon set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // set new input current @@ -364,7 +364,7 @@ iaf_psc_exp_multisynapse::handle( SpikeEvent& e ) assert( e.get_delay_steps() > 0 ); 
B_.spikes_[ e.get_rport() - 1 ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -377,8 +377,7 @@ iaf_psc_exp_multisynapse::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * I ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * I ); } void diff --git a/models/iaf_psc_exp_ps.cpp b/models/iaf_psc_exp_ps.cpp index b1e662f858..6e1749ff44 100644 --- a/models/iaf_psc_exp_ps.cpp +++ b/models/iaf_psc_exp_ps.cpp @@ -441,7 +441,7 @@ nest::iaf_psc_exp_ps::handle( SpikeEvent& e ) */ const long Tdeliver = e.get_stamp().get_steps() + e.get_delay_steps() - 1; - B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), + B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >.get_slice_origin() ), Tdeliver, e.get_offset(), e.get_weight() * e.get_multiplicity() ); @@ -457,7 +457,7 @@ nest::iaf_psc_exp_ps::handle( CurrentEvent& e ) // add weighted current; HEP 2002-10-04 B_.currents_.add_value( - e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void @@ -516,7 +516,7 @@ nest::iaf_psc_exp_ps::emit_spike_( const Time& origin, const long lag, const dou SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } void @@ -537,7 +537,7 @@ nest::iaf_psc_exp_ps::emit_instant_spike_( const Time& origin, const 
long lag, c SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } double diff --git a/models/iaf_psc_exp_ps_lossless.cpp b/models/iaf_psc_exp_ps_lossless.cpp index d928bea16b..ece9106858 100644 --- a/models/iaf_psc_exp_ps_lossless.cpp +++ b/models/iaf_psc_exp_ps_lossless.cpp @@ -485,7 +485,7 @@ nest::iaf_psc_exp_ps_lossless::handle( SpikeEvent& e ) */ const long Tdeliver = e.get_stamp().get_steps() + e.get_delay_steps() - 1; - B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), + B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >.get_slice_origin() ), Tdeliver, e.get_offset(), e.get_weight() * e.get_multiplicity() ); @@ -501,7 +501,7 @@ nest::iaf_psc_exp_ps_lossless::handle( CurrentEvent& e ) // add weighted current; HEP 2002-10-04 B_.currents_.add_value( - e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void @@ -559,7 +559,7 @@ nest::iaf_psc_exp_ps_lossless::emit_spike_( const Time& origin, const long lag, SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } void @@ -580,7 +580,7 @@ nest::iaf_psc_exp_ps_lossless::emit_instant_spike_( const Time& origin, const lo SpikeEvent se; se.set_offset( S_.last_spike_offset_ ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } double diff --git a/models/iaf_tum_2000.cpp b/models/iaf_tum_2000.cpp index c9491a7b23..c6de4e0afe 100644 --- a/models/iaf_tum_2000.cpp +++ b/models/iaf_tum_2000.cpp @@ -364,7 +364,7 @@ 
nest::iaf_tum_2000::update( const Time& origin, const long from, const long to ) S_.i_syn_ex_ += ( 1. - V_.P11ex_ ) * S_.i_1_; // get read access to the correct input-buffer slot - const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( lag ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >.get_modulo( lag ); auto& input = B_.input_buffer_.get_values_all_channels( input_buffer_slot ); // the spikes arriving at T+1 have an immediate effect on the state of the @@ -427,7 +427,7 @@ nest::iaf_tum_2000::update( const Time& origin, const long from, const long to ) // send spike with datafield SpikeEvent se; se.set_offset( delta_y_tsp ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // set new input current @@ -447,8 +447,8 @@ nest::iaf_tum_2000::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >.get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ) ); // Multiply with datafield from SpikeEvent to apply depression/facilitation computed by presynaptic neuron double s = e.get_weight() * e.get_multiplicity(); @@ -470,8 +470,8 @@ nest::iaf_tum_2000::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >.get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ) ); if ( 0 == e.get_rport() ) { diff --git 
a/models/ignore_and_fire.cpp b/models/ignore_and_fire.cpp index 2ba28827d7..bd29772ee2 100644 --- a/models/ignore_and_fire.cpp +++ b/models/ignore_and_fire.cpp @@ -181,7 +181,7 @@ ignore_and_fire::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } else { @@ -198,8 +198,8 @@ ignore_and_fire::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >.get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ) ); const double s = e.get_weight() * e.get_multiplicity(); // separate buffer channels for excitatory and inhibitory inputs @@ -211,8 +211,8 @@ ignore_and_fire::handle( CurrentEvent& e ) { assert( e.get_delay_steps() > 0 ); - const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >().get_modulo( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ) ); + const size_t input_buffer_slot = kernel::manager< EventDeliveryManager >.get_modulo( + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ) ); const double I = e.get_current(); const double w = e.get_weight(); diff --git a/models/inhomogeneous_poisson_generator.cpp b/models/inhomogeneous_poisson_generator.cpp index 4f272d3e46..27a8f0d5d0 100644 --- a/models/inhomogeneous_poisson_generator.cpp +++ b/models/inhomogeneous_poisson_generator.cpp @@ -82,7 +82,7 @@ nest::inhomogeneous_poisson_generator::Parameters_::assert_valid_rate_time_and_i { Time t_rate; - if ( t <= kernel::manager< SimulationManager >().get_time().get_ms() ) + 
if ( t <= kernel::manager< SimulationManager >.get_time().get_ms() ) { throw BadProperty( "Time points must lie strictly in the future." ); } @@ -269,7 +269,7 @@ nest::inhomogeneous_poisson_generator::update( Time const& origin, const long fr if ( B_.rate_ > 0 and StimulationDevice::is_active( Time::step( curr_time ) ) ) { DSSpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, offs ); + kernel::manager< EventDeliveryManager >.send( *this, se, offs ); } } } diff --git a/models/izhikevich.cpp b/models/izhikevich.cpp index 9bebc04240..667665be66 100644 --- a/models/izhikevich.cpp +++ b/models/izhikevich.cpp @@ -229,7 +229,7 @@ nest::izhikevich::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } // set new input current @@ -244,7 +244,7 @@ void nest::izhikevich::handle( SpikeEvent& e ) { assert( e.get_delay_steps() > 0 ); - B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -255,8 +255,7 @@ nest::izhikevich::handle( CurrentEvent& e ) const double c = e.get_current(); const double w = e.get_weight(); - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/jonke_synapse.h b/models/jonke_synapse.h index 3c5573bb55..ccfcfdff3f 100644 --- a/models/jonke_synapse.h +++ b/models/jonke_synapse.h @@ -329,7 +329,7 @@ jonke_synapse< targetidentifierT >::send( Event& e, size_t t, const JonkeCommonP ++start; // 
get_history() should make sure that // start->t_ > t_lastspike - dendritic_delay, i.e. minus_dt < 0 - assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >.get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt / cp.tau_plus_ ), cp ); } diff --git a/models/mat2_psc_exp.cpp b/models/mat2_psc_exp.cpp index 277d007d9f..41b29b864d 100644 --- a/models/mat2_psc_exp.cpp +++ b/models/mat2_psc_exp.cpp @@ -339,7 +339,7 @@ nest::mat2_psc_exp::update( Time const& origin, const long from, const long to ) set_spiketime( Time::step( origin.get_steps() + lag + 1 ) ); SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } else @@ -364,12 +364,12 @@ nest::mat2_psc_exp::handle( SpikeEvent& e ) if ( e.get_weight() >= 0.0 ) { - B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_ex_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } else { - B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_in_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } } @@ -383,8 +383,7 @@ nest::mat2_psc_exp::handle( CurrentEvent& e ) const double w = e.get_weight(); // add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/mip_generator.cpp b/models/mip_generator.cpp index 4fe8944df6..b4b9d56299 100644 --- a/models/mip_generator.cpp 
+++ b/models/mip_generator.cpp @@ -141,7 +141,7 @@ nest::mip_generator::update( Time const& T, const long from, const long to ) DSSpikeEvent se; se.set_multiplicity( n_parent_spikes ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } } diff --git a/models/multimeter.cpp b/models/multimeter.cpp index 1f4e01f111..68e86360a4 100644 --- a/models/multimeter.cpp +++ b/models/multimeter.cpp @@ -206,7 +206,7 @@ multimeter::update( Time const& origin, const long from, const long ) // // Note that not all nodes receiving the request will necessarily answer. DataLoggingRequest req; - kernel::manager< EventDeliveryManager >().send( *this, req ); + kernel::manager< EventDeliveryManager >.send( *this, req ); } void diff --git a/models/multimeter.h b/models/multimeter.h index 6fab67cd03..a50f8799a8 100644 --- a/models/multimeter.h +++ b/models/multimeter.h @@ -247,7 +247,7 @@ nest::multimeter::get_status( DictionaryDatum& d ) const // siblings on other threads if ( get_thread() == 0 ) { - const std::vector< Node* > siblings = kernel::manager< NodeManager >().get_thread_siblings( get_node_id() ); + const std::vector< Node* > siblings = kernel::manager< NodeManager >.get_thread_siblings( get_node_id() ); std::vector< Node* >::const_iterator s; for ( s = siblings.begin() + 1; s != siblings.end(); ++s ) { diff --git a/models/music_cont_in_proxy.cpp b/models/music_cont_in_proxy.cpp index 91c9b4a8b2..b367c999ad 100644 --- a/models/music_cont_in_proxy.cpp +++ b/models/music_cont_in_proxy.cpp @@ -134,7 +134,7 @@ nest::music_cont_in_proxy::pre_run_hook() // only publish the port once if ( not S_.published_ ) { - MUSIC::Setup* s = kernel::manager< MUSICManager >().get_music_setup(); + MUSIC::Setup* s = kernel::manager< MUSICManager >.get_music_setup(); if ( s == 0 ) { throw MUSICSimulationHasRun( get_name() ); diff --git a/models/music_cont_out_proxy.cpp b/models/music_cont_out_proxy.cpp index 
ef2f42afc7..2c0313ca41 100644 --- a/models/music_cont_out_proxy.cpp +++ b/models/music_cont_out_proxy.cpp @@ -242,16 +242,16 @@ nest::music_cont_out_proxy::pre_run_hook() // only publish the output port once, if ( S_.published_ == false ) { - const size_t synmodel_id = kernel::manager< ModelManager >().get_synapse_model_id( "static_synapse" ); + const size_t synmodel_id = kernel::manager< ModelManager >.get_synapse_model_id( "static_synapse" ); std::vector< MUSIC::GlobalIndex > music_index_map; DictionaryDatum dummy_params = new Dictionary(); for ( size_t i = 0; i < P_.targets_->size(); ++i ) { const size_t tnode_id = ( *P_.targets_ )[ i ]; - if ( kernel::manager< NodeManager >().is_local_node_id( tnode_id ) ) + if ( kernel::manager< NodeManager >.is_local_node_id( tnode_id ) ) { - kernel::manager< ConnectionManager >().connect( get_node_id(), tnode_id, dummy_params, synmodel_id ); + kernel::manager< ConnectionManager >.connect( get_node_id(), tnode_id, dummy_params, synmodel_id ); for ( size_t j = 0; j < P_.record_from_.size(); ++j ) { @@ -260,7 +260,7 @@ nest::music_cont_out_proxy::pre_run_hook() } } - MUSIC::Setup* s = kernel::manager< MUSICManager >().get_music_setup(); + MUSIC::Setup* s = kernel::manager< MUSICManager >.get_music_setup(); if ( s == 0 ) { throw MUSICSimulationHasRun( get_name() ); @@ -323,7 +323,7 @@ nest::music_cont_out_proxy::get_status( DictionaryDatum& d ) const // siblings on other threads if ( get_thread() == 0 ) { - const std::vector< Node* > siblings = kernel::manager< NodeManager >().get_thread_siblings( get_node_id() ); + const std::vector< Node* > siblings = kernel::manager< NodeManager >.get_thread_siblings( get_node_id() ); std::vector< Node* >::const_iterator s; for ( s = siblings.begin() + 1; s != siblings.end(); ++s ) { @@ -359,7 +359,7 @@ nest::music_cont_out_proxy::update( Time const& origin, const long from, const l // // Note that not all nodes receiving the request will necessarily answer. 
DataLoggingRequest req; - kernel::manager< EventDeliveryManager >().send( *this, req ); + kernel::manager< EventDeliveryManager >.send( *this, req ); } void diff --git a/models/music_event_in_proxy.cpp b/models/music_event_in_proxy.cpp index 04977d053e..fb22303ae4 100644 --- a/models/music_event_in_proxy.cpp +++ b/models/music_event_in_proxy.cpp @@ -102,7 +102,7 @@ nest::music_event_in_proxy::music_event_in_proxy() , S_() { // Register port for the model so it is available as default - kernel::manager< MUSICManager >().register_music_in_port( P_.port_name_ ); + kernel::manager< MUSICManager >.register_music_in_port( P_.port_name_ ); } nest::music_event_in_proxy::music_event_in_proxy( const music_event_in_proxy& n ) @@ -111,7 +111,7 @@ nest::music_event_in_proxy::music_event_in_proxy( const music_event_in_proxy& n , S_( n.S_ ) { // Register port for node instance because MusicManager manages ports via reference count - kernel::manager< MUSICManager >().register_music_in_port( P_.port_name_ ); + kernel::manager< MUSICManager >.register_music_in_port( P_.port_name_ ); } @@ -130,7 +130,7 @@ nest::music_event_in_proxy::pre_run_hook() // register my port and my channel at the scheduler if ( not S_.registered_ ) { - kernel::manager< MUSICManager >().register_music_event_in_proxy( P_.port_name_, P_.channel_, this ); + kernel::manager< MUSICManager >.register_music_event_in_proxy( P_.port_name_, P_.channel_, this ); S_.registered_ = true; } } @@ -152,8 +152,8 @@ nest::music_event_in_proxy::set_status( const DictionaryDatum& d ) stmp.set( d, P_ ); // throws if BadProperty // if we get here, temporaries contain consistent set of properties - kernel::manager< MUSICManager >().unregister_music_in_port( P_.port_name_ ); - kernel::manager< MUSICManager >().register_music_in_port( ptmp.port_name_ ); + kernel::manager< MUSICManager >.unregister_music_in_port( P_.port_name_ ); + kernel::manager< MUSICManager >.register_music_in_port( ptmp.port_name_ ); P_ = ptmp; S_ = stmp; @@ 
-164,9 +164,9 @@ nest::music_event_in_proxy::handle( SpikeEvent& e ) { e.set_sender( *this ); - for ( size_t t = 0; t < kernel::manager< VPManager >().get_num_threads(); ++t ) + for ( size_t t = 0; t < kernel::manager< VPManager >.get_num_threads(); ++t ) { - kernel::manager< ConnectionManager >().send_from_device( t, local_device_id_, e ); + kernel::manager< ConnectionManager >.send_from_device( t, local_device_id_, e ); } } diff --git a/models/music_event_out_proxy.cpp b/models/music_event_out_proxy.cpp index 3cb24ed72a..8f73dffe5a 100644 --- a/models/music_event_out_proxy.cpp +++ b/models/music_event_out_proxy.cpp @@ -137,7 +137,7 @@ nest::music_event_out_proxy::pre_run_hook() // only publish the output port once, if ( not S_.published_ ) { - MUSIC::Setup* s = kernel::manager< MUSICManager >().get_music_setup(); + MUSIC::Setup* s = kernel::manager< MUSICManager >.get_music_setup(); if ( s == 0 ) { throw MUSICSimulationHasRun( get_name() ); diff --git a/models/music_message_in_proxy.cpp b/models/music_message_in_proxy.cpp index 05ed6b3daf..b0a02f8923 100644 --- a/models/music_message_in_proxy.cpp +++ b/models/music_message_in_proxy.cpp @@ -131,7 +131,7 @@ nest::music_message_in_proxy::pre_run_hook() // only publish the port once, if ( not S_.published_ ) { - MUSIC::Setup* s = kernel::manager< MUSICManager >().get_music_setup(); + MUSIC::Setup* s = kernel::manager< MUSICManager >.get_music_setup(); if ( s == 0 ) { throw MUSICSimulationHasRun( get_name() ); diff --git a/models/music_rate_in_proxy.cpp b/models/music_rate_in_proxy.cpp index aa955c542b..34dad05c27 100644 --- a/models/music_rate_in_proxy.cpp +++ b/models/music_rate_in_proxy.cpp @@ -108,7 +108,7 @@ nest::music_rate_in_proxy::music_rate_in_proxy() , S_() { // Register port for the model so it is available as default - kernel::manager< MUSICManager >().register_music_in_port( P_.port_name_ ); + kernel::manager< MUSICManager >.register_music_in_port( P_.port_name_ ); } 
nest::music_rate_in_proxy::music_rate_in_proxy( const music_rate_in_proxy& n ) @@ -117,7 +117,7 @@ nest::music_rate_in_proxy::music_rate_in_proxy( const music_rate_in_proxy& n ) , S_( n.S_ ) { // Register port for node instance because MusicManager manages ports via reference count - kernel::manager< MUSICManager >().register_music_in_port( P_.port_name_ ); + kernel::manager< MUSICManager >.register_music_in_port( P_.port_name_ ); } @@ -136,7 +136,7 @@ nest::music_rate_in_proxy::pre_run_hook() // only publish the port once if ( not S_.registered_ ) { - kernel::manager< MUSICManager >().register_music_rate_in_proxy( P_.port_name_, P_.channel_, this ); + kernel::manager< MUSICManager >.register_music_rate_in_proxy( P_.port_name_, P_.channel_, this ); S_.registered_ = true; } } @@ -160,8 +160,8 @@ nest::music_rate_in_proxy::set_status( const DictionaryDatum& d ) stmp.set( d, P_ ); // throws if BadProperty // if we get here, temporaries contain consistent set of properties - kernel::manager< MUSICManager >().unregister_music_in_port( P_.port_name_ ); - kernel::manager< MUSICManager >().register_music_in_port( ptmp.port_name_ ); + kernel::manager< MUSICManager >.unregister_music_in_port( P_.port_name_ ); + kernel::manager< MUSICManager >.register_music_in_port( ptmp.port_name_ ); P_ = ptmp; S_ = stmp; } @@ -174,7 +174,7 @@ nest::music_rate_in_proxy::update( Time const&, const long, const long ) void nest::music_rate_in_proxy::handle( InstantaneousRateConnectionEvent& e ) { - kernel::manager< EventDeliveryManager >().send_secondary( *this, e ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, e ); } diff --git a/models/music_rate_out_proxy.cpp b/models/music_rate_out_proxy.cpp index 116752330a..fa15336aa8 100644 --- a/models/music_rate_out_proxy.cpp +++ b/models/music_rate_out_proxy.cpp @@ -143,7 +143,7 @@ nest::music_rate_out_proxy::pre_run_hook() // only publish the output port once, if ( not S_.published_ ) { - MUSIC::Setup* s = kernel::manager< 
MUSICManager >().get_music_setup(); + MUSIC::Setup* s = kernel::manager< MUSICManager >.get_music_setup(); if ( s == 0 ) { diff --git a/models/noise_generator.cpp b/models/noise_generator.cpp index f6b9fdce29..a4463f7e36 100644 --- a/models/noise_generator.cpp +++ b/models/noise_generator.cpp @@ -241,7 +241,7 @@ nest::noise_generator::pre_run_hook() V_.dt_steps_ = P_.dt_.get_steps(); const double h = Time::get_resolution().get_ms(); - const double t = kernel::manager< SimulationManager >().get_time().get_ms(); + const double t = kernel::manager< SimulationManager >.get_time().get_ms(); // scale Hz to ms const double omega = 2.0 * numerics::pi * P_.freq_ / 1000.0; @@ -337,7 +337,7 @@ nest::noise_generator::update( Time const& origin, const long from, const long t B_.logger_.record_data( origin.get_steps() + offs ); DSCurrentEvent ce; - kernel::manager< EventDeliveryManager >().send( *this, ce, offs ); + kernel::manager< EventDeliveryManager >.send( *this, ce, offs ); } } diff --git a/models/noise_generator.h b/models/noise_generator.h index 16aa3d1db8..1192008fa0 100644 --- a/models/noise_generator.h +++ b/models/noise_generator.h @@ -298,7 +298,7 @@ class noise_generator : public StimulationDevice inline size_t noise_generator::handles_test_event( DataLoggingRequest& dlr, size_t receptor_type ) { - if ( kernel::manager< VPManager >().get_num_threads() > 1 ) + if ( kernel::manager< VPManager >.get_num_threads() > 1 ) { throw KernelException( "Recording from a noise_generator is only possible in single-threaded mode." 
); } diff --git a/models/parrot_neuron.cpp b/models/parrot_neuron.cpp index 0d06f4ba6e..63a8cd6cb8 100644 --- a/models/parrot_neuron.cpp +++ b/models/parrot_neuron.cpp @@ -58,7 +58,7 @@ parrot_neuron::update( Time const& origin, const long from, const long to ) // create a new SpikeEvent, set its multiplicity and send it SpikeEvent se; se.set_multiplicity( current_spikes_n ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); // set the spike times, respecting the multiplicity for ( unsigned long i = 0; i < current_spikes_n; i++ ) @@ -87,7 +87,7 @@ parrot_neuron::handle( SpikeEvent& e ) // Repeat only spikes incoming on port 0, port 1 will be ignored if ( 0 == e.get_rport() ) { - B_.n_spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.n_spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), static_cast< double >( e.get_multiplicity() ) ); } } diff --git a/models/parrot_neuron_ps.cpp b/models/parrot_neuron_ps.cpp index 8a7cb8aec0..28baee5e92 100644 --- a/models/parrot_neuron_ps.cpp +++ b/models/parrot_neuron_ps.cpp @@ -80,7 +80,7 @@ parrot_neuron_ps::update( Time const& origin, long const from, long const to ) SpikeEvent se; se.set_multiplicity( multiplicity ); se.set_offset( ev_offset ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); for ( unsigned long i = 0; i < multiplicity; ++i ) { @@ -117,7 +117,7 @@ parrot_neuron_ps::handle( SpikeEvent& e ) const long Tdeliver = e.get_stamp().get_steps() + e.get_delay_steps() - 1; // parrot ignores weight of incoming connection, store multiplicity - B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager >().get_slice_origin() ), + B_.events_.add_spike( e.get_rel_delivery_steps( nest::kernel::manager< SimulationManager 
>.get_slice_origin() ), Tdeliver, e.get_offset(), static_cast< double >( e.get_multiplicity() ) ); diff --git a/models/poisson_generator.cpp b/models/poisson_generator.cpp index e6edc49814..a9eae0928c 100644 --- a/models/poisson_generator.cpp +++ b/models/poisson_generator.cpp @@ -137,7 +137,7 @@ nest::poisson_generator::update( Time const& T, const long from, const long to ) } DSSpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } diff --git a/models/poisson_generator_ps.cpp b/models/poisson_generator_ps.cpp index a0ea31ae37..7a05266ed1 100644 --- a/models/poisson_generator_ps.cpp +++ b/models/poisson_generator_ps.cpp @@ -204,7 +204,7 @@ nest::poisson_generator_ps::update( Time const& T, const long from, const long t // the event hook then sends out the real spikes with offgrid timing // We pretend to send at T+from DSSpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, from ); + kernel::manager< EventDeliveryManager >.send( *this, se, from ); } } diff --git a/models/pp_cond_exp_mc_urbanczik.cpp b/models/pp_cond_exp_mc_urbanczik.cpp index 2d7195df54..96e6399b20 100644 --- a/models/pp_cond_exp_mc_urbanczik.cpp +++ b/models/pp_cond_exp_mc_urbanczik.cpp @@ -662,7 +662,7 @@ nest::pp_cond_exp_mc_urbanczik::update( Time const& origin, const long from, con // And send the spike event SpikeEvent se; se.set_multiplicity( n_spikes ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); // Set spike time in order to make plasticity rules work for ( unsigned int i = 0; i < n_spikes; i++ ) @@ -699,7 +699,7 @@ nest::pp_cond_exp_mc_urbanczik::handle( SpikeEvent& e ) assert( e.get_rport() < 2 * NCOMP ); B_.spikes_[ e.get_rport() ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< 
SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -712,7 +712,7 @@ nest::pp_cond_exp_mc_urbanczik::handle( CurrentEvent& e ) // add weighted current; HEP 2002-10-04 B_.currents_[ e.get_rport() ].add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_current() ); } diff --git a/models/pp_psc_delta.cpp b/models/pp_psc_delta.cpp index 3dc68dabc5..becc9fa572 100644 --- a/models/pp_psc_delta.cpp +++ b/models/pp_psc_delta.cpp @@ -430,7 +430,7 @@ nest::pp_psc_delta::update( Time const& origin, const long from, const long to ) // And send the spike event SpikeEvent se; se.set_multiplicity( n_spikes ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); // set spike time for STDP to work, // see https://github.com/nest/nest-simulator/issues/77 @@ -469,7 +469,7 @@ nest::pp_psc_delta::handle( SpikeEvent& e ) // explicitly, since it depends on delay and offset within // the update cycle. The way it is done here works, but // is clumsy and should be improved. 
- B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), e.get_weight() * e.get_multiplicity() ); } @@ -482,8 +482,7 @@ nest::pp_psc_delta::handle( CurrentEvent& e ) const double w = e.get_weight(); // Add weighted current; HEP 2002-10-04 - B_.currents_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), w * c ); + B_.currents_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), w * c ); } void diff --git a/models/ppd_sup_generator.cpp b/models/ppd_sup_generator.cpp index e74851b98e..b6cf313207 100644 --- a/models/ppd_sup_generator.cpp +++ b/models/ppd_sup_generator.cpp @@ -262,7 +262,7 @@ nest::ppd_sup_generator::update( Time const& T, const long from, const long to ) } DSSpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } diff --git a/models/pulsepacket_generator.cpp b/models/pulsepacket_generator.cpp index b3d55b9fd5..350892dddb 100644 --- a/models/pulsepacket_generator.cpp +++ b/models/pulsepacket_generator.cpp @@ -147,7 +147,7 @@ nest::pulsepacket_generator::pre_run_hook() V_.tolerance = 1.0; } - const double now = ( kernel::manager< SimulationManager >().get_time() ).get_ms(); + const double now = ( kernel::manager< SimulationManager >.get_time() ).get_ms(); V_.start_center_idx_ = 0; V_.stop_center_idx_ = 0; @@ -225,7 +225,7 @@ nest::pulsepacket_generator::update( Time const& T, const long, const long to ) { SpikeEvent se; se.set_multiplicity( n_spikes ); - kernel::manager< EventDeliveryManager >().send( *this, se, prev_spike - T.get_steps() ); + kernel::manager< EventDeliveryManager >.send( *this, se, prev_spike - T.get_steps() ); n_spikes = 0; } } diff --git a/models/rate_neuron_ipn.h 
b/models/rate_neuron_ipn.h index bf53e5dad9..224ae32ed3 100644 --- a/models/rate_neuron_ipn.h +++ b/models/rate_neuron_ipn.h @@ -537,7 +537,7 @@ rate_neuron_ipn< TNonlinearities >::rate_neuron_ipn() , B_( *this ) { recordablesMap_.create(); - Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >.use_wfr() ); } template < class TNonlinearities > @@ -548,7 +548,7 @@ rate_neuron_ipn< TNonlinearities >::rate_neuron_ipn( const rate_neuron_ipn& n ) , S_( n.S_ ) , B_( n.B_, *this ) { - Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >.use_wfr() ); } /* ---------------------------------------------------------------- @@ -563,7 +563,7 @@ rate_neuron_ipn< TNonlinearities >::init_buffers_() B_.delayed_rates_in_.clear(); // includes resize // resize buffers - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay(); B_.instant_rates_ex_.resize( buffer_size, 0.0 ); B_.instant_rates_in_.resize( buffer_size, 0.0 ); B_.last_y_values.resize( buffer_size, 0.0 ); @@ -614,8 +614,8 @@ rate_neuron_ipn< TNonlinearities >::update_( Time const& origin, const long to, const bool called_from_wfr_update ) { - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); - const double wfr_tol = kernel::manager< SimulationManager >().get_wfr_tol(); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay(); + const double wfr_tol = kernel::manager< SimulationManager >.get_wfr_tol(); bool wfr_tol_exceeded = false; // allocate memory to store rates to be sent by rate events @@ -704,7 +704,7 @@ rate_neuron_ipn< TNonlinearities >::update_( Time const& origin, // to avoid accumulation in the buffers of the receiving neurons. 
DelayedRateConnectionEvent drve; drve.set_coeffarray( new_rates ); - kernel::manager< EventDeliveryManager >().send_secondary( *this, drve ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, drve ); // clear last_y_values std::vector< double >( buffer_size, 0.0 ).swap( B_.last_y_values ); @@ -726,7 +726,7 @@ rate_neuron_ipn< TNonlinearities >::update_( Time const& origin, // Send rate-neuron-event InstantaneousRateConnectionEvent rve; rve.set_coeffarray( new_rates ); - kernel::manager< EventDeliveryManager >().send_secondary( *this, rve ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, rve ); // Reset variables std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_ex_ ); @@ -778,7 +778,7 @@ void rate_neuron_ipn< TNonlinearities >::handle( DelayedRateConnectionEvent& e ) { const double weight = e.get_weight(); - const long delay = e.get_delay_steps() - kernel::manager< ConnectionManager >().get_min_delay(); + const long delay = e.get_delay_steps() - kernel::manager< ConnectionManager >.get_min_delay(); size_t i = 0; std::vector< unsigned int >::iterator it = e.begin(); diff --git a/models/rate_neuron_opn.h b/models/rate_neuron_opn.h index 44dc22538b..38a4d27800 100644 --- a/models/rate_neuron_opn.h +++ b/models/rate_neuron_opn.h @@ -517,7 +517,7 @@ nest::rate_neuron_opn< TNonlinearities >::rate_neuron_opn() , B_( *this ) { recordablesMap_.create(); - Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >.use_wfr() ); } template < class TNonlinearities > @@ -527,7 +527,7 @@ nest::rate_neuron_opn< TNonlinearities >::rate_neuron_opn( const rate_neuron_opn , S_( n.S_ ) , B_( n.B_, *this ) { - Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >.use_wfr() ); } /* ---------------------------------------------------------------- @@ -542,7 +542,7 @@ 
nest::rate_neuron_opn< TNonlinearities >::init_buffers_() B_.delayed_rates_in_.clear(); // includes resize // resize buffers - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay(); B_.instant_rates_ex_.resize( buffer_size, 0.0 ); B_.instant_rates_in_.resize( buffer_size, 0.0 ); B_.last_y_values.resize( buffer_size, 0.0 ); @@ -585,8 +585,8 @@ nest::rate_neuron_opn< TNonlinearities >::update_( Time const& origin, const long to, const bool called_from_wfr_update ) { - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); - const double wfr_tol = kernel::manager< SimulationManager >().get_wfr_tol(); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay(); + const double wfr_tol = kernel::manager< SimulationManager >.get_wfr_tol(); bool wfr_tol_exceeded = false; // allocate memory to store rates to be sent by rate events @@ -672,7 +672,7 @@ nest::rate_neuron_opn< TNonlinearities >::update_( Time const& origin, // to avoid accumulation in the buffers of the receiving neurons. 
DelayedRateConnectionEvent drve; drve.set_coeffarray( new_rates ); - kernel::manager< EventDeliveryManager >().send_secondary( *this, drve ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, drve ); // clear last_y_values std::vector< double >( buffer_size, 0.0 ).swap( B_.last_y_values ); @@ -694,7 +694,7 @@ nest::rate_neuron_opn< TNonlinearities >::update_( Time const& origin, // Send rate-neuron-event InstantaneousRateConnectionEvent rve; rve.set_coeffarray( new_rates ); - kernel::manager< EventDeliveryManager >().send_secondary( *this, rve ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, rve ); // Reset variables std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_ex_ ); @@ -746,7 +746,7 @@ void nest::rate_neuron_opn< TNonlinearities >::handle( DelayedRateConnectionEvent& e ) { const double weight = e.get_weight(); - const long delay = e.get_delay_steps() - kernel::manager< ConnectionManager >().get_min_delay(); + const long delay = e.get_delay_steps() - kernel::manager< ConnectionManager >.get_min_delay(); size_t i = 0; std::vector< unsigned int >::iterator it = e.begin(); diff --git a/models/rate_transformer_node.h b/models/rate_transformer_node.h index 61a65fc32d..3b414e03f5 100644 --- a/models/rate_transformer_node.h +++ b/models/rate_transformer_node.h @@ -406,7 +406,7 @@ nest::rate_transformer_node< TNonlinearities >::rate_transformer_node() , B_( *this ) { recordablesMap_.create(); - Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >.use_wfr() ); } template < class TNonlinearities > @@ -416,7 +416,7 @@ nest::rate_transformer_node< TNonlinearities >::rate_transformer_node( const rat , S_( n.S_ ) , B_( n.B_, *this ) { - Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >.use_wfr() ); } /* 
---------------------------------------------------------------- @@ -430,7 +430,7 @@ nest::rate_transformer_node< TNonlinearities >::init_buffers_() B_.delayed_rates_.clear(); // includes resize // resize buffers - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay(); B_.instant_rates_.resize( buffer_size, 0.0 ); B_.last_y_values.resize( buffer_size, 0.0 ); @@ -456,8 +456,8 @@ nest::rate_transformer_node< TNonlinearities >::update_( Time const& origin, const long to, const bool called_from_wfr_update ) { - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); - const double wfr_tol = kernel::manager< SimulationManager >().get_wfr_tol(); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay(); + const double wfr_tol = kernel::manager< SimulationManager >.get_wfr_tol(); bool wfr_tol_exceeded = false; // allocate memory to store rates to be sent by rate events @@ -511,7 +511,7 @@ nest::rate_transformer_node< TNonlinearities >::update_( Time const& origin, // to avoid accumulation in the buffers of the receiving neurons. 
DelayedRateConnectionEvent drve; drve.set_coeffarray( new_rates ); - kernel::manager< EventDeliveryManager >().send_secondary( *this, drve ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, drve ); // clear last_y_values std::vector< double >( buffer_size, 0.0 ).swap( B_.last_y_values ); @@ -526,7 +526,7 @@ nest::rate_transformer_node< TNonlinearities >::update_( Time const& origin, // Send rate-neuron-event InstantaneousRateConnectionEvent rve; rve.set_coeffarray( new_rates ); - kernel::manager< EventDeliveryManager >().send_secondary( *this, rve ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, rve ); // Reset variables std::vector< double >( buffer_size, 0.0 ).swap( B_.instant_rates_ ); @@ -563,7 +563,7 @@ void nest::rate_transformer_node< TNonlinearities >::handle( DelayedRateConnectionEvent& e ) { const double weight = e.get_weight(); - const long delay = e.get_delay_steps() - kernel::manager< ConnectionManager >().get_min_delay(); + const long delay = e.get_delay_steps() - kernel::manager< ConnectionManager >.get_min_delay(); size_t i = 0; std::vector< unsigned int >::iterator it = e.begin(); diff --git a/models/siegert_neuron.cpp b/models/siegert_neuron.cpp index 5a1f0c3095..7a42dd1dd7 100644 --- a/models/siegert_neuron.cpp +++ b/models/siegert_neuron.cpp @@ -187,7 +187,7 @@ nest::siegert_neuron::siegert_neuron() , B_( *this ) { recordablesMap_.create(); - Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >.use_wfr() ); gsl_w_ = gsl_integration_workspace_alloc( 1000 ); } @@ -197,7 +197,7 @@ nest::siegert_neuron::siegert_neuron( const siegert_neuron& n ) , S_( n.S_ ) , B_( n.B_, *this ) { - Node::set_node_uses_wfr( kernel::manager< SimulationManager >().use_wfr() ); + Node::set_node_uses_wfr( kernel::manager< SimulationManager >.use_wfr() ); gsl_w_ = gsl_integration_workspace_alloc( 1000 ); } @@ -280,7 +280,7 @@ void 
nest::siegert_neuron::init_buffers_() { // resize buffers - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay(); B_.drift_input_.resize( buffer_size, 0.0 ); B_.diffusion_input_.resize( buffer_size, 0.0 ); B_.last_y_values.resize( buffer_size, 0.0 ); @@ -308,8 +308,8 @@ nest::siegert_neuron::pre_run_hook() bool nest::siegert_neuron::update_( Time const& origin, const long from, const long to, const bool called_from_wfr_update ) { - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); - const double wfr_tol = kernel::manager< SimulationManager >().get_wfr_tol(); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay(); + const double wfr_tol = kernel::manager< SimulationManager >.get_wfr_tol(); bool wfr_tol_exceeded = false; // allocate memory to store rates to be sent by rate events @@ -353,7 +353,7 @@ nest::siegert_neuron::update_( Time const& origin, const long from, const long t // Send diffusion-event DiffusionConnectionEvent rve; rve.set_coeffarray( new_rates ); - kernel::manager< EventDeliveryManager >().send_secondary( *this, rve ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, rve ); // Reset variables std::vector< double >( buffer_size, 0.0 ).swap( B_.drift_input_ ); diff --git a/models/sinusoidal_gamma_generator.cpp b/models/sinusoidal_gamma_generator.cpp index db5625c85c..646ab7f0c4 100644 --- a/models/sinusoidal_gamma_generator.cpp +++ b/models/sinusoidal_gamma_generator.cpp @@ -248,7 +248,7 @@ nest::sinusoidal_gamma_generator::init_buffers_() StimulationDevice::init_buffers(); B_.logger_.reset(); - std::vector< double >( P_.num_trains_, kernel::manager< SimulationManager >().get_time().get_ms() ).swap( B_.t0_ms_ ); + std::vector< double >( P_.num_trains_, kernel::manager< SimulationManager >.get_time().get_ms() ).swap( B_.t0_ms_ ); std::vector< double >( P_.num_trains_, 
0.0 ).swap( B_.Lambda_t0_ ); B_.P_prev_ = P_; } @@ -284,7 +284,7 @@ nest::sinusoidal_gamma_generator::pre_run_hook() V_.h_ = Time::get_resolution().get_ms(); V_.rng_ = get_vp_specific_rng( get_thread() ); - const double t_ms = kernel::manager< SimulationManager >().get_time().get_ms(); + const double t_ms = kernel::manager< SimulationManager >.get_time().get_ms(); // if new connections were created during simulation break, resize accordingly // this is a no-op if no new connections were created @@ -328,14 +328,14 @@ nest::sinusoidal_gamma_generator::update( Time const& origin, const long from, c if ( P_.individual_spike_trains_ ) { DSSpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } else { if ( V_.rng_->drand() < hazard_( 0 ) ) { SpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); B_.t0_ms_[ 0 ] = V_.t_ms_; B_.Lambda_t0_[ 0 ] = 0; } diff --git a/models/sinusoidal_poisson_generator.cpp b/models/sinusoidal_poisson_generator.cpp index 7ee08d3785..4c2cb7c8a8 100644 --- a/models/sinusoidal_poisson_generator.cpp +++ b/models/sinusoidal_poisson_generator.cpp @@ -221,7 +221,7 @@ nest::sinusoidal_poisson_generator::pre_run_hook() // time resolution V_.h_ = Time::get_resolution().get_ms(); - const double t = kernel::manager< SimulationManager >().get_time().get_ms(); + const double t = kernel::manager< SimulationManager >.get_time().get_ms(); // initial state S_.y_0_ = P_.amplitude_ * std::cos( P_.om_ * t + P_.phi_ ); @@ -268,7 +268,7 @@ nest::sinusoidal_poisson_generator::update( Time const& origin, const long from, if ( P_.individual_spike_trains_ ) { DSSpikeEvent se; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } else { @@ -276,7 +276,7 @@ nest::sinusoidal_poisson_generator::update( Time 
const& origin, const long from, long n_spikes = V_.poisson_dist_( rng, param ); SpikeEvent se; se.set_multiplicity( n_spikes ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } // store rate in spks/s diff --git a/models/spike_dilutor.cpp b/models/spike_dilutor.cpp index f37264a6b8..af06c8ce0f 100644 --- a/models/spike_dilutor.cpp +++ b/models/spike_dilutor.cpp @@ -97,7 +97,7 @@ nest::spike_dilutor::init_state_() // This check cannot be done in the copy constructor because that is also used to // create model prototypes. Since spike_dilutor is deprecated anyways, we put this // brute-force solution here. - if ( kernel::manager< VPManager >().get_num_threads() > 1 ) + if ( kernel::manager< VPManager >.get_num_threads() > 1 ) { throw KernelException( "The network contains a spike_dilutor which cannot be used with multiple threads." ); } @@ -140,7 +140,7 @@ nest::spike_dilutor::update( Time const& T, const long from, const long to ) DSSpikeEvent se; se.set_multiplicity( n_mother_spikes ); - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } } } @@ -181,6 +181,6 @@ nest::spike_dilutor::event_hook( DSSpikeEvent& e ) void nest::spike_dilutor::handle( SpikeEvent& e ) { - B_.n_spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + B_.n_spikes_.add_value( e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), static_cast< double >( e.get_multiplicity() ) ); } diff --git a/models/spike_generator.cpp b/models/spike_generator.cpp index 182d55ff71..6733b8aa9c 100644 --- a/models/spike_generator.cpp +++ b/models/spike_generator.cpp @@ -387,7 +387,7 @@ nest::spike_generator::update( Time const& sliceT0, const long from, const long long lag = Time( tnext_stamp - sliceT0 ).get_steps() - 1; // all spikes are sent locally, so offset 
information is always preserved - kernel::manager< EventDeliveryManager >().send( *this, *se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, *se, lag ); delete se; } diff --git a/models/spike_generator.h b/models/spike_generator.h index 38316a503d..767f68df60 100644 --- a/models/spike_generator.h +++ b/models/spike_generator.h @@ -379,7 +379,7 @@ nest::spike_generator::set_status( const DictionaryDatum& d ) } // throws if BadProperty - ptmp.set( d, S_, origin, kernel::manager< SimulationManager >().get_time(), this ); + ptmp.set( d, S_, origin, kernel::manager< SimulationManager >.get_time(), this ); // We now know that ptmp is consistent. We do not write it back // to P_ before we are also sure that the properties to be set diff --git a/models/spike_recorder.cpp b/models/spike_recorder.cpp index 042dd99e7c..426234c395 100644 --- a/models/spike_recorder.cpp +++ b/models/spike_recorder.cpp @@ -83,7 +83,7 @@ nest::spike_recorder::get_status( DictionaryDatum& d ) const // if we are the device on thread 0, also get the data from the siblings on other threads if ( get_thread() == 0 ) { - const std::vector< Node* > siblings = kernel::manager< NodeManager >().get_thread_siblings( get_node_id() ); + const std::vector< Node* > siblings = kernel::manager< NodeManager >.get_thread_siblings( get_node_id() ); std::vector< Node* >::const_iterator s; for ( s = siblings.begin() + 1; s != siblings.end(); ++s ) { diff --git a/models/spike_train_injector.cpp b/models/spike_train_injector.cpp index ac5bec800c..b08d699adf 100644 --- a/models/spike_train_injector.cpp +++ b/models/spike_train_injector.cpp @@ -303,7 +303,7 @@ spike_train_injector::pre_run_hook() // is not an exclusive precise spiking model if ( is_off_grid() ) { - kernel::manager< EventDeliveryManager >().set_off_grid_communication( true ); + kernel::manager< EventDeliveryManager >.set_off_grid_communication( true ); LOG( M_INFO, "spike_train_injector::pre_run_hook", "Spike train injector has been 
configured to emit precisely timed " @@ -377,7 +377,7 @@ spike_train_injector::update( Time const& sliceT0, const long from, const long t // we need to subtract one from stamp which is added again in send() long lag = Time( tnext_stamp - sliceT0 ).get_steps() - 1; - kernel::manager< EventDeliveryManager >().send( *this, se, lag ); + kernel::manager< EventDeliveryManager >.send( *this, se, lag ); } ++S_.position_; diff --git a/models/spike_train_injector.h b/models/spike_train_injector.h index 43aec86a9a..44b4c0a057 100644 --- a/models/spike_train_injector.h +++ b/models/spike_train_injector.h @@ -366,7 +366,7 @@ spike_train_injector::set_status( const DictionaryDatum& d ) } // throws if BadProperty - ptmp.set( d, S_, origin, kernel::manager< SimulationManager >().get_time(), this ); + ptmp.set( d, S_, origin, kernel::manager< SimulationManager >.get_time(), this ); // We now know that ptmp is consistent. We do not write it back // to P_ before we are also sure that the properties to be set diff --git a/models/spin_detector.cpp b/models/spin_detector.cpp index bc24f896c5..48013d8d73 100644 --- a/models/spin_detector.cpp +++ b/models/spin_detector.cpp @@ -89,7 +89,7 @@ nest::spin_detector::get_status( DictionaryDatum& d ) const // siblings on other threads if ( get_thread() == 0 ) { - const std::vector< Node* > siblings = kernel::manager< NodeManager >().get_thread_siblings( get_node_id() ); + const std::vector< Node* > siblings = kernel::manager< NodeManager >.get_thread_siblings( get_node_id() ); std::vector< Node* >::const_iterator s; for ( s = siblings.begin() + 1; s != siblings.end(); ++s ) { diff --git a/models/stdp_dopamine_synapse.cpp b/models/stdp_dopamine_synapse.cpp index 8768643573..920f1dfbdc 100644 --- a/models/stdp_dopamine_synapse.cpp +++ b/models/stdp_dopamine_synapse.cpp @@ -86,8 +86,8 @@ STDPDopaCommonProperties::set_status( const DictionaryDatum& d, ConnectorModel& throw BadProperty( "Property volume_transmitter must be a single element 
NodeCollection" ); } - const size_t tid = kernel::manager< VPManager >().get_thread_id(); - Node* vt_node = kernel::manager< NodeManager >().get_node_or_proxy( ( *vt_datum )[ 0 ], tid ); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); + Node* vt_node = kernel::manager< NodeManager >.get_node_or_proxy( ( *vt_datum )[ 0 ], tid ); volume_transmitter* vt = dynamic_cast< volume_transmitter* >( vt_node ); if ( not vt ) { diff --git a/models/stdp_dopamine_synapse.h b/models/stdp_dopamine_synapse.h index 2cc220888c..82399231f4 100644 --- a/models/stdp_dopamine_synapse.h +++ b/models/stdp_dopamine_synapse.h @@ -394,7 +394,7 @@ void stdp_dopamine_synapse< targetidentifierT >::check_synapse_params( const DictionaryDatum& syn_spec ) const { // Setting of parameter c and n not thread safe. - if ( kernel::manager< VPManager >().get_num_threads() > 1 ) + if ( kernel::manager< VPManager >.get_num_threads() > 1 ) { if ( syn_spec->known( names::c ) ) { @@ -457,7 +457,7 @@ stdp_dopamine_synapse< targetidentifierT >::process_dopa_spikes_( const std::vec // propagate weight from t0 to t1 if ( ( dopa_spikes.size() > dopa_spikes_idx_ + 1 ) and ( t1 - dopa_spikes[ dopa_spikes_idx_ + 1 ].spike_time_ - > -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ) ) + > -1.0 * kernel::manager< ConnectionManager >.get_stdp_eps() ) ) { // there is at least 1 dopa spike in (t0, t1] // propagate weight up to first dopa spike and update dopamine trace @@ -472,7 +472,7 @@ stdp_dopamine_synapse< targetidentifierT >::process_dopa_spikes_( const std::vec double cd; while ( ( dopa_spikes.size() > dopa_spikes_idx_ + 1 ) and ( t1 - dopa_spikes[ dopa_spikes_idx_ + 1 ].spike_time_ - > -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ) ) + > -1.0 * kernel::manager< ConnectionManager >.get_stdp_eps() ) ) { // propagate weight up to next dopa spike and update dopamine trace // weight and dopamine trace n are at time of last dopa spike td but @@ -554,7 +554,7 @@ 
stdp_dopamine_synapse< targetidentifierT >::send( Event& e, size_t t, const STDP minus_dt = t_last_update_ - t0; // facilitate only in case of post- after presyn. spike // skip facilitation if pre- and postsyn. spike occur at the same time - if ( t_spike - start->t_ > kernel::manager< ConnectionManager >().get_stdp_eps() ) + if ( t_spike - start->t_ > kernel::manager< ConnectionManager >.get_stdp_eps() ) { facilitate_( Kplus_ * std::exp( minus_dt / cp.tau_plus_ ), cp ); } diff --git a/models/stdp_facetshw_synapse_hom.h b/models/stdp_facetshw_synapse_hom.h index 5916a51407..6b534b850f 100644 --- a/models/stdp_facetshw_synapse_hom.h +++ b/models/stdp_facetshw_synapse_hom.h @@ -513,7 +513,7 @@ stdp_facetshw_synapse_hom< targetidentifierT >::send( Event& e, // get_history() should make sure that // start->t_ > t_lastspike_ - dendritic_delay, i.e. minus_dt < 0 - assert( minus_dt_causal < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); + assert( minus_dt_causal < -1.0 * kernel::manager< ConnectionManager >.get_stdp_eps() ); a_causal_ += std::exp( minus_dt_causal / cp.tau_plus_ ); diff --git a/models/stdp_nn_pre_centered_synapse.h b/models/stdp_nn_pre_centered_synapse.h index 48478112cd..8001cf5602 100644 --- a/models/stdp_nn_pre_centered_synapse.h +++ b/models/stdp_nn_pre_centered_synapse.h @@ -285,7 +285,7 @@ stdp_nn_pre_centered_synapse< targetidentifierT >::send( Event& e, size_t t, con // get_history() should make sure that // start->t_ > t_lastspike_ - dendritic_delay, i.e. 
minus_dt < 0 - assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >.get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt / tau_plus_ ) ); diff --git a/models/stdp_nn_restr_synapse.h b/models/stdp_nn_restr_synapse.h index 2760da388f..5ba903fa0a 100644 --- a/models/stdp_nn_restr_synapse.h +++ b/models/stdp_nn_restr_synapse.h @@ -280,7 +280,7 @@ stdp_nn_restr_synapse< targetidentifierT >::send( Event& e, size_t t, const Comm // get_history() should make sure that // start->t_ > t_lastspike_ - dendritic_delay, i.e. minus_dt < 0 - assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >.get_stdp_eps() ); weight_ = facilitate_( weight_, std::exp( minus_dt / tau_plus_ ) ); } diff --git a/models/stdp_nn_symm_synapse.h b/models/stdp_nn_symm_synapse.h index 065567568d..43b10441f7 100644 --- a/models/stdp_nn_symm_synapse.h +++ b/models/stdp_nn_symm_synapse.h @@ -278,7 +278,7 @@ stdp_nn_symm_synapse< targetidentifierT >::send( Event& e, size_t t, const Commo // get_history() should make sure that // start->t_ > t_lastspike_ - dendritic_delay, i.e. minus_dt < 0 - assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >.get_stdp_eps() ); weight_ = facilitate_( weight_, std::exp( minus_dt / tau_plus_ ) ); } diff --git a/models/stdp_pl_synapse_hom.h b/models/stdp_pl_synapse_hom.h index d791d719b2..aeace8c93c 100644 --- a/models/stdp_pl_synapse_hom.h +++ b/models/stdp_pl_synapse_hom.h @@ -281,7 +281,7 @@ stdp_pl_synapse_hom< targetidentifierT >::send( Event& e, size_t t, const STDPPL start++; // get_history() should make sure that // start->t_ > t_lastspike - dendritic_delay, i.e. 
minus_dt < 0 - assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >.get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt * cp.tau_plus_inv_ ), cp ); } diff --git a/models/stdp_synapse.h b/models/stdp_synapse.h index 5e63a1a76d..0efbedd967 100644 --- a/models/stdp_synapse.h +++ b/models/stdp_synapse.h @@ -268,7 +268,7 @@ stdp_synapse< targetidentifierT >::send( Event& e, size_t t, const CommonSynapse ++start; // get_history() should make sure that // start->t_ > t_lastspike - dendritic_delay, i.e. minus_dt < 0 - assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >.get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt / tau_plus_ ) ); } diff --git a/models/stdp_synapse_hom.h b/models/stdp_synapse_hom.h index f37486b4d1..109317eefb 100644 --- a/models/stdp_synapse_hom.h +++ b/models/stdp_synapse_hom.h @@ -316,7 +316,7 @@ stdp_synapse_hom< targetidentifierT >::send( Event& e, size_t t, const STDPHomCo ++start; // get_history() should make sure that // start->t_ > t_lastspike - dendritic_delay, i.e. minus_dt < 0 - assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >.get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt / cp.tau_plus_ ), cp ); } diff --git a/models/stdp_triplet_synapse.h b/models/stdp_triplet_synapse.h index 6db113e7a5..b0691dd937 100644 --- a/models/stdp_triplet_synapse.h +++ b/models/stdp_triplet_synapse.h @@ -286,7 +286,7 @@ stdp_triplet_synapse< targetidentifierT >::send( Event& e, size_t t, const Commo ++start; // get_history() should make sure that // start->t_ > t_lastspike - dendritic_delay, i.e. 
minus_dt < 0 - assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >.get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt / tau_plus_ ), ky ); } diff --git a/models/step_current_generator.cpp b/models/step_current_generator.cpp index 66d82cb2cb..91c141c8de 100644 --- a/models/step_current_generator.cpp +++ b/models/step_current_generator.cpp @@ -309,7 +309,7 @@ nest::step_current_generator::update( Time const& origin, const long from, const CurrentEvent ce; ce.set_current( B_.amp_ ); S_.I_ = B_.amp_; - kernel::manager< EventDeliveryManager >().send( *this, ce, offs ); + kernel::manager< EventDeliveryManager >.send( *this, ce, offs ); } B_.logger_.record_data( origin.get_steps() + offs ); } diff --git a/models/step_rate_generator.cpp b/models/step_rate_generator.cpp index 7d6c13360c..d7ed9e65cb 100644 --- a/models/step_rate_generator.cpp +++ b/models/step_rate_generator.cpp @@ -282,7 +282,7 @@ nest::step_rate_generator::update( Time const& origin, const long from, const lo const long t0 = origin.get_steps(); // allocate memory to store rates to be sent by rate events - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay(); std::vector< double > new_rates( buffer_size, 0.0 ); // Skip any times in the past. 
Since we must send events proactively, @@ -324,7 +324,7 @@ nest::step_rate_generator::update( Time const& origin, const long from, const lo { DelayedRateConnectionEvent drve; drve.set_coeffarray( new_rates ); - kernel::manager< EventDeliveryManager >().send_secondary( *this, drve ); + kernel::manager< EventDeliveryManager >.send_secondary( *this, drve ); } } diff --git a/models/vogels_sprekeler_synapse.h b/models/vogels_sprekeler_synapse.h index c12fe6cbdd..c107bc72b2 100644 --- a/models/vogels_sprekeler_synapse.h +++ b/models/vogels_sprekeler_synapse.h @@ -240,7 +240,7 @@ vogels_sprekeler_synapse< targetidentifierT >::send( Event& e, size_t t, const C ++start; // get_history() should make sure that // start->t_ > t_lastspike - dendritic_delay, i.e. minus_dt < 0 - assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); + assert( minus_dt < -1.0 * kernel::manager< ConnectionManager >.get_stdp_eps() ); weight_ = facilitate_( weight_, Kplus_ * std::exp( minus_dt / tau_ ) ); } diff --git a/models/volume_transmitter.cpp b/models/volume_transmitter.cpp index 2762291530..76dc812740 100644 --- a/models/volume_transmitter.cpp +++ b/models/volume_transmitter.cpp @@ -94,7 +94,7 @@ void nest::volume_transmitter::pre_run_hook() { // +1 as pseudo dopa spike at t_trig is inserted after trigger_update_weight - B_.spikecounter_.reserve( kernel::manager< ConnectionManager >().get_min_delay() * P_.deliver_interval_ + 1 ); + B_.spikecounter_.reserve( kernel::manager< ConnectionManager >.get_min_delay() * P_.deliver_interval_ + 1 ); } void @@ -109,22 +109,22 @@ nest::volume_transmitter::update( const Time&, const long from, const long to ) if ( multiplicity > 0 ) { t_spike = - Time( Time::step( kernel::manager< SimulationManager >().get_slice_origin().get_steps() + lag + 1 ) ).get_ms(); + Time( Time::step( kernel::manager< SimulationManager >.get_slice_origin().get_steps() + lag + 1 ) ).get_ms(); B_.spikecounter_.push_back( spikecounter( t_spike, multiplicity 
) ); } } // all spikes stored in spikecounter_ are delivered to the target synapses - if ( ( kernel::manager< SimulationManager >().get_slice_origin().get_steps() + to ) - % ( P_.deliver_interval_ * kernel::manager< ConnectionManager >().get_min_delay() ) + if ( ( kernel::manager< SimulationManager >.get_slice_origin().get_steps() + to ) + % ( P_.deliver_interval_ * kernel::manager< ConnectionManager >.get_min_delay() ) == 0 ) { double t_trig = - Time( Time::step( kernel::manager< SimulationManager >().get_slice_origin().get_steps() + to ) ).get_ms(); + Time( Time::step( kernel::manager< SimulationManager >.get_slice_origin().get_steps() + to ) ).get_ms(); if ( not B_.spikecounter_.empty() ) { - kernel::manager< ConnectionManager >().trigger_update_weight( get_node_id(), B_.spikecounter_, t_trig ); + kernel::manager< ConnectionManager >.trigger_update_weight( get_node_id(), B_.spikecounter_, t_trig ); } // clear spikecounter @@ -140,6 +140,6 @@ void nest::volume_transmitter::handle( SpikeEvent& e ) { B_.neuromodulatory_spikes_.add_value( - e.get_rel_delivery_steps( kernel::manager< SimulationManager >().get_slice_origin() ), + e.get_rel_delivery_steps( kernel::manager< SimulationManager >.get_slice_origin() ), static_cast< double >( e.get_multiplicity() ) ); } diff --git a/models/weight_recorder.cpp b/models/weight_recorder.cpp index 82fe6edfca..90f4148b05 100644 --- a/models/weight_recorder.cpp +++ b/models/weight_recorder.cpp @@ -168,7 +168,7 @@ nest::weight_recorder::get_status( DictionaryDatum& d ) const // siblings on other threads if ( get_thread() == 0 ) { - const std::vector< Node* > siblings = kernel::manager< NodeManager >().get_thread_siblings( get_node_id() ); + const std::vector< Node* > siblings = kernel::manager< NodeManager >.get_thread_siblings( get_node_id() ); std::vector< Node* >::const_iterator s; for ( s = siblings.begin() + 1; s != siblings.end(); ++s ) { diff --git a/nest/neststartup.cpp b/nest/neststartup.cpp index d6aaee614d..59e269154f 
100644 --- a/nest/neststartup.cpp +++ b/nest/neststartup.cpp @@ -123,8 +123,8 @@ neststartup( int* argc, char*** argv, SLIInterpreter& engine, std::string module void nestshutdown( int exitcode ) { - nest::kernel::manager< nest::KernelManager >().finalize(); - nest::kernel::manager< nest::MPIManager >().mpi_finalize( exitcode ); + nest::kernel::manager< nest::KernelManager >.finalize(); + nest::kernel::manager< nest::MPIManager >.mpi_finalize( exitcode ); } #if defined( HAVE_LIBNEUROSIM ) && defined( _IS_PYNEST ) @@ -160,7 +160,7 @@ set_communicator( PyObject* pyobj ) throw nest::KernelException( "set_communicator: argument is not a mpi4py communicator" ); } - nest::kernel::manager< nest::MPIManager >().set_communicator( *PyMPIComm_Get( pyobj ) ); + nest::kernel::manager< nest::MPIManager >.set_communicator( *PyMPIComm_Get( pyobj ) ); } #else // ! HAVE_MPI4PY diff --git a/nestkernel/archiving_node.cpp b/nestkernel/archiving_node.cpp index 41f6af71fa..a8b7fa7eb9 100644 --- a/nestkernel/archiving_node.cpp +++ b/nestkernel/archiving_node.cpp @@ -72,7 +72,7 @@ ArchivingNode::register_stdp_connection( double t_first_read, double delay ) // For details see bug #218. 
MH 08-04-22 for ( std::deque< histentry >::iterator runner = history_.begin(); runner != history_.end() - and ( t_first_read - runner->t_ > -1.0 * kernel::manager< ConnectionManager >().get_stdp_eps() ); + and ( t_first_read - runner->t_ > -1.0 * kernel::manager< ConnectionManager >.get_stdp_eps() ); ++runner ) { ( runner->access_counter_ )++; @@ -98,7 +98,7 @@ nest::ArchivingNode::get_K_value( double t ) int i = history_.size() - 1; while ( i >= 0 ) { - if ( t - history_[ i ].t_ > kernel::manager< ConnectionManager >().get_stdp_eps() ) + if ( t - history_[ i ].t_ > kernel::manager< ConnectionManager >.get_stdp_eps() ) { trace_ = ( history_[ i ].Kminus_ * std::exp( ( history_[ i ].t_ - t ) * tau_minus_inv_ ) ); return trace_; @@ -132,7 +132,7 @@ nest::ArchivingNode::get_K_values( double t, int i = history_.size() - 1; while ( i >= 0 ) { - if ( t - history_[ i ].t_ > kernel::manager< ConnectionManager >().get_stdp_eps() ) + if ( t - history_[ i ].t_ > kernel::manager< ConnectionManager >.get_stdp_eps() ) { K_triplet_value = ( history_[ i ].Kminus_triplet_ * std::exp( ( history_[ i ].t_ - t ) * tau_minus_triplet_inv_ ) ); @@ -163,8 +163,8 @@ nest::ArchivingNode::get_history( double t1, return; } std::deque< histentry >::reverse_iterator runner = history_.rbegin(); - const double t2_lim = t2 + kernel::manager< ConnectionManager >().get_stdp_eps(); - const double t1_lim = t1 + kernel::manager< ConnectionManager >().get_stdp_eps(); + const double t2_lim = t2 + kernel::manager< ConnectionManager >.get_stdp_eps(); + const double t1_lim = t1 + kernel::manager< ConnectionManager >.get_stdp_eps(); while ( runner != history_.rend() and runner->t_ >= t2_lim ) { ++runner; @@ -198,8 +198,8 @@ nest::ArchivingNode::set_spiketime( Time const& t_sp, double offset ) const double next_t_sp = history_[ 1 ].t_; if ( history_.front().access_counter_ >= n_incoming_ and t_sp_ms - next_t_sp > max_delay_ - + Time::delay_steps_to_ms( kernel::manager< ConnectionManager >().get_min_delay() ) - 
+ kernel::manager< ConnectionManager >().get_stdp_eps() ) + + Time::delay_steps_to_ms( kernel::manager< ConnectionManager >.get_min_delay() ) + + kernel::manager< ConnectionManager >.get_stdp_eps() ) { history_.pop_front(); } diff --git a/nestkernel/buffer_resize_log.cpp b/nestkernel/buffer_resize_log.cpp index 47df4c882d..be5ef455dc 100644 --- a/nestkernel/buffer_resize_log.cpp +++ b/nestkernel/buffer_resize_log.cpp @@ -49,7 +49,7 @@ BufferResizeLog::clear() void BufferResizeLog::add_entry( size_t global_max_spikes_sent, size_t new_buffer_size ) { - time_steps_.emplace_back( kernel::manager< SimulationManager >().get_clock().get_steps() ); + time_steps_.emplace_back( kernel::manager< SimulationManager >.get_clock().get_steps() ); global_max_spikes_sent_.emplace_back( global_max_spikes_sent ); new_buffer_size_.emplace_back( new_buffer_size ); } diff --git a/nestkernel/clopath_archiving_node.cpp b/nestkernel/clopath_archiving_node.cpp index 2cc56412e2..80734e0e03 100644 --- a/nestkernel/clopath_archiving_node.cpp +++ b/nestkernel/clopath_archiving_node.cpp @@ -72,7 +72,7 @@ nest::ClopathArchivingNode::init_clopath_buffers() // initialize the ltp-history ltd_hist_current_ = 0; - ltd_hist_len_ = kernel::manager< ConnectionManager >().get_max_delay() + 1; + ltd_hist_len_ = kernel::manager< ConnectionManager >.get_max_delay() + 1; ltd_history_.resize( ltd_hist_len_, histentry_extended( 0.0, 0.0, 0 ) ); } @@ -138,7 +138,7 @@ nest::ClopathArchivingNode::get_LTD_value( double t ) runner = ltd_history_.begin(); while ( runner != ltd_history_.end() ) { - if ( fabs( t - runner->t_ ) < kernel::manager< ConnectionManager >().get_stdp_eps() ) + if ( fabs( t - runner->t_ ) < kernel::manager< ConnectionManager >.get_stdp_eps() ) { return runner->dw_; } diff --git a/nestkernel/common_synapse_properties.cpp b/nestkernel/common_synapse_properties.cpp index 6e96d773a4..fc226d028a 100644 --- a/nestkernel/common_synapse_properties.cpp +++ b/nestkernel/common_synapse_properties.cpp @@ 
-63,8 +63,8 @@ CommonSynapseProperties::set_status( const DictionaryDatum& d, ConnectorModel& ) throw BadProperty( "Property weight_recorder must be a single element NodeCollection" ); } - const size_t tid = kernel::manager< VPManager >().get_thread_id(); - Node* wr_node = kernel::manager< NodeManager >().get_node_or_proxy( ( *wr_datum )[ 0 ], tid ); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); + Node* wr_node = kernel::manager< NodeManager >.get_node_or_proxy( ( *wr_datum )[ 0 ], tid ); weight_recorder* wr = dynamic_cast< weight_recorder* >( wr_node ); if ( not wr ) { diff --git a/nestkernel/conn_builder.cpp b/nestkernel/conn_builder.cpp index c847de9616..faf939c8bb 100644 --- a/nestkernel/conn_builder.cpp +++ b/nestkernel/conn_builder.cpp @@ -54,8 +54,12 @@ nest::ConnBuilder::ConnBuilder( const std::string& primary_rule, const std::vector< DictionaryDatum >& syn_specs ) : third_in_builder_( nullptr ) , third_out_builder_( nullptr ) - , primary_builder_( kernel::manager< ConnectionManager >() - .get_conn_builder( primary_rule, sources, targets, third_out_builder_, conn_spec, syn_specs ) ) + , primary_builder_( kernel::manager< ConnectionManager >.get_conn_builder( primary_rule, + sources, + targets, + third_out_builder_, + conn_spec, + syn_specs ) ) { } @@ -71,14 +75,14 @@ nest::ConnBuilder::ConnBuilder( const std::string& primary_rule, third, third_conn_spec, const_cast< std::map< Name, std::vector< DictionaryDatum > >& >( syn_specs )[ names::third_in ] ) ) - , third_out_builder_( kernel::manager< ConnectionManager >().get_third_conn_builder( third_rule, + , third_out_builder_( kernel::manager< ConnectionManager >.get_third_conn_builder( third_rule, third, targets, third_in_builder_, third_conn_spec, // const_cast here seems required, clang complains otherwise; try to clean up when Datums disappear const_cast< std::map< Name, std::vector< DictionaryDatum > >& >( syn_specs )[ names::third_out ] ) ) - , primary_builder_( kernel::manager< 
ConnectionManager >().get_conn_builder( primary_rule, + , primary_builder_( kernel::manager< ConnectionManager >.get_conn_builder( primary_rule, sources, targets, third_out_builder_, @@ -127,7 +131,7 @@ nest::BipartiteConnBuilder::BipartiteConnBuilder( NodeCollectionPTR sources, , allow_multapses_( true ) , make_symmetric_( false ) , creates_symmetric_connections_( false ) - , exceptions_raised_( kernel::manager< VPManager >().get_num_threads() ) + , exceptions_raised_( kernel::manager< VPManager >.get_num_threads() ) , use_structural_plasticity_( false ) , parameters_requiring_skipping_() , param_dicts_() @@ -156,7 +160,7 @@ nest::BipartiteConnBuilder::BipartiteConnBuilder( NodeCollectionPTR sources, delays_.resize( syn_specs.size() ); synapse_params_.resize( syn_specs.size() ); synapse_model_id_.resize( syn_specs.size() ); - synapse_model_id_[ 0 ] = kernel::manager< ModelManager >().get_synapse_model_id( "static_synapse" ); + synapse_model_id_[ 0 ] = kernel::manager< ModelManager >.get_synapse_model_id( "static_synapse" ); param_dicts_.resize( syn_specs.size() ); // loop through vector of synapse dictionaries, and set synapse parameters @@ -168,7 +172,7 @@ nest::BipartiteConnBuilder::BipartiteConnBuilder( NodeCollectionPTR sources, set_default_weight_or_delay_( syn_params, synapse_indx ); DictionaryDatum syn_defaults = - kernel::manager< ModelManager >().get_connector_defaults( synapse_model_id_[ synapse_indx ] ); + kernel::manager< ModelManager >.get_connector_defaults( synapse_model_id_[ synapse_indx ] ); #ifdef HAVE_MUSIC // We allow music_channel as alias for receptor_type during connection setup @@ -232,9 +236,9 @@ nest::BipartiteConnBuilder::change_connected_synaptic_elements( size_t snode_id, int local = true; // check whether the source is on this mpi machine - if ( kernel::manager< NodeManager >().is_local_node_id( snode_id ) ) + if ( kernel::manager< NodeManager >.is_local_node_id( snode_id ) ) { - Node* const source = kernel::manager< NodeManager 
>().get_node_or_proxy( snode_id, tid ); + Node* const source = kernel::manager< NodeManager >.get_node_or_proxy( snode_id, tid ); const size_t source_thread = source->get_thread(); // check whether the source is on our thread @@ -246,13 +250,13 @@ nest::BipartiteConnBuilder::change_connected_synaptic_elements( size_t snode_id, } // check whether the target is on this mpi machine - if ( not kernel::manager< NodeManager >().is_local_node_id( tnode_id ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( tnode_id ) ) { local = false; } else { - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); // check whether the target is on our thread if ( tid != target_thread ) @@ -277,7 +281,7 @@ nest::BipartiteConnBuilder::connect() for ( auto synapse_model_id : synapse_model_id_ ) { const ConnectorModel& synapse_model = - kernel::manager< ModelManager >().get_connection_model( synapse_model_id, /* thread */ 0 ); + kernel::manager< ModelManager >.get_connection_model( synapse_model_id, /* thread */ 0 ); const bool requires_symmetric = synapse_model.has_property( ConnectionModelProperties::REQUIRES_SYMMETRIC ); if ( requires_symmetric and not( is_symmetric() or make_symmetric_ ) ) @@ -326,7 +330,7 @@ nest::BipartiteConnBuilder::connect() } } // check if any exceptions have been raised - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { if ( exceptions_raised_.at( tid ).get() ) { @@ -348,7 +352,7 @@ nest::BipartiteConnBuilder::disconnect() } // check if any exceptions have been raised - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { if ( 
exceptions_raised_.at( tid ).get() ) { @@ -365,7 +369,7 @@ nest::BipartiteConnBuilder::update_param_dict_( size_t snode_id, size_t synapse_indx ) { assert( - kernel::manager< VPManager >().get_num_threads() == static_cast< size_t >( param_dicts_[ synapse_indx ].size() ) ); + kernel::manager< VPManager >.get_num_threads() == static_cast< size_t >( param_dicts_[ synapse_indx ].size() ) ); for ( auto synapse_parameter : synapse_params_[ synapse_indx ] ) { @@ -400,7 +404,7 @@ nest::BipartiteConnBuilder::single_connect_( size_t snode_id, Node& target, size if ( default_weight_and_delay_[ synapse_indx ] ) { - kernel::manager< ConnectionManager >().connect( snode_id, + kernel::manager< ConnectionManager >.connect( snode_id, &target, target_thread, synapse_model_id_[ synapse_indx ], @@ -408,7 +412,7 @@ nest::BipartiteConnBuilder::single_connect_( size_t snode_id, Node& target, size } else if ( default_weight_[ synapse_indx ] ) { - kernel::manager< ConnectionManager >().connect( snode_id, + kernel::manager< ConnectionManager >.connect( snode_id, &target, target_thread, synapse_model_id_[ synapse_indx ], @@ -417,7 +421,7 @@ nest::BipartiteConnBuilder::single_connect_( size_t snode_id, Node& target, size } else if ( default_delay_[ synapse_indx ] ) { - kernel::manager< ConnectionManager >().connect( snode_id, + kernel::manager< ConnectionManager >.connect( snode_id, &target, target_thread, synapse_model_id_[ synapse_indx ], @@ -429,7 +433,7 @@ nest::BipartiteConnBuilder::single_connect_( size_t snode_id, Node& target, size { const double delay = delays_[ synapse_indx ]->value_double( target_thread, rng, snode_id, &target ); const double weight = weights_[ synapse_indx ]->value_double( target_thread, rng, snode_id, &target ); - kernel::manager< ConnectionManager >().connect( snode_id, + kernel::manager< ConnectionManager >.connect( snode_id, &target, target_thread, synapse_model_id_[ synapse_indx ], @@ -495,7 +499,7 @@ nest::BipartiteConnBuilder::all_parameters_scalar_() const 
bool nest::BipartiteConnBuilder::loop_over_targets_() const { - return targets_->size() < kernel::manager< NodeManager >().size() or not targets_->is_range() + return targets_->size() < kernel::manager< NodeManager >.size() or not targets_->is_range() or parameters_requiring_skipping_.size() > 0; } @@ -509,12 +513,12 @@ nest::BipartiteConnBuilder::set_synapse_model_( DictionaryDatum syn_params, size const std::string syn_name = ( *syn_params )[ names::synapse_model ]; // The following call will throw "UnknownSynapseType" if syn_name is not naming a known model - const size_t synapse_model_id = kernel::manager< ModelManager >().get_synapse_model_id( syn_name ); + const size_t synapse_model_id = kernel::manager< ModelManager >.get_synapse_model_id( syn_name ); synapse_model_id_[ synapse_indx ] = synapse_model_id; // We need to make sure that Connect can process all synapse parameters specified. const ConnectorModel& synapse_model = - kernel::manager< ModelManager >().get_connection_model( synapse_model_id, /* thread */ 0 ); + kernel::manager< ModelManager >.get_connection_model( synapse_model_id, /* thread */ 0 ); synapse_model.check_synapse_params( syn_params ); } @@ -522,7 +526,7 @@ void nest::BipartiteConnBuilder::set_default_weight_or_delay_( DictionaryDatum syn_params, size_t synapse_indx ) { DictionaryDatum syn_defaults = - kernel::manager< ModelManager >().get_connector_defaults( synapse_model_id_[ synapse_indx ] ); + kernel::manager< ModelManager >.get_connector_defaults( synapse_model_id_[ synapse_indx ] ); // All synapse models have the possibility to set the delay (see SynIdDelay), but some have // homogeneous weights, hence it should be possible to set the delay without the weight. @@ -538,19 +542,19 @@ nest::BipartiteConnBuilder::set_default_weight_or_delay_( DictionaryDatum syn_pa if ( not default_weight_and_delay_[ synapse_indx ] ) { weights_[ synapse_indx ] = syn_params->known( names::weight ) - ? 
ConnParameter::create( ( *syn_params )[ names::weight ], kernel::manager< VPManager >().get_num_threads() ) - : ConnParameter::create( ( *syn_defaults )[ names::weight ], kernel::manager< VPManager >().get_num_threads() ); + ? ConnParameter::create( ( *syn_params )[ names::weight ], kernel::manager< VPManager >.get_num_threads() ) + : ConnParameter::create( ( *syn_defaults )[ names::weight ], kernel::manager< VPManager >.get_num_threads() ); register_parameters_requiring_skipping_( *weights_[ synapse_indx ] ); delays_[ synapse_indx ] = syn_params->known( names::delay ) - ? ConnParameter::create( ( *syn_params )[ names::delay ], kernel::manager< VPManager >().get_num_threads() ) - : ConnParameter::create( ( *syn_defaults )[ names::delay ], kernel::manager< VPManager >().get_num_threads() ); + ? ConnParameter::create( ( *syn_params )[ names::delay ], kernel::manager< VPManager >.get_num_threads() ) + : ConnParameter::create( ( *syn_defaults )[ names::delay ], kernel::manager< VPManager >.get_num_threads() ); } else if ( default_weight_[ synapse_indx ] ) { delays_[ synapse_indx ] = syn_params->known( names::delay ) - ? ConnParameter::create( ( *syn_params )[ names::delay ], kernel::manager< VPManager >().get_num_threads() ) - : ConnParameter::create( ( *syn_defaults )[ names::delay ], kernel::manager< VPManager >().get_num_threads() ); + ? 
ConnParameter::create( ( *syn_params )[ names::delay ], kernel::manager< VPManager >.get_num_threads() ) + : ConnParameter::create( ( *syn_defaults )[ names::delay ], kernel::manager< VPManager >.get_num_threads() ); } register_parameters_requiring_skipping_( *delays_[ synapse_indx ] ); } @@ -571,14 +575,14 @@ nest::BipartiteConnBuilder::set_synapse_params( DictionaryDatum syn_defaults, if ( syn_params->known( param_name ) ) { synapse_params_[ synapse_indx ][ param_name ] = - ConnParameter::create( ( *syn_params )[ param_name ], kernel::manager< VPManager >().get_num_threads() ); + ConnParameter::create( ( *syn_params )[ param_name ], kernel::manager< VPManager >.get_num_threads() ); register_parameters_requiring_skipping_( *synapse_params_[ synapse_indx ][ param_name ] ); } } // Now create dictionary with dummy values that we will use to pass settings to the synapses created. We // create it here once to avoid re-creating the object over and over again. - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { param_dicts_[ synapse_indx ].push_back( new Dictionary() ); @@ -658,14 +662,14 @@ nest::ThirdInBuilder::ThirdInBuilder( NodeCollectionPTR sources, const DictionaryDatum& third_conn_spec, const std::vector< DictionaryDatum >& syn_specs ) : BipartiteConnBuilder( sources, third, nullptr, third_conn_spec, syn_specs ) - , source_third_gids_( kernel::manager< VPManager >().get_num_threads(), nullptr ) - , source_third_counts_( kernel::manager< VPManager >().get_num_threads(), nullptr ) + , source_third_gids_( kernel::manager< VPManager >.get_num_threads(), nullptr ) + , source_third_counts_( kernel::manager< VPManager >.get_num_threads(), nullptr ) { #pragma omp parallel { - const size_t thrd = kernel::manager< VPManager >().get_thread_id(); + const size_t thrd = kernel::manager< VPManager >.get_thread_id(); source_third_gids_[ thrd ] = new 
BlockVector< SourceThirdInfo_ >(); - source_third_counts_[ thrd ] = new std::vector< size_t >( kernel::manager< MPIManager >().get_num_processes(), 0 ); + source_third_counts_[ thrd ] = new std::vector< size_t >( kernel::manager< MPIManager >.get_num_processes(), 0 ); } } @@ -673,7 +677,7 @@ nest::ThirdInBuilder::~ThirdInBuilder() { #pragma omp parallel { - const size_t thrd = kernel::manager< VPManager >().get_thread_id(); + const size_t thrd = kernel::manager< VPManager >.get_thread_id(); delete source_third_gids_[ thrd ]; delete source_third_counts_[ thrd ]; } @@ -682,9 +686,9 @@ nest::ThirdInBuilder::~ThirdInBuilder() void nest::ThirdInBuilder::register_connection( size_t primary_source_id, size_t third_node_id ) { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); - const auto third_node_rank = kernel::manager< MPIManager >().get_process_id_of_vp( - kernel::manager< VPManager >().node_id_to_vp( third_node_id ) ); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); + const auto third_node_rank = + kernel::manager< MPIManager >.get_process_id_of_vp( kernel::manager< VPManager >.node_id_to_vp( third_node_id ) ); source_third_gids_[ tid ]->push_back( { primary_source_id, third_node_id, third_node_rank } ); ++( ( *source_third_counts_[ tid ] )[ third_node_rank ] ); } @@ -692,10 +696,10 @@ nest::ThirdInBuilder::register_connection( size_t primary_source_id, size_t thir void nest::ThirdInBuilder::connect_() { - kernel::manager< VPManager >().assert_single_threaded(); + kernel::manager< VPManager >.assert_single_threaded(); // count up how many source-third pairs we need to send to each rank - const size_t num_ranks = kernel::manager< MPIManager >().get_num_processes(); + const size_t num_ranks = kernel::manager< MPIManager >.get_num_processes(); std::vector< size_t > source_third_per_rank( num_ranks, 0 ); for ( auto stcp : source_third_counts_ ) { @@ -708,9 +712,9 @@ nest::ThirdInBuilder::connect_() // now find global maximum; for 
simplicity, we will use this to configure buffers std::vector< long > max_stc( num_ranks ); // MPIManager does not support size_t - max_stc[ kernel::manager< MPIManager >().get_rank() ] = + max_stc[ kernel::manager< MPIManager >.get_rank() ] = *std::max_element( source_third_per_rank.begin(), source_third_per_rank.end() ); - kernel::manager< MPIManager >().communicate( max_stc ); + kernel::manager< MPIManager >.communicate( max_stc ); const size_t global_max_stc = *std::max_element( max_stc.begin(), max_stc.end() ); if ( global_max_stc == 0 ) @@ -749,7 +753,7 @@ nest::ThirdInBuilder::connect_() // force to master thread for compatibility with MPI standard #pragma omp master { - kernel::manager< MPIManager >().communicate_Alltoall( send_stg, recv_stg, send_recv_count ); + kernel::manager< MPIManager >.communicate_Alltoall( send_stg, recv_stg, send_recv_count ); } // Now recv_stg contains all source-third pairs where third is on current rank @@ -757,8 +761,8 @@ nest::ThirdInBuilder::connect_() #pragma omp parallel { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); - RngPtr rng = kernel::manager< RandomManager >().get_vp_specific_rng( tid ); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); + RngPtr rng = kernel::manager< RandomManager >.get_vp_specific_rng( tid ); for ( size_t idx = 0; idx < recv_stg.size(); idx += 2 ) { @@ -772,11 +776,11 @@ nest::ThirdInBuilder::connect_() continue; } - if ( kernel::manager< VPManager >().is_node_id_vp_local( third_gid ) ) + if ( kernel::manager< VPManager >.is_node_id_vp_local( third_gid ) ) { const auto source_gid = recv_stg[ idx + 1 ]; assert( source_gid > 0 ); - single_connect_( source_gid, *kernel::manager< NodeManager >().get_node_or_proxy( third_gid, tid ), tid, rng ); + single_connect_( source_gid, *kernel::manager< NodeManager >.get_node_or_proxy( third_gid, tid ), tid, rng ); } } } @@ -802,7 +806,7 @@ nest::ThirdBernoulliWithPoolBuilder::ThirdBernoulliWithPoolBuilder( const NodeCo , 
random_pool_( true ) , pool_size_( third->size() ) , targets_per_third_( targets->size() / third->size() ) - , pools_( kernel::manager< VPManager >().get_num_threads(), nullptr ) + , pools_( kernel::manager< VPManager >.get_num_threads(), nullptr ) { updateValue< double >( conn_spec, names::p, p_ ); updateValue< long >( conn_spec, names::pool_size, pool_size_ ); @@ -845,7 +849,7 @@ nest::ThirdBernoulliWithPoolBuilder::ThirdBernoulliWithPoolBuilder( const NodeCo #pragma omp parallel { - const size_t thrd = kernel::manager< VPManager >().get_thread_id(); + const size_t thrd = kernel::manager< VPManager >.get_thread_id(); pools_[ thrd ] = new TgtPoolMap_(); } @@ -860,7 +864,7 @@ nest::ThirdBernoulliWithPoolBuilder::ThirdBernoulliWithPoolBuilder( const NodeCo size_t idx = 0; for ( auto tgt_it = targets_->begin(); tgt_it != targets_->end(); ++tgt_it ) { - Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id ); + Node* const tgt = kernel::manager< NodeManager >.get_node_or_proxy( ( *tgt_it ).node_id ); if ( not tgt->is_proxy() ) { tgt->set_tmp_nc_index( idx++ ); // must be postfix @@ -873,7 +877,7 @@ nest::ThirdBernoulliWithPoolBuilder::~ThirdBernoulliWithPoolBuilder() { #pragma omp parallel { - const size_t thrd = kernel::manager< VPManager >().get_thread_id(); + const size_t thrd = kernel::manager< VPManager >.get_thread_id(); delete pools_[ thrd ]; if ( not random_pool_ ) @@ -884,7 +888,7 @@ nest::ThirdBernoulliWithPoolBuilder::~ThirdBernoulliWithPoolBuilder() // Here we can work in parallel since we just reset to invalid_index for ( auto tgt_it = targets_->thread_local_begin(); tgt_it != targets_->end(); ++tgt_it ) { - Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id, thrd ); + Node* const tgt = kernel::manager< NodeManager >.get_node_or_proxy( ( *tgt_it ).node_id, thrd ); assert( not tgt->is_proxy() ); tgt->set_tmp_nc_index( invalid_index ); } @@ -896,7 +900,7 @@ void 
nest::ThirdBernoulliWithPoolBuilder::third_connect( size_t primary_source_id, Node& primary_target ) { // We assume target is on this thread - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); RngPtr rng = get_vp_specific_rng( tid ); // conditionally connect third factor @@ -969,7 +973,7 @@ nest::OneToOneBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { @@ -994,7 +998,7 @@ nest::OneToOneBuilder::connect_() continue; } - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); if ( target->is_proxy() ) { // skip array parameters handled in other virtual processes @@ -1007,7 +1011,7 @@ nest::OneToOneBuilder::connect_() } else { - const SparseNodeArray& local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); + const SparseNodeArray& local_nodes = kernel::manager< NodeManager >.get_local_nodes( tid ); SparseNodeArray::const_iterator n; for ( n = local_nodes.begin(); n != local_nodes.end(); ++n ) { @@ -1048,7 +1052,7 @@ nest::OneToOneBuilder::disconnect_() #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { @@ -1062,13 +1066,13 @@ nest::OneToOneBuilder::disconnect_() const size_t snode_id = ( *source_it ).node_id; // check whether the target is on this mpi machine - if ( not kernel::manager< NodeManager >().is_local_node_id( tnode_id ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( tnode_id ) ) { // Disconnecting: no parameter skipping required continue; } - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* const 
target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); // check whether the target is a proxy @@ -1096,7 +1100,7 @@ nest::OneToOneBuilder::sp_connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { @@ -1121,7 +1125,7 @@ nest::OneToOneBuilder::sp_connect_() skip_conn_parameter_( tid ); continue; } - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); single_connect_( snode_id, *target, target_thread, rng ); @@ -1143,7 +1147,7 @@ nest::OneToOneBuilder::sp_disconnect_() #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { @@ -1161,7 +1165,7 @@ nest::OneToOneBuilder::sp_disconnect_() continue; } - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); single_disconnect_( snode_id, *target, target_thread ); @@ -1183,7 +1187,7 @@ nest::AllToAllBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { @@ -1195,7 +1199,7 @@ nest::AllToAllBuilder::connect_() for ( ; target_it < targets_->end(); ++target_it ) { const size_t tnode_id = ( *target_it ).node_id; - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); if ( 
target->is_proxy() ) { skip_conn_parameter_( tid, sources_->size() ); @@ -1207,7 +1211,7 @@ nest::AllToAllBuilder::connect_() } else { - const SparseNodeArray& local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); + const SparseNodeArray& local_nodes = kernel::manager< NodeManager >.get_local_nodes( tid ); SparseNodeArray::const_iterator n; for ( n = local_nodes.begin(); n != local_nodes.end(); ++n ) { @@ -1271,7 +1275,7 @@ nest::AllToAllBuilder::sp_connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { RngPtr rng = get_vp_specific_rng( tid ); @@ -1296,7 +1300,7 @@ nest::AllToAllBuilder::sp_connect_() skip_conn_parameter_( tid, sources_->size() ); continue; } - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); single_connect_( snode_id, *target, target_thread, rng ); } @@ -1318,7 +1322,7 @@ nest::AllToAllBuilder::disconnect_() #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { @@ -1328,13 +1332,13 @@ nest::AllToAllBuilder::disconnect_() const size_t tnode_id = ( *target_it ).node_id; // check whether the target is on this mpi machine - if ( not kernel::manager< NodeManager >().is_local_node_id( tnode_id ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( tnode_id ) ) { // Disconnecting: no parameter skipping required continue; } - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); // check whether the target is a proxy 
@@ -1367,7 +1371,7 @@ nest::AllToAllBuilder::sp_disconnect_() #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { @@ -1386,7 +1390,7 @@ nest::AllToAllBuilder::sp_disconnect_() // Disconnecting: no parameter skipping required continue; } - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); single_disconnect_( snode_id, *target, target_thread ); } @@ -1464,7 +1468,7 @@ nest::FixedInDegreeBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { @@ -1476,7 +1480,7 @@ nest::FixedInDegreeBuilder::connect_() for ( ; target_it < targets_->end(); ++target_it ) { const size_t tnode_id = ( *target_it ).node_id; - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); const long indegree_value = std::round( indegree_->value( rng, target ) ); if ( target->is_proxy() ) @@ -1491,7 +1495,7 @@ nest::FixedInDegreeBuilder::connect_() } else { - const SparseNodeArray& local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); + const SparseNodeArray& local_nodes = kernel::manager< NodeManager >.get_local_nodes( tid ); SparseNodeArray::const_iterator n; for ( n = local_nodes.begin(); n != local_nodes.end(); ++n ) { @@ -1639,7 +1643,7 @@ nest::FixedOutDegreeBuilder::connect_() std::vector< size_t > tgt_ids_; const long n_rnd = targets_->size(); - Node* source_node = kernel::manager< NodeManager >().get_node_or_proxy( snode_id ); + Node* source_node = kernel::manager< NodeManager >.get_node_or_proxy( 
snode_id ); const long outdegree_value = std::round( outdegree_->value( grng, source_node ) ); for ( long j = 0; j < outdegree_value; ++j ) { @@ -1667,7 +1671,7 @@ nest::FixedOutDegreeBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { @@ -1676,7 +1680,7 @@ nest::FixedOutDegreeBuilder::connect_() std::vector< size_t >::const_iterator tnode_id_it = tgt_ids_.begin(); for ( ; tnode_id_it != tgt_ids_.end(); ++tnode_id_it ) { - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( *tnode_id_it, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( *tnode_id_it, tid ); if ( target->is_proxy() ) { // skip array parameters handled in other virtual processes @@ -1736,7 +1740,7 @@ nest::FixedTotalNumberBuilder::FixedTotalNumberBuilder( NodeCollectionPTR source void nest::FixedTotalNumberBuilder::connect_() { - const int M = kernel::manager< VPManager >().get_num_virtual_processes(); + const int M = kernel::manager< VPManager >.get_num_virtual_processes(); const long size_sources = sources_->size(); const long size_targets = targets_->size(); @@ -1746,12 +1750,12 @@ nest::FixedTotalNumberBuilder::connect_() // function std::vector< size_t > number_of_targets_on_vp( M, 0 ); std::vector< size_t > local_targets; - local_targets.reserve( size_targets / kernel::manager< MPIManager >().get_num_processes() ); + local_targets.reserve( size_targets / kernel::manager< MPIManager >.get_num_processes() ); for ( size_t t = 0; t < targets_->size(); t++ ) { - int vp = kernel::manager< VPManager >().node_id_to_vp( ( *targets_ )[ t ] ); + int vp = kernel::manager< VPManager >.node_id_to_vp( ( *targets_ )[ t ] ); ++number_of_targets_on_vp[ vp ]; - if ( kernel::manager< VPManager >().is_local_vp( vp ) ) + if ( kernel::manager< VPManager >.is_local_vp( vp ) ) { local_targets.push_back( ( *targets_ )[ t ] 
); } @@ -1807,13 +1811,13 @@ nest::FixedTotalNumberBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { - const size_t vp_id = kernel::manager< VPManager >().thread_to_vp( tid ); + const size_t vp_id = kernel::manager< VPManager >.thread_to_vp( tid ); - if ( kernel::manager< VPManager >().is_local_vp( vp_id ) ) + if ( kernel::manager< VPManager >.is_local_vp( vp_id ) ) { RngPtr rng = get_vp_specific_rng( tid ); @@ -1824,7 +1828,7 @@ nest::FixedTotalNumberBuilder::connect_() std::vector< size_t >::const_iterator tnode_id_it = local_targets.begin(); for ( ; tnode_id_it != local_targets.end(); ++tnode_id_it ) { - if ( kernel::manager< VPManager >().node_id_to_vp( *tnode_id_it ) == vp_id ) + if ( kernel::manager< VPManager >.node_id_to_vp( *tnode_id_it ) == vp_id ) { thread_local_targets.push_back( *tnode_id_it ); } @@ -1847,7 +1851,7 @@ nest::FixedTotalNumberBuilder::connect_() // targets_on_vp vector const long tnode_id = thread_local_targets[ t_index ]; - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); if ( allow_autapses_ or snode_id != tnode_id ) @@ -1900,7 +1904,7 @@ nest::BernoulliBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { @@ -1912,7 +1916,7 @@ nest::BernoulliBuilder::connect_() for ( ; target_it < targets_->end(); ++target_it ) { const size_t tnode_id = ( *target_it ).node_id; - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); if ( target->is_proxy() 
) { // skip array parameters handled in other virtual processes @@ -1926,7 +1930,7 @@ nest::BernoulliBuilder::connect_() else { - const SparseNodeArray& local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); + const SparseNodeArray& local_nodes = kernel::manager< NodeManager >.get_local_nodes( tid ); SparseNodeArray::const_iterator n; for ( n = local_nodes.begin(); n != local_nodes.end(); ++n ) { @@ -2018,7 +2022,7 @@ nest::PoissonBuilder::connect_() #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { @@ -2030,7 +2034,7 @@ nest::PoissonBuilder::connect_() for ( ; target_it < targets_->end(); ++target_it ) { const size_t tnode_id = ( *target_it ).node_id; - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); if ( target->is_proxy() ) { // skip parameters handled in other virtual processes @@ -2043,7 +2047,7 @@ nest::PoissonBuilder::connect_() } else { - const SparseNodeArray& local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); + const SparseNodeArray& local_nodes = kernel::manager< NodeManager >.get_local_nodes( tid ); SparseNodeArray::const_iterator n; for ( n = local_nodes.begin(); n != local_nodes.end(); ++n ) { @@ -2140,7 +2144,7 @@ nest::SymmetricBernoulliBuilder::connect_() { #pragma omp parallel { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); // Use RNG generating same number sequence on all threads RngPtr synced_rng = get_vp_synced_rng( tid ); @@ -2168,7 +2172,7 @@ nest::SymmetricBernoulliBuilder::connect_() } assert( indegree < sources_->size() ); - target = kernel::manager< NodeManager >().get_node_or_proxy( ( *tnode_id ).node_id, tid ); + target = kernel::manager< NodeManager 
>.get_node_or_proxy( ( *tnode_id ).node_id, tid ); target_thread = tid; // check whether the target is on this thread @@ -2194,7 +2198,7 @@ nest::SymmetricBernoulliBuilder::connect_() } previous_snode_ids.insert( snode_id ); - source = kernel::manager< NodeManager >().get_node_or_proxy( snode_id, tid ); + source = kernel::manager< NodeManager >.get_node_or_proxy( snode_id, tid ); source_thread = tid; if ( source->is_proxy() ) @@ -2249,7 +2253,7 @@ nest::SPBuilder::update_delay( long& d ) const { if ( get_default_delay() ) { - DictionaryDatum syn_defaults = kernel::manager< ModelManager >().get_connector_defaults( get_synapse_model() ); + DictionaryDatum syn_defaults = kernel::manager< ModelManager >.get_connector_defaults( get_synapse_model() ); const double delay = getValue< double >( syn_defaults, "delay" ); d = Time( Time::ms( delay ) ).get_steps(); } @@ -2261,7 +2265,7 @@ nest::SPBuilder::sp_connect( const std::vector< size_t >& sources, const std::ve connect_( sources, targets ); // check if any exceptions have been raised - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { if ( exceptions_raised_.at( tid ).get() ) { @@ -2298,7 +2302,7 @@ nest::SPBuilder::connect_( const std::vector< size_t >& sources, const std::vect #pragma omp parallel { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); try { @@ -2320,7 +2324,7 @@ nest::SPBuilder::connect_( const std::vector< size_t >& sources, const std::vect skip_conn_parameter_( tid ); continue; } - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( *tnode_id_it, tid ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( *tnode_id_it, tid ); single_connect_( *snode_id_it, *target, tid, rng ); } diff --git a/nestkernel/conn_builder.h 
b/nestkernel/conn_builder.h index a4884d6675..be3874ed28 100644 --- a/nestkernel/conn_builder.h +++ b/nestkernel/conn_builder.h @@ -872,7 +872,7 @@ BipartiteConnBuilder::single_disconnect_( size_t snode_id, Node& target, size_t { throw KernelException( "Can only disconnect when single element syn_spec has been used." ); } - kernel::manager< SPManager >().disconnect( snode_id, &target, target_thread, synapse_model_id_[ 0 ] ); + kernel::manager< SPManager >.disconnect( snode_id, &target, target_thread, synapse_model_id_[ 0 ] ); } } // namespace nest diff --git a/nestkernel/conn_builder_conngen.cpp b/nestkernel/conn_builder_conngen.cpp index fdb94a81c8..f33948ec04 100644 --- a/nestkernel/conn_builder_conngen.cpp +++ b/nestkernel/conn_builder_conngen.cpp @@ -91,7 +91,7 @@ ConnectionGeneratorBuilder::connect_() { // No need to check for locality of the target, as the mask // created by cg_set_masks() only contains local nodes. - Node* const target_node = kernel::manager< NodeManager >().get_node_or_proxy( ( *targets_ )[ target ] ); + Node* const target_node = kernel::manager< NodeManager >.get_node_or_proxy( ( *targets_ )[ target ] ); const size_t target_thread = target_node->get_thread(); single_connect_( ( *sources_ )[ source ], *target_node, target_thread, rng ); } @@ -120,13 +120,13 @@ ConnectionGeneratorBuilder::connect_() { // No need to check for locality of the target node, as the mask // created by cg_set_masks() only contains local nodes. 
- Node* target_node = kernel::manager< NodeManager >().get_node_or_proxy( ( *targets_ )[ target ] ); + Node* target_node = kernel::manager< NodeManager >.get_node_or_proxy( ( *targets_ )[ target ] ); const size_t target_thread = target_node->get_thread(); update_param_dict_( ( *sources_ )[ source ], *target_node, target_thread, rng, 0 ); // Use the low-level connect() here, as we need to pass a custom weight and delay - kernel::manager< ConnectionManager >().connect( ( *sources_ )[ source ], + kernel::manager< ConnectionManager >.connect( ( *sources_ )[ source ], target_node, target_thread, synapse_model_id_[ 0 ], @@ -145,7 +145,7 @@ ConnectionGeneratorBuilder::connect_() void ConnectionGeneratorBuilder::cg_set_masks() { - const size_t np = kernel::manager< MPIManager >().get_num_processes(); + const size_t np = kernel::manager< MPIManager >.get_num_processes(); std::vector< ConnectionGenerator::Mask > masks( np, ConnectionGenerator::Mask( 1, np ) ); // The index of the left border of the currently looked at range @@ -205,7 +205,7 @@ ConnectionGeneratorBuilder::cg_set_masks() cg_idx_left += num_elements; } - cg_->setMask( masks, kernel::manager< MPIManager >().get_rank() ); + cg_->setMask( masks, kernel::manager< MPIManager >.get_rank() ); } diff --git a/nestkernel/connection.h b/nestkernel/connection.h index 36b8678da3..58e3c1dd2b 100644 --- a/nestkernel/connection.h +++ b/nestkernel/connection.h @@ -362,7 +362,7 @@ Connection< targetidentifierT >::set_status( const DictionaryDatum& d, Connector double delay; if ( updateValue< double >( d, names::delay, delay ) ) { - kernel::manager< ConnectionManager >().get_delay_checker().assert_valid_delay_ms( delay ); + kernel::manager< ConnectionManager >.get_delay_checker().assert_valid_delay_ms( delay ); syn_id_delay_.set_delay_ms( delay ); } // no call to target_.set_status() because target and rport cannot be changed diff --git a/nestkernel/connection_creator.cpp b/nestkernel/connection_creator.cpp index 
157c22ec10..3ddaed68d4 100644 --- a/nestkernel/connection_creator.cpp +++ b/nestkernel/connection_creator.cpp @@ -95,7 +95,7 @@ ConnectionCreator::ConnectionCreator( DictionaryDatum dict ) { // If not, we have single synapses. param_dicts_.resize( 1 ); - param_dicts_[ 0 ].resize( kernel::manager< VPManager >().get_num_threads() ); + param_dicts_[ 0 ].resize( kernel::manager< VPManager >.get_num_threads() ); extract_params_( dict, param_dicts_[ 0 ] ); } @@ -104,9 +104,9 @@ ConnectionCreator::ConnectionCreator( DictionaryDatum dict ) // Set default synapse_model, weight and delay if not given explicitly if ( synapse_model_.empty() ) { - synapse_model_ = { kernel::manager< ModelManager >().get_synapse_model_id( "static_synapse" ) }; + synapse_model_ = { kernel::manager< ModelManager >.get_synapse_model_id( "static_synapse" ) }; } - DictionaryDatum syn_defaults = kernel::manager< ModelManager >().get_connector_defaults( synapse_model_[ 0 ] ); + DictionaryDatum syn_defaults = kernel::manager< ModelManager >.get_connector_defaults( synapse_model_[ 0 ] ); if ( weight_.empty() ) { weight_ = { NestModule::create_parameter( ( *syn_defaults )[ names::weight ] ) }; @@ -167,10 +167,10 @@ ConnectionCreator::extract_params_( const DictionaryDatum& dict_datum, std::vect std::string syn_name = ( *dict_datum )[ names::synapse_model ]; // The following call will throw "UnknownSynapseType" if syn_name is not naming a known model - const size_t synapse_model_id = kernel::manager< ModelManager >().get_synapse_model_id( syn_name ); + const size_t synapse_model_id = kernel::manager< ModelManager >.get_synapse_model_id( syn_name ); synapse_model_.push_back( synapse_model_id ); - DictionaryDatum syn_defaults = kernel::manager< ModelManager >().get_connector_defaults( synapse_model_id ); + DictionaryDatum syn_defaults = kernel::manager< ModelManager >.get_connector_defaults( synapse_model_id ); if ( dict_datum->known( names::weight ) ) { weight_.push_back( NestModule::create_parameter( ( 
*dict_datum )[ names::weight ] ) ); @@ -209,10 +209,10 @@ ConnectionCreator::extract_params_( const DictionaryDatum& dict_datum, std::vect copy_long_if_known( names::synapse_label ); copy_long_if_known( names::receptor_type ); - params.resize( kernel::manager< VPManager >().get_num_threads() ); + params.resize( kernel::manager< VPManager >.get_num_threads() ); #pragma omp parallel { - params.at( kernel::manager< VPManager >().get_thread_id() ) = syn_dict; + params.at( kernel::manager< VPManager >.get_thread_id() ) = syn_dict; } } diff --git a/nestkernel/connection_creator.h b/nestkernel/connection_creator.h index 0ff9838216..30da11917b 100644 --- a/nestkernel/connection_creator.h +++ b/nestkernel/connection_creator.h @@ -259,7 +259,7 @@ ConnectionCreator::connect_to_target_( Iterator from, { for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) { - kernel::manager< ConnectionManager >().connect( iter->second, + kernel::manager< ConnectionManager >.connect( iter->second, tgt_ptr, tgt_thread, synapse_model_[ indx ], @@ -304,7 +304,7 @@ ConnectionCreator::connect_to_target_poisson_( Iterator from, { for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) { - kernel::manager< ConnectionManager >().connect( iter->second, + kernel::manager< ConnectionManager >.connect( iter->second, tgt_ptr, tgt_thread, synapse_model_[ indx ], @@ -406,11 +406,11 @@ ConnectionCreator::pairwise_bernoulli_on_source_( Layer< D >& source, } std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( - kernel::manager< VPManager >().get_num_threads() ); + kernel::manager< VPManager >.get_num_threads() ); #pragma omp parallel { - const int thread_id = kernel::manager< VPManager >().get_thread_id(); + const int thread_id = kernel::manager< VPManager >.get_thread_id(); try { NodeCollection::const_iterator target_begin = target_nc->begin(); @@ -418,7 +418,7 @@ ConnectionCreator::pairwise_bernoulli_on_source_( Layer< D >& source, for ( NodeCollection::const_iterator 
tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) { - Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); + Node* const tgt = kernel::manager< NodeManager >.get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); if ( not tgt->is_proxy() ) { @@ -445,7 +445,7 @@ ConnectionCreator::pairwise_bernoulli_on_source_( Layer< D >& source, } } // omp parallel // check if any exceptions have been raised - for ( size_t thr = 0; thr < kernel::manager< VPManager >().get_num_threads(); ++thr ) + for ( size_t thr = 0; thr < kernel::manager< VPManager >.get_num_threads(); ++thr ) { if ( exceptions_raised_.at( thr ).get() ) { @@ -484,10 +484,10 @@ ConnectionCreator::pairwise_bernoulli_on_target_( Layer< D >& source, } std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( - kernel::manager< VPManager >().get_num_threads() ); + kernel::manager< VPManager >.get_num_threads() ); // We only need to check the first in the NodeCollection - Node* const first_in_tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_nc->operator[]( 0 ) ); + Node* const first_in_tgt = kernel::manager< NodeManager >.get_node_or_proxy( target_nc->operator[]( 0 ) ); if ( not first_in_tgt->has_proxies() ) { throw IllegalConnection( "Spatial Connect with pairwise_bernoulli to devices is not possible." 
); @@ -495,7 +495,7 @@ ConnectionCreator::pairwise_bernoulli_on_target_( Layer< D >& source, #pragma omp parallel { - const int thread_id = kernel::manager< VPManager >().get_thread_id(); + const int thread_id = kernel::manager< VPManager >.get_thread_id(); try { NodeCollection::const_iterator target_begin = target_nc->thread_local_begin(); @@ -503,7 +503,7 @@ ConnectionCreator::pairwise_bernoulli_on_target_( Layer< D >& source, for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) { - Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); + Node* const tgt = kernel::manager< NodeManager >.get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); assert( not tgt->is_proxy() ); @@ -532,7 +532,7 @@ ConnectionCreator::pairwise_bernoulli_on_target_( Layer< D >& source, } } // omp parallel // check if any exceptions have been raised - for ( size_t thr = 0; thr < kernel::manager< VPManager >().get_num_threads(); ++thr ) + for ( size_t thr = 0; thr < kernel::manager< VPManager >.get_num_threads(); ++thr ) { if ( exceptions_raised_.at( thr ).get() ) { @@ -567,11 +567,11 @@ ConnectionCreator::pairwise_poisson_( Layer< D >& source, } std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( - kernel::manager< VPManager >().get_num_threads() ); + kernel::manager< VPManager >.get_num_threads() ); #pragma omp parallel { - const int thread_id = kernel::manager< VPManager >().get_thread_id(); + const int thread_id = kernel::manager< VPManager >.get_thread_id(); try { NodeCollection::const_iterator target_begin = target_nc->begin(); @@ -579,7 +579,7 @@ ConnectionCreator::pairwise_poisson_( Layer< D >& source, for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) { - Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id, thread_id ); + Node* const tgt = kernel::manager< NodeManager >.get_node_or_proxy( ( 
*tgt_it ).node_id, thread_id ); if ( not tgt->is_proxy() ) { @@ -606,7 +606,7 @@ ConnectionCreator::pairwise_poisson_( Layer< D >& source, } } // omp parallel // check if any exceptions have been raised - for ( size_t thr = 0; thr < kernel::manager< VPManager >().get_num_threads(); ++thr ) + for ( size_t thr = 0; thr < kernel::manager< VPManager >.get_num_threads(); ++thr ) { if ( exceptions_raised_.at( thr ).get() ) { @@ -631,7 +631,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, // 3. Draw source nodes and make connections // We only need to check the first in the NodeCollection - Node* const first_in_tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_nc->operator[]( 0 ) ); + Node* const first_in_tgt = kernel::manager< NodeManager >.get_node_or_proxy( target_nc->operator[]( 0 ) ); if ( not first_in_tgt->has_proxies() ) { throw IllegalConnection( "Spatial Connect with fixed_indegree to devices is not possible." ); @@ -645,7 +645,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, // the network untouched if any target does not have proxies for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) { - assert( not kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id )->is_proxy() ); + assert( not kernel::manager< NodeManager >.get_node_or_proxy( ( *tgt_it ).node_id )->is_proxy() ); } if ( mask_.get() ) @@ -658,7 +658,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) { size_t target_id = ( *tgt_it ).node_id; - Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_id ); + Node* const tgt = kernel::manager< NodeManager >.get_node_or_proxy( target_id ); size_t target_thread = tgt->get_thread(); RngPtr rng = get_vp_specific_rng( target_thread ); @@ -733,7 +733,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, { const double w = weight_[ indx 
]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - kernel::manager< ConnectionManager >().connect( + kernel::manager< ConnectionManager >.connect( source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); } @@ -772,7 +772,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, { const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - kernel::manager< ConnectionManager >().connect( + kernel::manager< ConnectionManager >.connect( source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); } @@ -792,7 +792,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) { size_t target_id = ( *tgt_it ).node_id; - Node* const tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_id ); + Node* const tgt = kernel::manager< NodeManager >.get_node_or_proxy( target_id ); size_t target_thread = tgt->get_thread(); RngPtr rng = get_vp_specific_rng( target_thread ); Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); @@ -859,7 +859,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, { const double w = weight_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - kernel::manager< ConnectionManager >().connect( + kernel::manager< ConnectionManager >.connect( source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); } @@ -896,7 +896,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, { const double w = weight_[ indx ]->value( rng, 
source_pos_vector, target_pos_vector, source, tgt ); const double d = delay_[ indx ]->value( rng, source_pos_vector, target_pos_vector, source, tgt ); - kernel::manager< ConnectionManager >().connect( + kernel::manager< ConnectionManager >.connect( source_id, tgt, target_thread, synapse_model_[ indx ], param_dicts_[ indx ][ target_thread ], d, w ); } @@ -921,7 +921,7 @@ ConnectionCreator::fixed_outdegree_( Layer< D >& source, // the network untouched if any target does not have proxies // We only need to check the first in the NodeCollection - Node* const first_in_tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_nc->operator[]( 0 ) ); + Node* const first_in_tgt = kernel::manager< NodeManager >.get_node_or_proxy( target_nc->operator[]( 0 ) ); if ( not first_in_tgt->has_proxies() ) { throw IllegalConnection( "Spatial Connect with fixed_outdegree to devices is not possible." ); @@ -932,7 +932,7 @@ ConnectionCreator::fixed_outdegree_( Layer< D >& source, for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) { - assert( not kernel::manager< NodeManager >().get_node_or_proxy( ( *tgt_it ).node_id )->is_proxy() ); + assert( not kernel::manager< NodeManager >.get_node_or_proxy( ( *tgt_it ).node_id )->is_proxy() ); } // Fixed_outdegree connections (fixed fan out) @@ -956,7 +956,7 @@ ConnectionCreator::fixed_outdegree_( Layer< D >& source, { const Position< D > source_pos = source_pos_node_id_pair.first; const size_t source_id = source_pos_node_id_pair.second; - const auto src = kernel::manager< NodeManager >().get_node_or_proxy( source_id ); + const auto src = kernel::manager< NodeManager >.get_node_or_proxy( source_id ); const std::vector< double > source_pos_vector = source_pos.get_vector(); // We create a target pos vector here that can be updated with the @@ -977,7 +977,7 @@ ConnectionCreator::fixed_outdegree_( Layer< D >& source, { // TODO: Why is probability calculated in source layer, but weight and delay in target 
layer? target_pos_node_id_pair.first.get_vector( target_pos_vector ); - const auto tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_pos_node_id_pair.second ); + const auto tgt = kernel::manager< NodeManager >.get_node_or_proxy( target_pos_node_id_pair.second ); probabilities.push_back( kernel_->value( grng, source_pos_vector, target_pos_vector, source, tgt ) ); } } @@ -1027,7 +1027,7 @@ ConnectionCreator::fixed_outdegree_( Layer< D >& source, std::vector< double > rng_delay_vec; for ( size_t indx = 0; indx < weight_.size(); ++indx ) { - const auto tgt = kernel::manager< NodeManager >().get_node_or_proxy( target_pos_node_id_pairs[ indx ].second ); + const auto tgt = kernel::manager< NodeManager >.get_node_or_proxy( target_pos_node_id_pairs[ indx ].second ); rng_weight_vec.push_back( weight_[ indx ]->value( grng, source_pos_vector, target_pos_vector, target, tgt ) ); rng_delay_vec.push_back( delay_[ indx ]->value( grng, source_pos_vector, target_pos_vector, target, tgt ) ); } @@ -1036,17 +1036,17 @@ ConnectionCreator::fixed_outdegree_( Layer< D >& source, // required for it. Each VP thus counts the connection as created, but only the VP hosting the // target neuron actually creates the connection. 
--number_of_connections; - if ( not kernel::manager< NodeManager >().is_local_node_id( target_id ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( target_id ) ) { continue; } - Node* target_ptr = kernel::manager< NodeManager >().get_node_or_proxy( target_id ); + Node* target_ptr = kernel::manager< NodeManager >.get_node_or_proxy( target_id ); const size_t target_thread = target_ptr->get_thread(); for ( size_t indx = 0; indx < synapse_model_.size(); ++indx ) { - kernel::manager< ConnectionManager >().connect( source_id, + kernel::manager< ConnectionManager >.connect( source_id, target_ptr, target_thread, synapse_model_[ indx ], diff --git a/nestkernel/connection_manager.cpp b/nestkernel/connection_manager.cpp index 4e2051eea2..5157bd6983 100644 --- a/nestkernel/connection_manager.cpp +++ b/nestkernel/connection_manager.cpp @@ -128,7 +128,7 @@ ConnectionManager::initialize( const bool adjust_number_of_threads_or_rng_only ) sw_construction_connect.reset(); } - const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >.get_num_threads(); connections_.resize( num_threads ); secondary_recv_buffer_pos_.resize( num_threads ); compressed_spike_data_.resize( 0 ); @@ -140,11 +140,11 @@ ConnectionManager::initialize( const bool adjust_number_of_threads_or_rng_only ) // We need to obtain this while in serial context to avoid problems when // increasing the number of threads. 
- const size_t num_conn_models = kernel::manager< ModelManager >().get_num_connection_models(); + const size_t num_conn_models = kernel::manager< ModelManager >.get_num_connection_models(); #pragma omp parallel { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); connections_.at( tid ) = std::vector< ConnectorBase* >( num_conn_models ); secondary_recv_buffer_pos_.at( tid ) = std::vector< std::vector< size_t > >(); } // of omp parallel @@ -153,11 +153,10 @@ ConnectionManager::initialize( const bool adjust_number_of_threads_or_rng_only ) target_table_.initialize(); target_table_devices_.initialize(); - std::vector< DelayChecker > tmp( kernel::manager< VPManager >().get_num_threads() ); + std::vector< DelayChecker > tmp( kernel::manager< VPManager >.get_num_threads() ); delay_checkers_.swap( tmp ); - std::vector< std::vector< size_t > > tmp2( - kernel::manager< VPManager >().get_num_threads(), std::vector< size_t >() ); + std::vector< std::vector< size_t > > tmp2( kernel::manager< VPManager >.get_num_threads(), std::vector< size_t >() ); num_connections_.swap( tmp2 ); } @@ -199,7 +198,7 @@ ConnectionManager::set_status( const DictionaryDatum& d ) } updateValue< bool >( d, names::keep_source_table, keep_source_table_ ); - if ( not keep_source_table_ and kernel::manager< SPManager >().is_structural_plasticity_enabled() ) + if ( not keep_source_table_ and kernel::manager< SPManager >.is_structural_plasticity_enabled() ) { throw KernelException( "If structural plasticity is enabled, keep_source_table can not be set " @@ -218,7 +217,7 @@ ConnectionManager::set_status( const DictionaryDatum& d ) DelayChecker& ConnectionManager::get_delay_checker() { - return delay_checkers_[ kernel::manager< VPManager >().get_thread_id() ]; + return delay_checkers_[ kernel::manager< VPManager >.get_thread_id() ]; } void @@ -250,18 +249,18 @@ ConnectionManager::get_synapse_status( const size_t 
source_node_id, const synindex syn_id, const size_t lcid ) const { - kernel::manager< ModelManager >().assert_valid_syn_id( syn_id, kernel::manager< VPManager >().get_thread_id() ); + kernel::manager< ModelManager >.assert_valid_syn_id( syn_id, kernel::manager< VPManager >.get_thread_id() ); DictionaryDatum dict( new Dictionary ); ( *dict )[ names::source ] = source_node_id; ( *dict )[ names::synapse_model ] = - LiteralDatum( kernel::manager< ModelManager >().get_connection_model( syn_id, /* thread */ 0 ).get_name() ); + LiteralDatum( kernel::manager< ModelManager >.get_connection_model( syn_id, /* thread */ 0 ).get_name() ); ( *dict )[ names::target_thread ] = tid; ( *dict )[ names::synapse_id ] = syn_id; ( *dict )[ names::port ] = lcid; - const Node* source = kernel::manager< NodeManager >().get_node_or_proxy( source_node_id, tid ); - const Node* target = kernel::manager< NodeManager >().get_node_or_proxy( target_node_id, tid ); + const Node* source = kernel::manager< NodeManager >.get_node_or_proxy( source_node_id, tid ); + const Node* target = kernel::manager< NodeManager >.get_node_or_proxy( target_node_id, tid ); // synapses from neurons to neurons and from neurons to globally // receiving devices @@ -296,14 +295,14 @@ ConnectionManager::set_synapse_status( const size_t source_node_id, const size_t lcid, const DictionaryDatum& dict ) { - kernel::manager< ModelManager >().assert_valid_syn_id( syn_id, kernel::manager< VPManager >().get_thread_id() ); + kernel::manager< ModelManager >.assert_valid_syn_id( syn_id, kernel::manager< VPManager >.get_thread_id() ); - const Node* source = kernel::manager< NodeManager >().get_node_or_proxy( source_node_id, tid ); - const Node* target = kernel::manager< NodeManager >().get_node_or_proxy( target_node_id, tid ); + const Node* source = kernel::manager< NodeManager >.get_node_or_proxy( source_node_id, tid ); + const Node* target = kernel::manager< NodeManager >.get_node_or_proxy( target_node_id, tid ); try { - 
ConnectorModel& cm = kernel::manager< ModelManager >().get_connection_model( syn_id, tid ); + ConnectorModel& cm = kernel::manager< ModelManager >.get_connection_model( syn_id, tid ); // synapses from neurons to neurons and from neurons to globally // receiving devices if ( ( source->has_proxies() and target->has_proxies() and connections_[ tid ][ syn_id ] ) @@ -330,7 +329,7 @@ ConnectionManager::set_synapse_status( const size_t source_node_id, { throw BadProperty( String::compose( "Setting status of '%1' connecting from node ID %2 to node ID %3 via port %4: %5", - kernel::manager< ModelManager >().get_connection_model( syn_id, tid ).get_name(), + kernel::manager< ModelManager >.get_connection_model( syn_id, tid ).get_name(), source_node_id, target_node_id, lcid, @@ -435,7 +434,7 @@ ConnectionManager::get_third_conn_builder( const std::string& name, void ConnectionManager::calibrate( const TimeConverter& tc ) { - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { delay_checkers_[ tid ].calibrate( tc ); } @@ -500,15 +499,15 @@ ConnectionManager::connect( TokenArray sources, TokenArray targets, const Dictio { const std::string synmodel_name = getValue< std::string >( synmodel ); // The following throws UnknownSynapseType for invalid synmodel_name - syn_id = kernel::manager< ModelManager >().get_synapse_model_id( synmodel_name ); + syn_id = kernel::manager< ModelManager >.get_synapse_model_id( synmodel_name ); } // Connect all sources to all targets for ( auto&& source : sources ) { - auto source_node = kernel::manager< NodeManager >().get_node_or_proxy( source ); + auto source_node = kernel::manager< NodeManager >.get_node_or_proxy( source ); for ( auto&& target : targets ) { - auto target_node = kernel::manager< NodeManager >().get_node_or_proxy( target ); + auto target_node = kernel::manager< NodeManager >.get_node_or_proxy( target ); auto 
target_thread = target_node->get_thread(); connect_( *source_node, *target_node, source, target_thread, syn_id, syn_spec ); } @@ -519,7 +518,7 @@ ConnectionManager::connect( TokenArray sources, TokenArray targets, const Dictio void ConnectionManager::update_delay_extrema_() { - if ( kernel::manager< SimulationManager >().has_been_simulated() ) + if ( kernel::manager< SimulationManager >.has_been_simulated() ) { // Once simulation has started, min/max_delay can no longer change, // so there is nothing to update. @@ -533,8 +532,8 @@ ConnectionManager::update_delay_extrema_() { // If no min/max_delay is set explicitly, then the default delay used by the // SPBuilders have to be respected for min/max_delay. - min_delay_ = std::min( min_delay_, kernel::manager< SPManager >().builder_min_delay() ); - max_delay_ = std::max( max_delay_, kernel::manager< SPManager >().builder_max_delay() ); + min_delay_ = std::min( min_delay_, kernel::manager< SPManager >.builder_min_delay() ); + max_delay_ = std::max( max_delay_, kernel::manager< SPManager >.builder_max_delay() ); } // If the user explicitly set min/max_delay, this happend on all MPI ranks, @@ -542,18 +541,18 @@ ConnectionManager::update_delay_extrema_() // explicitly, Connect() cannot induce new extrema. Thuse, we only need to communicate // with other ranks if the user has not set the extrema and connections may have // been created. 
- if ( not kernel::manager< ConnectionManager >().get_user_set_delay_extrema() - and kernel::manager< ConnectionManager >().connections_have_changed() - and kernel::manager< MPIManager >().get_num_processes() > 1 ) + if ( not kernel::manager< ConnectionManager >.get_user_set_delay_extrema() + and kernel::manager< ConnectionManager >.connections_have_changed() + and kernel::manager< MPIManager >.get_num_processes() > 1 ) { - std::vector< long > min_delays( kernel::manager< MPIManager >().get_num_processes() ); - min_delays[ kernel::manager< MPIManager >().get_rank() ] = min_delay_; - kernel::manager< MPIManager >().communicate( min_delays ); + std::vector< long > min_delays( kernel::manager< MPIManager >.get_num_processes() ); + min_delays[ kernel::manager< MPIManager >.get_rank() ] = min_delay_; + kernel::manager< MPIManager >.communicate( min_delays ); min_delay_ = *std::min_element( min_delays.begin(), min_delays.end() ); - std::vector< long > max_delays( kernel::manager< MPIManager >().get_num_processes() ); - max_delays[ kernel::manager< MPIManager >().get_rank() ] = max_delay_; - kernel::manager< MPIManager >().communicate( max_delays ); + std::vector< long > max_delays( kernel::manager< MPIManager >.get_num_processes() ); + max_delays[ kernel::manager< MPIManager >.get_rank() ] = max_delay_; + kernel::manager< MPIManager >.communicate( max_delays ); max_delay_ = *std::max_element( max_delays.begin(), max_delays.end() ); } @@ -573,9 +572,9 @@ ConnectionManager::connect( const size_t snode_id, const double delay, const double weight ) { - kernel::manager< ModelManager >().assert_valid_syn_id( syn_id, kernel::manager< VPManager >().get_thread_id() ); + kernel::manager< ModelManager >.assert_valid_syn_id( syn_id, kernel::manager< VPManager >.get_thread_id() ); - Node* source = kernel::manager< NodeManager >().get_node_or_proxy( snode_id, target_thread ); + Node* source = kernel::manager< NodeManager >.get_node_or_proxy( snode_id, target_thread ); ConnectionType 
connection_type = connection_required( source, target, target_thread ); @@ -602,18 +601,18 @@ ConnectionManager::connect( const size_t snode_id, const DictionaryDatum& params, const synindex syn_id ) { - kernel::manager< ModelManager >().assert_valid_syn_id( syn_id, kernel::manager< VPManager >().get_thread_id() ); + kernel::manager< ModelManager >.assert_valid_syn_id( syn_id, kernel::manager< VPManager >.get_thread_id() ); - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); - if ( not kernel::manager< NodeManager >().is_local_node_id( tnode_id ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( tnode_id ) ) { return false; } - Node* target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); - Node* source = kernel::manager< NodeManager >().get_node_or_proxy( snode_id, target_thread ); + Node* source = kernel::manager< NodeManager >.get_node_or_proxy( snode_id, target_thread ); ConnectionType connection_type = connection_required( source, target, target_thread ); bool connected = true; @@ -664,13 +663,13 @@ ConnectionManager::connect_arrays( long* sources, } } - const auto synapse_model_id = kernel::manager< ModelManager >().get_synapse_model_id( syn_model ); - const auto syn_model_defaults = kernel::manager< ModelManager >().get_connector_defaults( synapse_model_id ); + const auto synapse_model_id = kernel::manager< ModelManager >.get_synapse_model_id( syn_model ); + const auto syn_model_defaults = kernel::manager< ModelManager >.get_connector_defaults( synapse_model_id ); // Dictionary holding additional synapse parameters, passed to the connect call. 
std::vector< DictionaryDatum > param_dicts; - param_dicts.reserve( kernel::manager< VPManager >().get_num_threads() ); - for ( size_t i = 0; i < kernel::manager< VPManager >().get_num_threads(); ++i ) + param_dicts.reserve( kernel::manager< VPManager >.get_num_threads() ); + for ( size_t i = 0; i < kernel::manager< VPManager >.get_num_threads(); ++i ) { param_dicts.emplace_back( new Dictionary ); for ( auto& param_key : p_keys ) @@ -714,11 +713,11 @@ ConnectionManager::connect_arrays( long* sources, // Vector for storing exceptions raised by threads. std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised( - kernel::manager< VPManager >().get_num_threads() ); + kernel::manager< VPManager >.get_num_threads() ); #pragma omp parallel { - const auto tid = kernel::manager< VPManager >().get_thread_id(); + const auto tid = kernel::manager< VPManager >.get_thread_id(); try { auto s = sources; @@ -731,15 +730,15 @@ ConnectionManager::connect_arrays( long* sources, for ( ; s != sources + n; ++s, ++t, ++index_counter ) { - if ( 0 >= *s or static_cast< size_t >( *s ) > kernel::manager< NodeManager >().size() ) + if ( 0 >= *s or static_cast< size_t >( *s ) > kernel::manager< NodeManager >.size() ) { throw UnknownNode( *s ); } - if ( 0 >= *t or static_cast< size_t >( *t ) > kernel::manager< NodeManager >().size() ) + if ( 0 >= *t or static_cast< size_t >( *t ) > kernel::manager< NodeManager >.size() ) { throw UnknownNode( *t ); } - auto target_node = kernel::manager< NodeManager >().get_node_or_proxy( *t, tid ); + auto target_node = kernel::manager< NodeManager >.get_node_or_proxy( *t, tid ); if ( target_node->is_proxy() ) { increment_wd( w, d ); @@ -802,7 +801,7 @@ ConnectionManager::connect_arrays( long* sources, } } // check if any exceptions have been raised - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { if ( 
exceptions_raised.at( tid ).get() ) { @@ -899,7 +898,7 @@ ConnectionManager::connect_( Node& source, const double delay, const double weight ) { - ConnectorModel& conn_model = kernel::manager< ModelManager >().get_connection_model( syn_id, tid ); + ConnectorModel& conn_model = kernel::manager< ModelManager >.get_connection_model( syn_id, tid ); const bool clopath_archiving = conn_model.has_property( ConnectionModelProperties::REQUIRES_CLOPATH_ARCHIVING ); if ( clopath_archiving and not dynamic_cast< ClopathArchivingNode* >( &target ) ) @@ -1043,7 +1042,7 @@ ConnectionManager::trigger_update_weight( const long vt_id, const std::vector< spikecounter >& dopa_spikes, const double t_trig ) { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); for ( std::vector< ConnectorBase* >::iterator it = connections_[ tid ].begin(); it != connections_[ tid ].end(); ++it ) @@ -1051,7 +1050,7 @@ ConnectionManager::trigger_update_weight( const long vt_id, if ( *it ) { ( *it )->trigger_update_weight( - vt_id, tid, dopa_spikes, t_trig, kernel::manager< ModelManager >().get_connection_models( tid ) ); + vt_id, tid, dopa_spikes, t_trig, kernel::manager< ModelManager >.get_connection_models( tid ) ); } } } @@ -1141,12 +1140,12 @@ ConnectionManager::get_connections( const DictionaryDatum& params ) // Check whether waveform relaxation is used on any MPI process; // needs to be called before update_connection_infrastructure since // it resizes coefficient arrays for secondary events - kernel::manager< NodeManager >().check_wfr_use(); + kernel::manager< NodeManager >.check_wfr_use(); #pragma omp parallel { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); - kernel::manager< SimulationManager >().update_connection_infrastructure( tid ); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); + kernel::manager< SimulationManager >.update_connection_infrastructure( tid ); } } @@ 
-1156,12 +1155,12 @@ ConnectionManager::get_connections( const DictionaryDatum& params ) { const std::string synmodel_name = getValue< std::string >( syn_model_t ); // The following throws UnknownSynapseType for invalid synmodel_name - syn_id = kernel::manager< ModelManager >().get_synapse_model_id( synmodel_name ); + syn_id = kernel::manager< ModelManager >.get_synapse_model_id( synmodel_name ); get_connections( connectome, source_a, target_a, syn_id, synapse_label ); } else { - for ( syn_id = 0; syn_id < kernel::manager< ModelManager >().get_num_connection_models(); ++syn_id ) + for ( syn_id = 0; syn_id < kernel::manager< ModelManager >.get_num_connection_models(); ++syn_id ) { get_connections( connectome, source_a, target_a, syn_id, synapse_label ); } @@ -1205,7 +1204,7 @@ ConnectionManager::split_to_neuron_device_vectors_( const size_t tid, for ( ; t_id < nodecollection->end(); ++t_id ) { const size_t node_id = ( *t_id ).node_id; - const auto node = kernel::manager< NodeManager >().get_node_or_proxy( node_id, tid ); + const auto node = kernel::manager< NodeManager >.get_node_or_proxy( node_id, tid ); // Normal neuron nodes have proxies. Globally receiving devices, e.g. volume transmitter, don't have a local // receiver, but are connected in the same way as normal neuron nodes. Therefore they have to be treated as such // here. @@ -1372,7 +1371,7 @@ nest::ConnectionManager::get_connections( std::deque< ConnectionID >& connectome throw KernelException( "Invalid attempt to access connection information: source table was cleared." 
); } - size_t tid = kernel::manager< VPManager >().get_thread_id(); + size_t tid = kernel::manager< VPManager >.get_thread_id(); std::deque< ConnectionID > conns_in_thread; @@ -1424,7 +1423,7 @@ ConnectionManager::get_sources( const std::vector< size_t >& targets, ( *i ).clear(); } - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { for ( size_t i = 0; i < targets.size(); ++i ) { @@ -1445,7 +1444,7 @@ ConnectionManager::get_targets( const std::vector< size_t >& sources, ( *i ).clear(); } - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { for ( size_t i = 0; i < sources.size(); ++i ) { @@ -1482,23 +1481,23 @@ ConnectionManager::compute_target_data_buffer_size() // has its own data structures, we need to count connections on every // thread separately to compute the total number of sources. size_t num_target_data = 0; - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { num_target_data += get_num_target_data( tid ); } // Determine maximum number of target data across all ranks, because // all ranks need identically sized buffers. 
- std::vector< long > global_num_target_data( kernel::manager< MPIManager >().get_num_processes() ); - global_num_target_data[ kernel::manager< MPIManager >().get_rank() ] = num_target_data; - kernel::manager< MPIManager >().communicate( global_num_target_data ); + std::vector< long > global_num_target_data( kernel::manager< MPIManager >.get_num_processes() ); + global_num_target_data[ kernel::manager< MPIManager >.get_rank() ] = num_target_data; + kernel::manager< MPIManager >.communicate( global_num_target_data ); const size_t max_num_target_data = *std::max_element( global_num_target_data.begin(), global_num_target_data.end() ); // MPI buffers should have at least two entries per process - const size_t min_num_target_data = 2 * kernel::manager< MPIManager >().get_num_processes(); + const size_t min_num_target_data = 2 * kernel::manager< MPIManager >.get_num_processes(); // Adjust target data buffers accordingly - kernel::manager< MPIManager >().set_buffer_size_target_data( std::max( min_num_target_data, max_num_target_data ) ); + kernel::manager< MPIManager >.set_buffer_size_target_data( std::max( min_num_target_data, max_num_target_data ) ); } void @@ -1519,7 +1518,7 @@ ConnectionManager::compute_compressed_secondary_recv_buffer_positions( const siz if ( connections_[ tid ][ syn_id ] ) { - ConnectorModel& conn_model = kernel::manager< ModelManager >().get_connection_model( syn_id, tid ); + ConnectorModel& conn_model = kernel::manager< ModelManager >.get_connection_model( syn_id, tid ); const bool is_primary = conn_model.has_property( ConnectionModelProperties::IS_PRIMARY ); if ( not is_primary ) @@ -1534,10 +1533,10 @@ ConnectionManager::compute_compressed_secondary_recv_buffer_positions( const siz { const size_t source_node_id = source_table_.get_node_id( tid, syn_id, lcid ); const size_t sg_s_id = source_table_.pack_source_node_id_and_syn_id( source_node_id, syn_id ); - const size_t source_rank = kernel::manager< MPIManager >().get_process_id_of_node_id( 
source_node_id ); + const size_t source_rank = kernel::manager< MPIManager >.get_process_id_of_node_id( source_node_id ); positions[ lcid ] = buffer_pos_of_source_node_id_syn_id_[ sg_s_id ] - + kernel::manager< MPIManager >().get_recv_displacement_secondary_events_in_int( source_rank ); + + kernel::manager< MPIManager >.get_recv_displacement_secondary_events_in_int( source_rank ); } } } @@ -1551,8 +1550,8 @@ ConnectionManager::connection_required( Node*& source, Node*& target, size_t tid // proxy and that it is on thread tid. assert( not target->is_proxy() ); size_t target_vp = target->get_vp(); - assert( kernel::manager< VPManager >().is_local_vp( target_vp ) ); - assert( kernel::manager< VPManager >().vp_to_thread( target_vp ) == tid ); + assert( kernel::manager< VPManager >.is_local_vp( target_vp ) ); + assert( kernel::manager< VPManager >.vp_to_thread( target_vp ) == tid ); // Connections to nodes with proxies (neurons or devices with // proxies) which are local to tid have always to be @@ -1580,7 +1579,7 @@ ConnectionManager::connection_required( Node*& source, Node*& target, size_t tid // source may be a proxy on tid. 
if ( target->one_node_per_process() ) { - if ( kernel::manager< NodeManager >().is_local_node( source ) ) + if ( kernel::manager< NodeManager >.is_local_node( source ) ) { return CONNECT_TO_DEVICE; } @@ -1606,14 +1605,14 @@ ConnectionManager::connection_required( Node*& source, Node*& target, size_t tid if ( not source->has_proxies() ) { const size_t target_node_id = target->get_node_id(); - target_vp = kernel::manager< VPManager >().node_id_to_vp( target_node_id ); - const bool target_vp_local = kernel::manager< VPManager >().is_local_vp( target_vp ); - const size_t target_thread = kernel::manager< VPManager >().vp_to_thread( target_vp ); + target_vp = kernel::manager< VPManager >.node_id_to_vp( target_node_id ); + const bool target_vp_local = kernel::manager< VPManager >.is_local_vp( target_vp ); + const size_t target_thread = kernel::manager< VPManager >.vp_to_thread( target_vp ); if ( target_vp_local and target_thread == tid ) { const size_t source_node_id = source->get_node_id(); - source = kernel::manager< NodeManager >().get_node_or_proxy( source_node_id, target_thread ); + source = kernel::manager< NodeManager >.get_node_or_proxy( source_node_id, target_thread ); return CONNECT_FROM_DEVICE; } } @@ -1626,7 +1625,7 @@ ConnectionManager::connection_required( Node*& source, Node*& target, size_t tid { if ( source->has_proxies() ) { - target = kernel::manager< NodeManager >().get_node_or_proxy( target->get_node_id(), tid ); + target = kernel::manager< NodeManager >.get_node_or_proxy( target->get_node_id(), tid ); return CONNECT; } @@ -1670,21 +1669,21 @@ ConnectionManager::deliver_secondary_events( const size_t tid, const bool called_from_wfr_update, std::vector< unsigned int >& recv_buffer ) { - const std::vector< ConnectorModel* >& cm = kernel::manager< ModelManager >().get_connection_models( tid ); - const Time stamp = kernel::manager< SimulationManager >().get_slice_origin() - + Time::step( 1 - kernel::manager< ConnectionManager >().get_min_delay() ); + 
const std::vector< ConnectorModel* >& cm = kernel::manager< ModelManager >.get_connection_models( tid ); + const Time stamp = kernel::manager< SimulationManager >.get_slice_origin() + + Time::step( 1 - kernel::manager< ConnectionManager >.get_min_delay() ); const std::vector< std::vector< size_t > >& positions_tid = secondary_recv_buffer_pos_[ tid ]; const synindex syn_id_end = positions_tid.size(); for ( synindex syn_id = 0; syn_id < syn_id_end; ++syn_id ) { - const ConnectorModel& conn_model = kernel::manager< ModelManager >().get_connection_model( syn_id, tid ); + const ConnectorModel& conn_model = kernel::manager< ModelManager >.get_connection_model( syn_id, tid ); const bool supports_wfr = conn_model.has_property( ConnectionModelProperties::SUPPORTS_WFR ); if ( not called_from_wfr_update or supports_wfr ) { if ( positions_tid[ syn_id ].size() > 0 ) { - SecondaryEvent& prototype = kernel::manager< ModelManager >().get_secondary_event_prototype( syn_id, tid ); + SecondaryEvent& prototype = kernel::manager< ModelManager >.get_secondary_event_prototype( syn_id, tid ); size_t lcid = 0; const size_t lcid_end = positions_tid[ syn_id ].size(); @@ -1705,11 +1704,10 @@ ConnectionManager::deliver_secondary_events( const size_t tid, // Read waveform relaxation done marker from last position in every // chunk bool done = true; - for ( size_t rank = 0; rank < kernel::manager< MPIManager >().get_num_processes(); ++rank ) + for ( size_t rank = 0; rank < kernel::manager< MPIManager >.get_num_processes(); ++rank ) { done = done - and recv_buffer[ kernel::manager< MPIManager >().get_done_marker_position_in_secondary_events_recv_buffer( - rank ) ]; + and recv_buffer[ kernel::manager< MPIManager >.get_done_marker_position_in_secondary_events_recv_buffer( rank ) ]; } return done; } @@ -1747,10 +1745,10 @@ ConnectionManager::remove_disabled_connections( const size_t tid ) void ConnectionManager::resize_connections() { - kernel::manager< VPManager >().assert_thread_parallel(); + 
kernel::manager< VPManager >.assert_thread_parallel(); - connections_.at( kernel::manager< VPManager >().get_thread_id() ) - .resize( kernel::manager< ModelManager >().get_num_connection_models() ); + connections_.at( kernel::manager< VPManager >.get_thread_id() ) + .resize( kernel::manager< ModelManager >.get_num_connection_models() ); source_table_.resize_sources(); target_table_devices_.resize_to_number_of_synapse_types(); @@ -1759,19 +1757,19 @@ ConnectionManager::resize_connections() void ConnectionManager::sync_has_primary_connections() { - has_primary_connections_ = kernel::manager< MPIManager >().any_true( has_primary_connections_ ); + has_primary_connections_ = kernel::manager< MPIManager >.any_true( has_primary_connections_ ); } void ConnectionManager::check_secondary_connections_exist() { - secondary_connections_exist_ = kernel::manager< MPIManager >().any_true( secondary_connections_exist_ ); + secondary_connections_exist_ = kernel::manager< MPIManager >.any_true( secondary_connections_exist_ ); } void ConnectionManager::set_connections_have_changed() { - assert( kernel::manager< VPManager >().get_thread_id() == 0 ); + assert( kernel::manager< VPManager >.get_thread_id() == 0 ); if ( get_connections_has_been_called_ ) { @@ -1804,9 +1802,9 @@ ConnectionManager::collect_compressed_spike_data( const size_t tid ) } // of omp single; implicit barrier source_table_.collect_compressible_sources( tid ); - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().stop(); #pragma omp single { source_table_.fill_compressed_spike_data( compressed_spike_data_ ); @@ -1835,13 +1833,13 @@ ConnectionManager::fill_target_buffer( const 
size_t tid, do { - const auto& conn_model = kernel::manager< ModelManager >().get_connection_model( syn_id, tid ); + const auto& conn_model = kernel::manager< ModelManager >.get_connection_model( syn_id, tid ); const bool is_primary = conn_model.has_property( ConnectionModelProperties::IS_PRIMARY ); while ( source_2_idx != csd_maps.at( syn_id ).end() ) { const auto source_gid = source_2_idx->first; - const auto source_rank = kernel::manager< MPIManager >().get_process_id_of_node_id( source_gid ); + const auto source_rank = kernel::manager< MPIManager >.get_process_id_of_node_id( source_gid ); if ( not( rank_start <= source_rank and source_rank < rank_end ) ) { // We are not responsible for this source. @@ -1866,8 +1864,8 @@ ConnectionManager::fill_target_buffer( const size_t tid, next_target_data.set_is_primary( is_primary ); next_target_data.reset_marker(); next_target_data.set_source_tid( - kernel::manager< VPManager >().vp_to_thread( kernel::manager< VPManager >().node_id_to_vp( source_gid ) ) ); - next_target_data.set_source_lid( kernel::manager< VPManager >().node_id_to_lid( source_gid ) ); + kernel::manager< VPManager >.vp_to_thread( kernel::manager< VPManager >.node_id_to_vp( source_gid ) ) ); + next_target_data.set_source_lid( kernel::manager< VPManager >.node_id_to_lid( source_gid ) ); if ( is_primary ) { @@ -1884,7 +1882,7 @@ ConnectionManager::fill_target_buffer( const size_t tid, assert( target_thread == static_cast< unsigned long >( conn_info.get_tid() ) ); const size_t relative_recv_buffer_pos = get_secondary_recv_buffer_position( target_thread, syn_id, conn_info.get_lcid() ) - - kernel::manager< MPIManager >().get_recv_displacement_secondary_events_in_int( source_rank ); + - kernel::manager< MPIManager >.get_recv_displacement_secondary_events_in_int( source_rank ); SecondaryTargetDataFields& secondary_fields = next_target_data.secondary_data; secondary_fields.set_recv_buffer_pos( relative_recv_buffer_pos ); @@ -1932,7 +1930,7 @@ 
ConnectionManager::fill_target_buffer( const size_t tid, void ConnectionManager::initialize_iteration_state() { - const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >.get_num_threads(); iteration_state_.clear(); iteration_state_.reserve( num_threads ); @@ -1950,21 +1948,20 @@ void ConnectionManager::send_to_devices( const size_t tid, const size_t source_node_id, Event& e ) { target_table_devices_.send_to_device( - tid, source_node_id, e, kernel::manager< ModelManager >().get_connection_models( tid ) ); + tid, source_node_id, e, kernel::manager< ModelManager >.get_connection_models( tid ) ); } void ConnectionManager::send_to_devices( const size_t tid, const size_t source_node_id, SecondaryEvent& e ) { target_table_devices_.send_to_device( - tid, source_node_id, e, kernel::manager< ModelManager >().get_connection_models( tid ) ); + tid, source_node_id, e, kernel::manager< ModelManager >.get_connection_models( tid ) ); } void ConnectionManager::send_from_device( const size_t tid, const size_t ldid, Event& e ) { - target_table_devices_.send_from_device( - tid, ldid, e, kernel::manager< ModelManager >().get_connection_models( tid ) ); + target_table_devices_.send_from_device( tid, ldid, e, kernel::manager< ModelManager >.get_connection_models( tid ) ); } } diff --git a/nestkernel/connector_base.cpp b/nestkernel/connector_base.cpp index daac0d6bc6..22e2bfb4ff 100644 --- a/nestkernel/connector_base.cpp +++ b/nestkernel/connector_base.cpp @@ -39,7 +39,8 @@ ConnectorBase::prepare_weight_recorder_event( WeightRecorderEvent& wr_e, wr_e.set_rport( e.get_rport() ); wr_e.set_stamp( e.get_stamp() ); // Sender is not available for SecondaryEvents, and not needed, so we do not set it to avoid undefined behavior. 
- wr_e.set_sender_node_id( kernel::manager< ConnectionManager >().get_source_node_id( tid, syn_id, lcid ) ); + // wr_e.set_sender_node_id( kernel::manager< ConnectionManager >.get_source_node_id( tid, syn_id, lcid ) ); + wr_e.set_sender_node_id( kernel::manager< ConnectionManager >.get_source_node_id( tid, syn_id, lcid ) ); wr_e.set_weight( e.get_weight() ); wr_e.set_delay_steps( e.get_delay_steps() ); wr_e.set_receiver( *static_cast< Node* >( cp.get_weight_recorder() ) ); diff --git a/nestkernel/connector_model.cpp b/nestkernel/connector_model.cpp index 52fd112c76..0ae28ab589 100644 --- a/nestkernel/connector_model.cpp +++ b/nestkernel/connector_model.cpp @@ -43,7 +43,7 @@ ConnectorModel::ConnectorModel( const ConnectorModel& cm, const std::string name size_t ConnectorModel::get_synapse_model_id( const std::string& name ) { - return kernel::manager< ModelManager >().get_synapse_model_id( name ); + return kernel::manager< ModelManager >.get_synapse_model_id( name ); } } // namespace nest diff --git a/nestkernel/connector_model_impl.h b/nestkernel/connector_model_impl.h index 62b5b7a310..24bc91abc0 100644 --- a/nestkernel/connector_model_impl.h +++ b/nestkernel/connector_model_impl.h @@ -107,12 +107,12 @@ GenericConnectorModel< ConnectionT >::set_status( const DictionaryDatum& d ) // set_status calls on common properties and default connection may // modify min/max delay, we need to freeze the min/max_delay checking. - kernel::manager< ConnectionManager >().get_delay_checker().freeze_delay_update(); + kernel::manager< ConnectionManager >.get_delay_checker().freeze_delay_update(); cp_.set_status( d, *this ); default_connection_.set_status( d, *this ); - kernel::manager< ConnectionManager >().get_delay_checker().enable_delay_update(); + kernel::manager< ConnectionManager >.get_delay_checker().enable_delay_update(); // we've possibly just got a new default delay. 
So enforce checking next time // it is used @@ -157,7 +157,7 @@ GenericConnectorModel< ConnectionT >::used_default_delay() if ( has_property( ConnectionModelProperties::HAS_DELAY ) ) { const double d = default_connection_.get_delay(); - kernel::manager< ConnectionManager >().get_delay_checker().assert_valid_delay_ms( d ); + kernel::manager< ConnectionManager >.get_delay_checker().assert_valid_delay_ms( d ); } // Let connections without delay contribute to the delay extrema with // wfr_comm_interval. For those connections the min_delay is important @@ -167,8 +167,8 @@ GenericConnectorModel< ConnectionT >::used_default_delay() // without delay is created. else { - const double wfr_comm_interval = kernel::manager< SimulationManager >().get_wfr_comm_interval(); - kernel::manager< ConnectionManager >().get_delay_checker().assert_valid_delay_ms( wfr_comm_interval ); + const double wfr_comm_interval = kernel::manager< SimulationManager >.get_wfr_comm_interval(); + kernel::manager< ConnectionManager >.get_delay_checker().assert_valid_delay_ms( wfr_comm_interval ); } } catch ( BadDelay& e ) @@ -177,8 +177,8 @@ GenericConnectorModel< ConnectionT >::used_default_delay() String::compose( "Default delay of '%1' must be between min_delay %2 " "and max_delay %3.", get_name(), - Time::delay_steps_to_ms( kernel::manager< ConnectionManager >().get_min_delay() ), - Time::delay_steps_to_ms( kernel::manager< ConnectionManager >().get_max_delay() ) ) ); + Time::delay_steps_to_ms( kernel::manager< ConnectionManager >.get_min_delay() ), + Time::delay_steps_to_ms( kernel::manager< ConnectionManager >.get_max_delay() ) ) ); } default_delay_needs_check_ = false; } @@ -212,7 +212,7 @@ GenericConnectorModel< ConnectionT >::add_connection( Node& src, { if ( has_property( ConnectionModelProperties::HAS_DELAY ) ) { - kernel::manager< ConnectionManager >().get_delay_checker().assert_valid_delay_ms( delay ); + kernel::manager< ConnectionManager >.get_delay_checker().assert_valid_delay_ms( delay ); 
} if ( p->known( names::delay ) ) @@ -231,7 +231,7 @@ GenericConnectorModel< ConnectionT >::add_connection( Node& src, { if ( has_property( ConnectionModelProperties::HAS_DELAY ) ) { - kernel::manager< ConnectionManager >().get_delay_checker().assert_valid_delay_ms( delay ); + kernel::manager< ConnectionManager >.get_delay_checker().assert_valid_delay_ms( delay ); } } else diff --git a/nestkernel/delay_checker.cpp b/nestkernel/delay_checker.cpp index 7e103238d8..f1a9a645fd 100644 --- a/nestkernel/delay_checker.cpp +++ b/nestkernel/delay_checker.cpp @@ -137,7 +137,7 @@ nest::DelayChecker::set_status( const DictionaryDatum& d ) if ( min_delay_updated and max_delay_updated ) { - if ( kernel::manager< ConnectionManager >().get_num_connections() > 0 ) + if ( kernel::manager< ConnectionManager >.get_num_connections() > 0 ) { throw BadProperty( "Connections already exist. Please call ResetKernel first" ); } @@ -162,10 +162,10 @@ nest::DelayChecker::assert_valid_delay_ms( double requested_new_delay ) // if already simulated, the new delay has to be checked against the // min_delay and the max_delay which have been used during simulation - if ( kernel::manager< SimulationManager >().has_been_simulated() ) + if ( kernel::manager< SimulationManager >.has_been_simulated() ) { - const bool bad_min_delay = new_delay < kernel::manager< ConnectionManager >().get_min_delay(); - const bool bad_max_delay = new_delay > kernel::manager< ConnectionManager >().get_max_delay(); + const bool bad_min_delay = new_delay < kernel::manager< ConnectionManager >.get_min_delay(); + const bool bad_max_delay = new_delay > kernel::manager< ConnectionManager >.get_max_delay(); if ( bad_min_delay or bad_max_delay ) { throw BadDelay( new_delay_ms, @@ -223,10 +223,10 @@ nest::DelayChecker::assert_two_valid_delays_steps( long new_delay1, long new_del throw BadDelay( Time::delay_steps_to_ms( ldelay ), "Delay must be greater than or equal to resolution" ); } - if ( kernel::manager< SimulationManager 
>().has_been_simulated() ) + if ( kernel::manager< SimulationManager >.has_been_simulated() ) { - const bool bad_min_delay = ldelay < kernel::manager< ConnectionManager >().get_min_delay(); - const bool bad_max_delay = hdelay > kernel::manager< ConnectionManager >().get_max_delay(); + const bool bad_min_delay = ldelay < kernel::manager< ConnectionManager >.get_min_delay(); + const bool bad_max_delay = hdelay > kernel::manager< ConnectionManager >.get_max_delay(); if ( bad_min_delay ) { throw BadDelay( diff --git a/nestkernel/eprop_archiving_node.h b/nestkernel/eprop_archiving_node.h index 392f6a893c..5a6b48643c 100644 --- a/nestkernel/eprop_archiving_node.h +++ b/nestkernel/eprop_archiving_node.h @@ -253,7 +253,7 @@ EpropArchivingNode< HistEntryT >::erase_used_eprop_history() return; } - const long update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); + const long update_interval = kernel::manager< SimulationManager >.get_eprop_update_interval().get_steps(); auto it_update_hist = update_history_.begin(); diff --git a/nestkernel/eprop_archiving_node_recurrent.h b/nestkernel/eprop_archiving_node_recurrent.h index ac416ea53d..b4f719e329 100644 --- a/nestkernel/eprop_archiving_node_recurrent.h +++ b/nestkernel/eprop_archiving_node_recurrent.h @@ -484,7 +484,7 @@ EpropArchivingNodeRecurrent< hist_shift_required >::write_firing_rate_reg_to_his return; } - const double update_interval = kernel::manager< SimulationManager >().get_eprop_update_interval().get_steps(); + const double update_interval = kernel::manager< SimulationManager >.get_eprop_update_interval().get_steps(); const double dt = Time::get_resolution().get_ms(); const long shift = Time::get_resolution().get_steps(); diff --git a/nestkernel/event.cpp b/nestkernel/event.cpp index 7a4bc721b9..5d8878404d 100644 --- a/nestkernel/event.cpp +++ b/nestkernel/event.cpp @@ -55,7 +55,7 @@ Event::retrieve_sender_node_id_from_source_table() const } else { - const size_t node_id = 
kernel::manager< ConnectionManager >().get_source_node_id( + const size_t node_id = kernel::manager< ConnectionManager >.get_source_node_id( sender_spike_data_.get_tid(), sender_spike_data_.get_syn_id(), sender_spike_data_.get_lcid() ); return node_id; } diff --git a/nestkernel/event_delivery_manager.cpp b/nestkernel/event_delivery_manager.cpp index b96743cbe3..095c8ecd5f 100644 --- a/nestkernel/event_delivery_manager.cpp +++ b/nestkernel/event_delivery_manager.cpp @@ -88,7 +88,7 @@ EventDeliveryManager::initialize( const bool adjust_number_of_threads_or_rng_onl send_recv_buffer_resize_log_.clear(); } - const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >.get_num_threads(); local_spike_counter_.resize( num_threads, 0 ); reset_counters(); @@ -98,7 +98,7 @@ EventDeliveryManager::initialize( const bool adjust_number_of_threads_or_rng_onl #pragma omp parallel { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); if ( not emitted_spikes_register_[ tid ] ) { @@ -197,26 +197,26 @@ void EventDeliveryManager::resize_send_recv_buffers_target_data() { // compute send receive counts and allocate memory for buffers - send_buffer_target_data_.resize( kernel::manager< MPIManager >().get_buffer_size_target_data() ); - recv_buffer_target_data_.resize( kernel::manager< MPIManager >().get_buffer_size_target_data() ); + send_buffer_target_data_.resize( kernel::manager< MPIManager >.get_buffer_size_target_data() ); + recv_buffer_target_data_.resize( kernel::manager< MPIManager >.get_buffer_size_target_data() ); } void EventDeliveryManager::resize_send_recv_buffers_spike_data_() { - if ( kernel::manager< MPIManager >().get_buffer_size_spike_data() > send_buffer_spike_data_.size() ) + if ( kernel::manager< MPIManager >.get_buffer_size_spike_data() > send_buffer_spike_data_.size() ) { - send_buffer_spike_data_.resize( 
kernel::manager< MPIManager >().get_buffer_size_spike_data() ); - recv_buffer_spike_data_.resize( kernel::manager< MPIManager >().get_buffer_size_spike_data() ); - send_buffer_off_grid_spike_data_.resize( kernel::manager< MPIManager >().get_buffer_size_spike_data() ); - recv_buffer_off_grid_spike_data_.resize( kernel::manager< MPIManager >().get_buffer_size_spike_data() ); + send_buffer_spike_data_.resize( kernel::manager< MPIManager >.get_buffer_size_spike_data() ); + recv_buffer_spike_data_.resize( kernel::manager< MPIManager >.get_buffer_size_spike_data() ); + send_buffer_off_grid_spike_data_.resize( kernel::manager< MPIManager >.get_buffer_size_spike_data() ); + recv_buffer_off_grid_spike_data_.resize( kernel::manager< MPIManager >.get_buffer_size_spike_data() ); } } void EventDeliveryManager::configure_spike_data_buffers() { - assert( kernel::manager< ConnectionManager >().get_min_delay() != 0 ); + assert( kernel::manager< ConnectionManager >.get_min_delay() != 0 ); configure_spike_register(); @@ -231,7 +231,7 @@ EventDeliveryManager::configure_spike_register() { #pragma omp parallel { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); reset_spike_register_( tid ); } } @@ -240,18 +240,16 @@ void EventDeliveryManager::configure_secondary_buffers() { send_buffer_secondary_events_.clear(); - send_buffer_secondary_events_.resize( - kernel::manager< MPIManager >().get_send_buffer_size_secondary_events_in_int() ); + send_buffer_secondary_events_.resize( kernel::manager< MPIManager >.get_send_buffer_size_secondary_events_in_int() ); recv_buffer_secondary_events_.clear(); - recv_buffer_secondary_events_.resize( - kernel::manager< MPIManager >().get_recv_buffer_size_secondary_events_in_int() ); + recv_buffer_secondary_events_.resize( kernel::manager< MPIManager >.get_recv_buffer_size_secondary_events_in_int() ); } void EventDeliveryManager::init_moduli() { - long min_delay = 
kernel::manager< ConnectionManager >().get_min_delay(); - long max_delay = kernel::manager< ConnectionManager >().get_max_delay(); + long min_delay = kernel::manager< ConnectionManager >.get_min_delay(); + long max_delay = kernel::manager< ConnectionManager >.get_max_delay(); assert( min_delay != 0 ); assert( max_delay != 0 ); @@ -263,7 +261,7 @@ EventDeliveryManager::init_moduli() for ( long d = 0; d < min_delay + max_delay; ++d ) { - moduli_[ d ] = ( kernel::manager< SimulationManager >().get_clock().get_steps() + d ) % ( min_delay + max_delay ); + moduli_[ d ] = ( kernel::manager< SimulationManager >.get_clock().get_steps() + d ) % ( min_delay + max_delay ); } // Slice-based ring-buffers have one bin per min_delay steps, @@ -273,15 +271,15 @@ EventDeliveryManager::init_moduli() slice_moduli_.resize( min_delay + max_delay ); for ( long d = 0; d < min_delay + max_delay; ++d ) { - slice_moduli_[ d ] = ( ( kernel::manager< SimulationManager >().get_clock().get_steps() + d ) / min_delay ) % nbuff; + slice_moduli_[ d ] = ( ( kernel::manager< SimulationManager >.get_clock().get_steps() + d ) / min_delay ) % nbuff; } } void EventDeliveryManager::update_moduli() { - long min_delay = kernel::manager< ConnectionManager >().get_min_delay(); - long max_delay = kernel::manager< ConnectionManager >().get_max_delay(); + long min_delay = kernel::manager< ConnectionManager >.get_min_delay(); + long max_delay = kernel::manager< ConnectionManager >.get_max_delay(); assert( min_delay != 0 ); assert( max_delay != 0 ); @@ -298,7 +296,7 @@ EventDeliveryManager::update_moduli() const size_t nbuff = static_cast< size_t >( std::ceil( static_cast< double >( min_delay + max_delay ) / min_delay ) ); for ( long d = 0; d < min_delay + max_delay; ++d ) { - slice_moduli_[ d ] = ( ( kernel::manager< SimulationManager >().get_clock().get_steps() + d ) / min_delay ) % nbuff; + slice_moduli_[ d ] = ( ( kernel::manager< SimulationManager >.get_clock().get_steps() + d ) / min_delay ) % nbuff; } } @@ 
-328,10 +326,10 @@ void EventDeliveryManager::write_done_marker_secondary_events_( const bool done ) { // write done marker at last position in every chunk - for ( size_t rank = 0; rank < kernel::manager< MPIManager >().get_num_processes(); ++rank ) + for ( size_t rank = 0; rank < kernel::manager< MPIManager >.get_num_processes(); ++rank ) { - send_buffer_secondary_events_[ kernel::manager< MPIManager >() - .get_done_marker_position_in_secondary_events_send_buffer( rank ) ] = done; + send_buffer_secondary_events_ + [ kernel::manager< MPIManager >.get_done_marker_position_in_secondary_events_send_buffer( rank ) ] = done; } } @@ -339,14 +337,14 @@ void EventDeliveryManager::gather_secondary_events( const bool done ) { write_done_marker_secondary_events_( done ); - kernel::manager< MPIManager >().communicate_secondary_events_Alltoallv( + kernel::manager< MPIManager >.communicate_secondary_events_Alltoallv( send_buffer_secondary_events_, recv_buffer_secondary_events_ ); } bool EventDeliveryManager::deliver_secondary_events( const size_t tid, const bool called_from_wfr_update ) { - return kernel::manager< ConnectionManager >().deliver_secondary_events( + return kernel::manager< ConnectionManager >.deliver_secondary_events( tid, called_from_wfr_update, recv_buffer_secondary_events_ ); } @@ -371,14 +369,14 @@ EventDeliveryManager::gather_spike_data_( std::vector< SpikeDataT >& send_buffer // NOTE: For meaning and logic of SpikeData flags for detecting complete transmission // and information for shrink/grow, see comment in spike_data.h. 
- const size_t old_buff_size_per_rank = kernel::manager< MPIManager >().get_send_recv_count_spike_data_per_rank(); + const size_t old_buff_size_per_rank = kernel::manager< MPIManager >.get_send_recv_count_spike_data_per_rank(); if ( global_max_spikes_per_rank_ < send_recv_buffer_shrink_limit_ * old_buff_size_per_rank ) { const size_t new_buff_size_per_rank = std::max( 2UL, static_cast< size_t >( ( 1 + send_recv_buffer_shrink_spare_ ) * global_max_spikes_per_rank_ ) ); - kernel::manager< MPIManager >().set_buffer_size_spike_data( - kernel::manager< MPIManager >().get_num_processes() * new_buff_size_per_rank ); + kernel::manager< MPIManager >.set_buffer_size_spike_data( + kernel::manager< MPIManager >.get_num_processes() * new_buff_size_per_rank ); resize_send_recv_buffers_spike_data_(); send_recv_buffer_resize_log_.add_entry( global_max_spikes_per_rank_, new_buff_size_per_rank ); } @@ -397,7 +395,7 @@ EventDeliveryManager::gather_spike_data_( std::vector< SpikeDataT >& send_buffer // Set marker at end of each chunk to DEFAULT reset_complete_marker_spike_data_( send_buffer_position, send_buffer ); - std::vector< size_t > num_spikes_per_rank( kernel::manager< MPIManager >().get_num_processes(), 0 ); + std::vector< size_t > num_spikes_per_rank( kernel::manager< MPIManager >.get_num_processes(), 0 ); // Collocate spikes to send buffer collocate_spike_data_buffers_( send_buffer_position, emitted_spikes_register_, send_buffer, num_spikes_per_rank ); @@ -421,19 +419,19 @@ EventDeliveryManager::gather_spike_data_( std::vector< SpikeDataT >& send_buffer // We introduce an explicit barrier at this point to measure how long each process idles until all other processes // reached this point as well. This barrier is directly followed by another implicit barrier due to global // communication. 
- kernel::manager< SimulationManager >().get_mpi_synchronization_stopwatch().start(); - kernel::manager< MPIManager >().synchronize(); - kernel::manager< SimulationManager >().get_mpi_synchronization_stopwatch().stop(); + kernel::manager< SimulationManager >.get_mpi_synchronization_stopwatch().start(); + kernel::manager< MPIManager >.synchronize(); + kernel::manager< SimulationManager >.get_mpi_synchronization_stopwatch().stop(); #endif // Given that we templatize by plain vs offgrid, this if should not be necessary, but ... if ( off_grid_spiking_ ) { - kernel::manager< MPIManager >().communicate_off_grid_spike_data_Alltoall( send_buffer, recv_buffer ); + kernel::manager< MPIManager >.communicate_off_grid_spike_data_Alltoall( send_buffer, recv_buffer ); } else { - kernel::manager< MPIManager >().communicate_spike_data_Alltoall( send_buffer, recv_buffer ); + kernel::manager< MPIManager >.communicate_spike_data_Alltoall( send_buffer, recv_buffer ); } sw_communicate_spike_data_.stop(); @@ -441,15 +439,15 @@ EventDeliveryManager::gather_spike_data_( std::vector< SpikeDataT >& send_buffer global_max_spikes_per_rank_ = get_global_max_spikes_per_rank_( send_buffer_position, recv_buffer ); all_spikes_transmitted = - global_max_spikes_per_rank_ <= kernel::manager< MPIManager >().get_send_recv_count_spike_data_per_rank(); + global_max_spikes_per_rank_ <= kernel::manager< MPIManager >.get_send_recv_count_spike_data_per_rank(); if ( not all_spikes_transmitted ) { const size_t new_buff_size_per_rank = static_cast< size_t >( ( 1 + send_recv_buffer_grow_extra_ ) * global_max_spikes_per_rank_ ); - kernel::manager< MPIManager >().set_buffer_size_spike_data( - kernel::manager< MPIManager >().get_num_processes() * new_buff_size_per_rank ); + kernel::manager< MPIManager >.set_buffer_size_spike_data( + kernel::manager< MPIManager >.get_num_processes() * new_buff_size_per_rank ); resize_send_recv_buffers_spike_data_(); send_recv_buffer_resize_log_.add_entry( global_max_spikes_per_rank_, 
new_buff_size_per_rank ); } @@ -504,9 +502,9 @@ EventDeliveryManager::set_end_marker_( const SendBufferPosition& send_buffer_pos { // See comment in spike_data.h for logic. const bool collocate_complete = local_max_spikes_per_rank - <= static_cast< size_t >( kernel::manager< MPIManager >().get_send_recv_count_spike_data_per_rank() ); + <= static_cast< size_t >( kernel::manager< MPIManager >.get_send_recv_count_spike_data_per_rank() ); - for ( size_t rank = 0; rank < kernel::manager< MPIManager >().get_num_processes(); ++rank ) + for ( size_t rank = 0; rank < kernel::manager< MPIManager >.get_num_processes(); ++rank ) { const size_t end_idx = send_buffer_position.end( rank ) - 1; if ( not collocate_complete ) @@ -547,7 +545,7 @@ void EventDeliveryManager::reset_complete_marker_spike_data_( const SendBufferPosition& send_buffer_position, std::vector< SpikeDataT >& send_buffer ) const { - for ( size_t rank = 0; rank < kernel::manager< MPIManager >().get_num_processes(); ++rank ) + for ( size_t rank = 0; rank < kernel::manager< MPIManager >.get_num_processes(); ++rank ) { const size_t idx = send_buffer_position.end( rank ) - 1; send_buffer[ idx ].reset_marker(); @@ -562,7 +560,7 @@ EventDeliveryManager::get_global_max_spikes_per_rank_( const SendBufferPosition& // TODO: send_buffer_position not needed here, only used to get endpoint of each per-rank section of buffer size_t maximum = 0; - for ( size_t target_rank = 0; target_rank < kernel::manager< MPIManager >().get_num_processes(); ++target_rank ) + for ( size_t target_rank = 0; target_rank < kernel::manager< MPIManager >.get_num_processes(); ++target_rank ) { const auto& end_entry = recv_buffer[ send_buffer_position.end( target_rank ) - 1 ]; size_t max_per_thread_max_spikes_per_rank = 0; @@ -573,7 +571,7 @@ EventDeliveryManager::get_global_max_spikes_per_rank_( const SendBufferPosition& else { assert( end_entry.is_end_marker() ); - max_per_thread_max_spikes_per_rank = kernel::manager< MPIManager 
>().get_send_recv_count_spike_data_per_rank(); + max_per_thread_max_spikes_per_rank = kernel::manager< MPIManager >.get_send_recv_count_spike_data_per_rank(); } maximum = std::max( max_per_thread_max_spikes_per_rank, maximum ); } @@ -599,25 +597,25 @@ void EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< SpikeDataT >& recv_buffer ) { // deliver only at beginning of time slice - if ( kernel::manager< SimulationManager >().get_from_step() > 0 ) + if ( kernel::manager< SimulationManager >.get_from_step() > 0 ) { return; } - const size_t spike_buffer_size_per_rank = kernel::manager< MPIManager >().get_send_recv_count_spike_data_per_rank(); - const std::vector< ConnectorModel* >& cm = kernel::manager< ModelManager >().get_connection_models( tid ); + const size_t spike_buffer_size_per_rank = kernel::manager< MPIManager >.get_send_recv_count_spike_data_per_rank(); + const std::vector< ConnectorModel* >& cm = kernel::manager< ModelManager >.get_connection_models( tid ); // prepare Time objects for every possible time stamp within min_delay_ - std::vector< Time > prepared_timestamps( kernel::manager< ConnectionManager >().get_min_delay() ); - for ( size_t lag = 0; lag < static_cast< size_t >( kernel::manager< ConnectionManager >().get_min_delay() ); ++lag ) + std::vector< Time > prepared_timestamps( kernel::manager< ConnectionManager >.get_min_delay() ); + for ( size_t lag = 0; lag < static_cast< size_t >( kernel::manager< ConnectionManager >.get_min_delay() ); ++lag ) { // Subtract min_delay because spikes were emitted in previous time slice and we use current clock. 
- prepared_timestamps[ lag ] = kernel::manager< SimulationManager >().get_clock() - + Time::step( lag + 1 - kernel::manager< ConnectionManager >().get_min_delay() ); + prepared_timestamps[ lag ] = kernel::manager< SimulationManager >.get_clock() + + Time::step( lag + 1 - kernel::manager< ConnectionManager >.get_min_delay() ); } // Deliver spikes sent by each rank in order - for ( size_t rank = 0; rank < kernel::manager< MPIManager >().get_num_processes(); ++rank ) + for ( size_t rank = 0; rank < kernel::manager< MPIManager >.get_num_processes(); ++rank ) { // Continue with next rank if no spikes were sent by current rank if ( recv_buffer[ rank * spike_buffer_size_per_rank ].is_invalid_marker() ) @@ -649,7 +647,7 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik size_t syn_id_batch[ SPIKES_PER_BATCH ]; size_t lcid_batch[ SPIKES_PER_BATCH ]; - if ( not kernel::manager< ConnectionManager >().use_compressed_spikes() ) + if ( not kernel::manager< ConnectionManager >.use_compressed_spikes() ) { for ( size_t i = 0; i < num_batches; ++i ) { @@ -667,7 +665,7 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik { if ( tid_batch[ j ] == tid ) { - kernel::manager< ConnectionManager >().send( + kernel::manager< ConnectionManager >.send( tid_batch[ j ], syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); } } @@ -689,7 +687,7 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik { if ( tid_batch[ j ] == tid ) { - kernel::manager< ConnectionManager >().send( + kernel::manager< ConnectionManager >.send( tid_batch[ j ], syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); } } @@ -714,7 +712,7 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik { // find the spike-data entry for this thread const std::vector< SpikeData >& compressed_spike_data = - kernel::manager< ConnectionManager >().get_compressed_spike_data( syn_id_batch[ j ], lcid_batch[ j ] ); 
+ kernel::manager< ConnectionManager >.get_compressed_spike_data( syn_id_batch[ j ], lcid_batch[ j ] ); lcid_batch[ j ] = compressed_spike_data[ tid ].get_lcid(); } for ( size_t j = 0; j < SPIKES_PER_BATCH; ++j ) @@ -730,7 +728,7 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik { if ( lcid_batch[ j ] != invalid_lcid ) { - kernel::manager< ConnectionManager >().send( tid, syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); + kernel::manager< ConnectionManager >.send( tid, syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); } } } @@ -751,7 +749,7 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik { // find the spike-data entry for this thread const std::vector< SpikeData >& compressed_spike_data = - kernel::manager< ConnectionManager >().get_compressed_spike_data( syn_id_batch[ j ], lcid_batch[ j ] ); + kernel::manager< ConnectionManager >.get_compressed_spike_data( syn_id_batch[ j ], lcid_batch[ j ] ); lcid_batch[ j ] = compressed_spike_data[ tid ].get_lcid(); } for ( size_t j = 0; j < num_remaining_entries; ++j ) @@ -767,7 +765,7 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik { if ( lcid_batch[ j ] != invalid_lcid ) { - kernel::manager< ConnectionManager >().send( tid, syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); + kernel::manager< ConnectionManager >.send( tid, syn_id_batch[ j ], lcid_batch[ j ], cm, se_batch[ j ] ); } } } // if-else not compressed @@ -778,16 +776,16 @@ EventDeliveryManager::deliver_events_( const size_t tid, const std::vector< Spik void EventDeliveryManager::gather_target_data( const size_t tid ) { - assert( not kernel::manager< ConnectionManager >().is_source_table_cleared() ); + assert( not kernel::manager< ConnectionManager >.is_source_table_cleared() ); // assume all threads have some work to do gather_completed_checker_.set_false( tid ); assert( gather_completed_checker_.all_false() ); - const AssignedRanks 
assigned_ranks = kernel::manager< VPManager >().get_assigned_ranks( tid ); + const AssignedRanks assigned_ranks = kernel::manager< VPManager >.get_assigned_ranks( tid ); - kernel::manager< ConnectionManager >().prepare_target_table( tid ); - kernel::manager< ConnectionManager >().reset_source_table_entry_point( tid ); + kernel::manager< ConnectionManager >.prepare_target_table( tid ); + kernel::manager< ConnectionManager >.reset_source_table_entry_point( tid ); while ( gather_completed_checker_.any_false() ) { @@ -797,19 +795,19 @@ EventDeliveryManager::gather_target_data( const size_t tid ) #pragma omp master { - if ( kernel::manager< MPIManager >().adaptive_target_buffers() and buffer_size_target_data_has_changed_ ) + if ( kernel::manager< MPIManager >.adaptive_target_buffers() and buffer_size_target_data_has_changed_ ) { resize_send_recv_buffers_target_data(); } } // of omp master; (no barrier) - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().stop(); - kernel::manager< ConnectionManager >().restore_source_table_entry_point( tid ); + kernel::manager< ConnectionManager >.restore_source_table_entry_point( tid ); TargetSendBufferPosition send_buffer_position( - assigned_ranks, kernel::manager< MPIManager >().get_send_recv_count_target_data_per_rank() ); + assigned_ranks, kernel::manager< MPIManager >.get_send_recv_count_target_data_per_rank() ); const bool gather_completed = collocate_target_data_buffers_( tid, assigned_ranks, send_buffer_position ); gather_completed_checker_.logical_and( tid, gather_completed ); @@ -818,16 +816,16 @@ EventDeliveryManager::gather_target_data( const size_t tid ) { 
set_complete_marker_target_data_( assigned_ranks, send_buffer_position ); } - kernel::manager< ConnectionManager >().save_source_table_entry_point( tid ); - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< ConnectionManager >.save_source_table_entry_point( tid ); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); - kernel::manager< ConnectionManager >().clean_source_table( tid ); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< ConnectionManager >.clean_source_table( tid ); #pragma omp master { sw_communicate_target_data_.start(); - kernel::manager< MPIManager >().communicate_target_data_Alltoall( + kernel::manager< MPIManager >.communicate_target_data_Alltoall( send_buffer_target_data_, recv_buffer_target_data_ ); sw_communicate_target_data_.stop(); } // of omp master (no barriers!) 
@@ -837,31 +835,31 @@ EventDeliveryManager::gather_target_data( const size_t tid ) gather_completed_checker_.logical_and( tid, distribute_completed ); // resize mpi buffers, if necessary and allowed - if ( gather_completed_checker_.any_false() and kernel::manager< MPIManager >().adaptive_target_buffers() ) + if ( gather_completed_checker_.any_false() and kernel::manager< MPIManager >.adaptive_target_buffers() ) { #pragma omp master { - buffer_size_target_data_has_changed_ = kernel::manager< MPIManager >().increase_buffer_size_target_data(); + buffer_size_target_data_has_changed_ = kernel::manager< MPIManager >.increase_buffer_size_target_data(); } #pragma omp barrier } } // of while - kernel::manager< ConnectionManager >().clear_source_table( tid ); + kernel::manager< ConnectionManager >.clear_source_table( tid ); } void EventDeliveryManager::gather_target_data_compressed( const size_t tid ) { - assert( not kernel::manager< ConnectionManager >().is_source_table_cleared() ); + assert( not kernel::manager< ConnectionManager >.is_source_table_cleared() ); // assume all threads have some work to do gather_completed_checker_.set_false( tid ); assert( gather_completed_checker_.all_false() ); - const AssignedRanks assigned_ranks = kernel::manager< VPManager >().get_assigned_ranks( tid ); + const AssignedRanks assigned_ranks = kernel::manager< VPManager >.get_assigned_ranks( tid ); - kernel::manager< ConnectionManager >().prepare_target_table( tid ); + kernel::manager< ConnectionManager >.prepare_target_table( tid ); while ( gather_completed_checker_.any_false() ) { @@ -870,17 +868,17 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid ) #pragma omp master { - if ( kernel::manager< MPIManager >().adaptive_target_buffers() and buffer_size_target_data_has_changed_ ) + if ( kernel::manager< MPIManager >.adaptive_target_buffers() and buffer_size_target_data_has_changed_ ) { resize_send_recv_buffers_target_data(); } } // of omp master; no barrier - 
kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().stop(); TargetSendBufferPosition send_buffer_position( - assigned_ranks, kernel::manager< MPIManager >().get_send_recv_count_target_data_per_rank() ); + assigned_ranks, kernel::manager< MPIManager >.get_send_recv_count_target_data_per_rank() ); const bool gather_completed = collocate_target_data_buffers_compressed_( tid, assigned_ranks, send_buffer_position ); @@ -892,14 +890,14 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid ) set_complete_marker_target_data_( assigned_ranks, send_buffer_position ); } - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().stop(); #pragma omp master { sw_communicate_target_data_.start(); - kernel::manager< MPIManager >().communicate_target_data_Alltoall( + kernel::manager< MPIManager >.communicate_target_data_Alltoall( send_buffer_target_data_, recv_buffer_target_data_ ); sw_communicate_target_data_.stop(); } // of omp master (no barrier) @@ -912,20 +910,20 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid ) gather_completed_checker_.logical_and( tid, distribute_completed ); // resize mpi buffers, if necessary and allowed - if ( gather_completed_checker_.any_false() and kernel::manager< MPIManager >().adaptive_target_buffers() ) + if ( 
gather_completed_checker_.any_false() and kernel::manager< MPIManager >.adaptive_target_buffers() ) { #pragma omp master { - buffer_size_target_data_has_changed_ = kernel::manager< MPIManager >().increase_buffer_size_target_data(); + buffer_size_target_data_has_changed_ = kernel::manager< MPIManager >.increase_buffer_size_target_data(); } // of omp master (no barrier) - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().stop(); } } // of while - kernel::manager< ConnectionManager >().clear_source_table( tid ); + kernel::manager< ConnectionManager >.clear_source_table( tid ); } bool @@ -941,7 +939,7 @@ EventDeliveryManager::collocate_target_data_buffers_( const size_t tid, // no ranks to process for this thread if ( assigned_ranks.begin == assigned_ranks.end ) { - kernel::manager< ConnectionManager >().no_targets_to_process( tid ); + kernel::manager< ConnectionManager >.no_targets_to_process( tid ); return is_source_table_read; } @@ -958,7 +956,7 @@ EventDeliveryManager::collocate_target_data_buffers_( const size_t tid, while ( true ) { - valid_next_target_data = kernel::manager< ConnectionManager >().get_next_target_data( + valid_next_target_data = kernel::manager< ConnectionManager >.get_next_target_data( tid, assigned_ranks.begin, assigned_ranks.end, source_rank, next_target_data ); if ( valid_next_target_data ) // add valid entry to MPI buffer { @@ -966,11 +964,11 @@ EventDeliveryManager::collocate_target_data_buffers_( const size_t tid, { // entry does not fit in this part of the MPI buffer any more, // so we need to reject it - kernel::manager< ConnectionManager >().reject_last_target_data( tid ); + kernel::manager< 
ConnectionManager >.reject_last_target_data( tid ); // after rejecting the last target, we need to save the // position to start at this point again next communication // round - kernel::manager< ConnectionManager >().save_source_table_entry_point( tid ); + kernel::manager< ConnectionManager >.save_source_table_entry_point( tid ); // we have just rejected an entry, so source table can not be // fully read is_source_table_read = false; @@ -1030,7 +1028,7 @@ EventDeliveryManager::collocate_target_data_buffers_compressed_( const size_t ti send_buffer_target_data_[ send_buffer_position.begin( rank ) ].set_invalid_marker(); } - const bool is_source_table_read = kernel::manager< ConnectionManager >().fill_target_buffer( + const bool is_source_table_read = kernel::manager< ConnectionManager >.fill_target_buffer( tid, assigned_ranks.begin, assigned_ranks.end, send_buffer_target_data_, send_buffer_position ); return is_source_table_read; @@ -1053,9 +1051,9 @@ nest::EventDeliveryManager::distribute_target_data_buffers_( const size_t tid ) { bool are_others_completed = true; const unsigned int send_recv_count_target_data_per_rank = - kernel::manager< MPIManager >().get_send_recv_count_target_data_per_rank(); + kernel::manager< MPIManager >.get_send_recv_count_target_data_per_rank(); - for ( size_t rank = 0; rank < kernel::manager< MPIManager >().get_num_processes(); ++rank ) + for ( size_t rank = 0; rank < kernel::manager< MPIManager >.get_num_processes(); ++rank ) { // Check last entry for completed marker if ( not recv_buffer_target_data_[ ( rank + 1 ) * send_recv_count_target_data_per_rank - 1 ].is_complete_marker() ) @@ -1074,7 +1072,7 @@ nest::EventDeliveryManager::distribute_target_data_buffers_( const size_t tid ) const TargetData& target_data = recv_buffer_target_data_[ rank * send_recv_count_target_data_per_rank + i ]; if ( target_data.get_source_tid() == tid ) { - kernel::manager< ConnectionManager >().add_target( tid, rank, target_data ); + kernel::manager< 
ConnectionManager >.add_target( tid, rank, target_data ); } // Is this the last target from this rank? diff --git a/nestkernel/event_delivery_manager.h b/nestkernel/event_delivery_manager.h index dd221bb332..d85ce2ba0a 100644 --- a/nestkernel/event_delivery_manager.h +++ b/nestkernel/event_delivery_manager.h @@ -538,22 +538,22 @@ inline void EventDeliveryManager::send_local_( Node& source, EventT& e, const long lag ) { assert( not source.has_proxies() ); - e.set_stamp( kernel::manager< SimulationManager >().get_slice_origin() + Time::step( lag + 1 ) ); + e.set_stamp( kernel::manager< SimulationManager >.get_slice_origin() + Time::step( lag + 1 ) ); e.set_sender( source ); const size_t t = source.get_thread(); const size_t ldid = source.get_local_device_id(); - kernel::manager< ConnectionManager >().send_from_device( t, ldid, e ); + kernel::manager< ConnectionManager >.send_from_device( t, ldid, e ); } inline void EventDeliveryManager::send_local_( Node& source, SecondaryEvent& e, const long ) { assert( not source.has_proxies() ); - e.set_stamp( kernel::manager< SimulationManager >().get_slice_origin() + Time::step( 1 ) ); + e.set_stamp( kernel::manager< SimulationManager >.get_slice_origin() + Time::step( 1 ) ); e.set_sender( source ); const size_t t = source.get_thread(); const size_t ldid = source.get_local_device_id(); - kernel::manager< ConnectionManager >().send_from_device( t, ldid, e ); + kernel::manager< ConnectionManager >.send_from_device( t, ldid, e ); } template < class EventT > @@ -574,7 +574,7 @@ EventDeliveryManager::send< SpikeEvent >( Node& source, SpikeEvent& e, const lon { local_spike_counter_[ tid ] += e.get_multiplicity(); - e.set_stamp( kernel::manager< SimulationManager >().get_slice_origin() + Time::step( lag + 1 ) ); + e.set_stamp( kernel::manager< SimulationManager >.get_slice_origin() + Time::step( lag + 1 ) ); e.set_sender( source ); if ( source.is_off_grid() ) @@ -585,7 +585,7 @@ EventDeliveryManager::send< SpikeEvent >( Node& source, 
SpikeEvent& e, const lon { send_remote( tid, e, lag ); } - kernel::manager< ConnectionManager >().send_to_devices( tid, source_node_id, e ); + kernel::manager< ConnectionManager >.send_to_devices( tid, source_node_id, e ); } else { @@ -605,8 +605,8 @@ inline void EventDeliveryManager::send_remote( size_t tid, SpikeEvent& e, const long lag ) { // Put the spike in a buffer for the remote machines - const size_t lid = kernel::manager< VPManager >().node_id_to_lid( e.get_sender().get_node_id() ); - const auto& targets = kernel::manager< ConnectionManager >().get_remote_targets_of_local_node( tid, lid ); + const size_t lid = kernel::manager< VPManager >.node_id_to_lid( e.get_sender().get_node_id() ); + const auto& targets = kernel::manager< ConnectionManager >.get_remote_targets_of_local_node( tid, lid ); for ( const auto& target : targets ) { @@ -622,8 +622,8 @@ inline void EventDeliveryManager::send_off_grid_remote( size_t tid, SpikeEvent& e, const long lag ) { // Put the spike in a buffer for the remote machines - const size_t lid = kernel::manager< VPManager >().node_id_to_lid( e.get_sender().get_node_id() ); - const auto& targets = kernel::manager< ConnectionManager >().get_remote_targets_of_local_node( tid, lid ); + const size_t lid = kernel::manager< VPManager >.node_id_to_lid( e.get_sender().get_node_id() ); + const auto& targets = kernel::manager< ConnectionManager >.get_remote_targets_of_local_node( tid, lid ); for ( const auto& target : targets ) { @@ -638,9 +638,9 @@ EventDeliveryManager::send_off_grid_remote( size_t tid, SpikeEvent& e, const lon inline void EventDeliveryManager::send_secondary( Node& source, SecondaryEvent& e ) { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); const size_t source_node_id = source.get_node_id(); - const size_t lid = kernel::manager< VPManager >().node_id_to_lid( source_node_id ); + const size_t lid = kernel::manager< VPManager 
>.node_id_to_lid( source_node_id ); if ( source.has_proxies() ) { @@ -652,7 +652,7 @@ EventDeliveryManager::send_secondary( Node& source, SecondaryEvent& e ) for ( const auto& syn_id : supported_syn_ids ) { const std::vector< size_t >& positions = - kernel::manager< ConnectionManager >().get_secondary_send_buffer_positions( tid, lid, syn_id ); + kernel::manager< ConnectionManager >.get_secondary_send_buffer_positions( tid, lid, syn_id ); for ( size_t i = 0; i < positions.size(); ++i ) { @@ -660,7 +660,7 @@ EventDeliveryManager::send_secondary( Node& source, SecondaryEvent& e ) e >> it; } } - kernel::manager< ConnectionManager >().send_to_devices( tid, source_node_id, e ); + kernel::manager< ConnectionManager >.send_to_devices( tid, source_node_id, e ); } else { @@ -673,7 +673,7 @@ EventDeliveryManager::send_secondary( Node& source, SecondaryEvent& e ) inline size_t EventDeliveryManager::write_toggle() const { - return kernel::manager< SimulationManager >().get_slice() % 2; + return kernel::manager< SimulationManager >.get_slice() % 2; } } // namespace nest diff --git a/nestkernel/free_layer.h b/nestkernel/free_layer.h index e3f44f607d..9c4ebba30c 100644 --- a/nestkernel/free_layer.h +++ b/nestkernel/free_layer.h @@ -130,7 +130,7 @@ FreeLayer< D >::set_status( const DictionaryDatum& d ) 0, []( size_t a, NodeIDTriple b ) { - const auto node = kernel::manager< NodeManager >().get_mpi_local_node_or_device_head( b.node_id ); + const auto node = kernel::manager< NodeManager >.get_mpi_local_node_or_device_head( b.node_id ); return node->is_proxy() ? 
a : a + 1; } ); @@ -150,7 +150,7 @@ FreeLayer< D >::set_status( const DictionaryDatum& d ) { assert( nc_it != this->node_collection_->end() ); Position< D > point = getValue< std::vector< double > >( *it ); - const auto node = kernel::manager< NodeManager >().get_mpi_local_node_or_device_head( ( *nc_it ).node_id ); + const auto node = kernel::manager< NodeManager >.get_mpi_local_node_or_device_head( ( *nc_it ).node_id ); assert( node ); if ( not node->is_proxy() ) { @@ -188,7 +188,7 @@ FreeLayer< D >::set_status( const DictionaryDatum& d ) // max_point on all processes. Position< D > point = pos->get_values( rng ); - const auto node = kernel::manager< NodeManager >().get_mpi_local_node_or_device_head( ( *nc_it ).node_id ); + const auto node = kernel::manager< NodeManager >.get_mpi_local_node_or_device_head( ( *nc_it ).node_id ); assert( node ); if ( not node->is_proxy() ) { @@ -284,7 +284,7 @@ FreeLayer< D >::get_status( DictionaryDatum& d, NodeCollection const* nc ) const { // Node index in node collection is global to NEST, so we need to scale down // to get right indices into positions_, which has only rank-local data. 
- const size_t n_procs = kernel::manager< MPIManager >().get_num_processes(); + const size_t n_procs = kernel::manager< MPIManager >.get_num_processes(); size_t pos_idx = ( *nc_it ).nc_index / n_procs; size_t step = nc_it.get_step_size() / n_procs; @@ -340,7 +340,7 @@ FreeLayer< D >::communicate_positions_( Ins iter, NodeCollectionPTR node_collect // This array will be filled with node ID,pos_x,pos_y[,pos_z] for global nodes: std::vector< double > global_node_id_pos; std::vector< int > displacements; - kernel::manager< MPIManager >().communicate( local_node_id_pos, global_node_id_pos, displacements ); + kernel::manager< MPIManager >.communicate( local_node_id_pos, global_node_id_pos, displacements ); // To avoid copying the vector one extra time in order to sort, we // sneakishly use reinterpret_cast @@ -403,7 +403,7 @@ FreeLayer< D >::lid_to_position_id_( size_t lid ) const } else { - const auto num_procs = kernel::manager< MPIManager >().get_num_processes(); + const auto num_procs = kernel::manager< MPIManager >.get_num_processes(); return lid / num_procs; } } diff --git a/nestkernel/kernel_manager.cpp b/nestkernel/kernel_manager.cpp index eafae80e29..4d2c2b211d 100644 --- a/nestkernel/kernel_manager.cpp +++ b/nestkernel/kernel_manager.cpp @@ -43,20 +43,20 @@ namespace nest KernelManager::KernelManager() : fingerprint_( 0 ) - , managers( { &kernel::manager< LoggingManager >(), - &kernel::manager< MPIManager >(), - &kernel::manager< VPManager >(), - &kernel::manager< ModuleManager >(), - &kernel::manager< RandomManager >(), - &kernel::manager< SimulationManager >(), - &kernel::manager< ModelRangeManager >(), - &kernel::manager< ConnectionManager >(), - &kernel::manager< SPManager >(), - &kernel::manager< EventDeliveryManager >(), - &kernel::manager< IOManager >(), - &kernel::manager< ModelManager >(), - &kernel::manager< MUSICManager >(), - &kernel::manager< NodeManager >() } ) + , managers( { &kernel::manager< LoggingManager >, + &kernel::manager< MPIManager >, + 
&kernel::manager< VPManager >, + &kernel::manager< ModuleManager >, + &kernel::manager< RandomManager >, + &kernel::manager< SimulationManager >, + &kernel::manager< ModelRangeManager >, + &kernel::manager< ConnectionManager >, + &kernel::manager< SPManager >, + &kernel::manager< EventDeliveryManager >, + &kernel::manager< IOManager >, + &kernel::manager< ModelManager >, + &kernel::manager< MUSICManager >, + &kernel::manager< NodeManager > } ) , initialized_( false ) { } @@ -135,7 +135,7 @@ KernelManager::change_number_of_threads( size_t new_num_threads ) ( *it )->finalize( /* adjust_number_of_threads_or_rng_only */ true ); } - kernel::manager< VPManager >().set_num_threads( new_num_threads ); + kernel::manager< VPManager >.set_num_threads( new_num_threads ); // Initialize in original order with new number of threads set for ( auto& manager : managers ) @@ -146,13 +146,13 @@ KernelManager::change_number_of_threads( size_t new_num_threads ) // Finalizing deleted all register components. Now that all infrastructure // is in place again, we can tell modules to re-register the components // they provide. 
- kernel::manager< ModuleManager >().reinitialize_dynamic_modules(); + kernel::manager< ModuleManager >.reinitialize_dynamic_modules(); // Prepare timers and set the number of threads for multi-threaded timers - kernel::manager< SimulationManager >().reset_timers_for_preparation(); - kernel::manager< SimulationManager >().reset_timers_for_dynamics(); - kernel::manager< EventDeliveryManager >().reset_timers_for_preparation(); - kernel::manager< EventDeliveryManager >().reset_timers_for_dynamics(); + kernel::manager< SimulationManager >.reset_timers_for_preparation(); + kernel::manager< SimulationManager >.reset_timers_for_dynamics(); + kernel::manager< EventDeliveryManager >.reset_timers_for_preparation(); + kernel::manager< EventDeliveryManager >.reset_timers_for_dynamics(); } void diff --git a/nestkernel/kernel_manager.h b/nestkernel/kernel_manager.h index 0f579aa81c..4a688a10cc 100644 --- a/nestkernel/kernel_manager.h +++ b/nestkernel/kernel_manager.h @@ -170,22 +170,6 @@ namespace nest { -// Forward declarations to avoid pulling all manager headers here. 
-class LoggingManager; -class MPIManager; -class VPManager; -class ModuleManager; -class RandomManager; -class SimulationManager; -class ModelRangeManager; -class ConnectionManager; -class SPManager; -class EventDeliveryManager; -class IOManager; -class ModelManager; -class MUSICManager; -class NodeManager; - class KernelManager : public ManagerInterface { unsigned long fingerprint_; @@ -275,14 +259,7 @@ namespace kernel { template < class T > -inline T g_manager_instance; // one per type across all TUs - -template < class T > -T& -manager() noexcept -{ - return g_manager_instance< T >; -} +inline T manager; } diff --git a/nestkernel/layer.cpp b/nestkernel/layer.cpp index aa6b031cb0..337bb5bf0f 100644 --- a/nestkernel/layer.cpp +++ b/nestkernel/layer.cpp @@ -56,7 +56,7 @@ AbstractLayer::create_layer( const DictionaryDatum& layer_dict ) AbstractLayer* layer_local = nullptr; auto element_name = getValue< std::string >( layer_dict, names::elements ); - auto element_id = kernel::manager< ModelManager >().get_node_model_id( element_name ); + auto element_id = kernel::manager< ModelManager >.get_node_model_id( element_name ); if ( layer_dict->known( names::positions ) ) { @@ -144,7 +144,7 @@ AbstractLayer::create_layer( const DictionaryDatum& layer_dict ) NodeCollectionMetadataPTR layer_meta( new LayerMetadata( layer_safe ) ); // We have at least one element, create a NodeCollection for it - NodeCollectionPTR node_collection = kernel::manager< NodeManager >().add_node( element_id, length ); + NodeCollectionPTR node_collection = kernel::manager< NodeManager >.add_node( element_id, length ); node_collection->set_metadata( layer_meta ); diff --git a/nestkernel/layer.h b/nestkernel/layer.h index 054f79f35c..f97080a4c7 100644 --- a/nestkernel/layer.h +++ b/nestkernel/layer.h @@ -973,7 +973,7 @@ Layer< D >::dump_connections( std::ostream& out, def( conn_filter, names::source, NodeCollectionDatum( node_collection ) ); def( conn_filter, names::target, NodeCollectionDatum( 
target_layer->get_node_collection() ) ); def( conn_filter, names::synapse_model, syn_model ); - ArrayDatum connectome = kernel::manager< ConnectionManager >().get_connections( conn_filter ); + ArrayDatum connectome = kernel::manager< ConnectionManager >.get_connections( conn_filter ); // Get positions of remote nodes std::vector< std::pair< Position< D >, size_t > >* src_vec = get_global_positions_vector( node_collection ); @@ -998,7 +998,7 @@ Layer< D >::dump_connections( std::ostream& out, previous_source_node_id = source_node_id; } - DictionaryDatum result_dict = kernel::manager< ConnectionManager >().get_synapse_status( source_node_id, + DictionaryDatum result_dict = kernel::manager< ConnectionManager >.get_synapse_status( source_node_id, conn.get_target_node_id(), conn.get_target_thread(), conn.get_synapse_model_id(), diff --git a/nestkernel/model.cpp b/nestkernel/model.cpp index c46b470a64..97b24e8870 100644 --- a/nestkernel/model.cpp +++ b/nestkernel/model.cpp @@ -49,7 +49,7 @@ Model::Model( const std::string& name ) void Model::set_threads() { - set_threads_( kernel::manager< VPManager >().get_num_threads() ); + set_threads_( kernel::manager< VPManager >.get_num_threads() ); } void @@ -130,7 +130,7 @@ Model::get_status() } ( *d )[ names::instantiations ] = Token( tmp ); - ( *d )[ names::type_id ] = LiteralDatum( kernel::manager< ModelManager >().get_node_model( type_id_ )->get_name() ); + ( *d )[ names::type_id ] = LiteralDatum( kernel::manager< ModelManager >.get_node_model( type_id_ )->get_name() ); for ( size_t t = 0; t < tmp.size(); ++t ) { diff --git a/nestkernel/model_manager.cpp b/nestkernel/model_manager.cpp index 7ac034b8ff..76415e425b 100644 --- a/nestkernel/model_manager.cpp +++ b/nestkernel/model_manager.cpp @@ -69,7 +69,7 @@ ModelManager::initialize( const bool ) proxynode_model_->set_threads(); } - const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); + const size_t num_threads = kernel::manager< VPManager 
>.get_num_threads(); // Make space for one vector of connection models per thread connection_models_.resize( num_threads ); @@ -101,7 +101,7 @@ ModelManager::get_num_connection_models() const return 0; } - return connection_models_.at( kernel::manager< VPManager >().get_thread_id() ).size(); + return connection_models_.at( kernel::manager< VPManager >.get_thread_id() ).size(); } void @@ -174,7 +174,7 @@ ModelManager::register_node_model_( Model* model ) #pragma omp parallel { - const size_t t = kernel::manager< VPManager >().get_thread_id(); + const size_t t = kernel::manager< VPManager >.get_thread_id(); proxy_nodes_[ t ].push_back( create_proxynode_( t, id ) ); } @@ -198,7 +198,7 @@ ModelManager::copy_node_model_( const size_t old_id, Name new_name, DictionaryDa #pragma omp parallel { - const size_t t = kernel::manager< VPManager >().get_thread_id(); + const size_t t = kernel::manager< VPManager >.get_thread_id(); proxy_nodes_[ t ].push_back( create_proxynode_( t, new_id ) ); } } @@ -206,9 +206,9 @@ ModelManager::copy_node_model_( const size_t old_id, Name new_name, DictionaryDa void ModelManager::copy_connection_model_( const size_t old_id, Name new_name, DictionaryDatum params ) { - kernel::manager< VPManager >().assert_single_threaded(); + kernel::manager< VPManager >.assert_single_threaded(); - const size_t new_id = connection_models_.at( kernel::manager< VPManager >().get_thread_id() ).size(); + const size_t new_id = connection_models_.at( kernel::manager< VPManager >.get_thread_id() ).size(); if ( new_id == invalid_synindex ) { @@ -222,11 +222,11 @@ ModelManager::copy_connection_model_( const size_t old_id, Name new_name, Dictio #pragma omp parallel { - const size_t thread_id = kernel::manager< VPManager >().get_thread_id(); + const size_t thread_id = kernel::manager< VPManager >.get_thread_id(); connection_models_.at( thread_id ) .push_back( get_connection_model( old_id, thread_id ).clone( new_name.toString(), new_id ) ); - kernel::manager< 
ConnectionManager >().resize_connections(); + kernel::manager< ConnectionManager >.resize_connections(); } set_synapse_defaults_( new_id, params ); // handles parallelism internally @@ -274,16 +274,16 @@ void ModelManager::set_synapse_defaults_( size_t model_id, const DictionaryDatum& params ) { params->clear_access_flags(); - assert_valid_syn_id( model_id, kernel::manager< VPManager >().get_thread_id() ); + assert_valid_syn_id( model_id, kernel::manager< VPManager >.get_thread_id() ); std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( - kernel::manager< VPManager >().get_num_threads() ); + kernel::manager< VPManager >.get_num_threads() ); // We have to run this in parallel to set the status on nodes that exist on each // thread, such as volume_transmitter. #pragma omp parallel { - size_t tid = kernel::manager< VPManager >().get_thread_id(); + size_t tid = kernel::manager< VPManager >.get_thread_id(); try { @@ -297,7 +297,7 @@ ModelManager::set_synapse_defaults_( size_t model_id, const DictionaryDatum& par } } - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { if ( exceptions_raised_.at( tid ).get() ) { @@ -340,17 +340,17 @@ ModelManager::get_synapse_model_id( std::string model_name ) DictionaryDatum ModelManager::get_connector_defaults( synindex syn_id ) const { - assert_valid_syn_id( syn_id, kernel::manager< VPManager >().get_thread_id() ); + assert_valid_syn_id( syn_id, kernel::manager< VPManager >.get_thread_id() ); DictionaryDatum dict( new Dictionary() ); - for ( size_t t = 0; t < static_cast< size_t >( kernel::manager< VPManager >().get_num_threads() ); ++t ) + for ( size_t t = 0; t < static_cast< size_t >( kernel::manager< VPManager >.get_num_threads() ); ++t ) { // each call adds to num_connections connection_models_[ t ][ syn_id ]->get_status( dict ); } - ( *dict )[ names::num_connections ] = 
kernel::manager< ConnectionManager >().get_num_connections( syn_id ); + ( *dict )[ names::num_connections ] = kernel::manager< ConnectionManager >.get_num_connections( syn_id ); ( *dict )[ names::element_type ] = "synapse"; return dict; @@ -410,7 +410,7 @@ ModelManager::calibrate( const TimeConverter& tc ) { model->calibrate_time( tc ); } - for ( size_t t = 0; t < static_cast< size_t >( kernel::manager< VPManager >().get_num_threads() ); ++t ) + for ( size_t t = 0; t < static_cast< size_t >( kernel::manager< VPManager >.get_num_threads() ); ++t ) { for ( auto&& connection_model : connection_models_[ t ] ) { @@ -426,8 +426,8 @@ ModelManager::calibrate( const TimeConverter& tc ) bool ModelManager::compare_model_by_id_( const int a, const int b ) { - return kernel::manager< ModelManager >().get_node_model( a )->get_name() - < kernel::manager< ModelManager >().get_node_model( b )->get_name(); + return kernel::manager< ModelManager >.get_node_model( a )->get_name() + < kernel::manager< ModelManager >.get_node_model( b )->get_name(); } void diff --git a/nestkernel/model_manager.h b/nestkernel/model_manager.h index a05c3ad746..d7403b3929 100644 --- a/nestkernel/model_manager.h +++ b/nestkernel/model_manager.h @@ -370,7 +370,7 @@ template < typename CompleteConnectionT > void ModelManager::register_specific_connection_model_( const std::string& name ) { - kernel::manager< VPManager >().assert_single_threaded(); + kernel::manager< VPManager >.assert_single_threaded(); if ( synapsedict_->known( name ) ) { @@ -398,18 +398,18 @@ ModelManager::register_specific_connection_model_( const std::string& name ) { conn_model->get_secondary_event()->add_syn_id( new_syn_id ); } - connection_models_.at( kernel::manager< VPManager >().get_thread_id() ).push_back( conn_model ); - kernel::manager< ConnectionManager >().resize_connections(); + connection_models_.at( kernel::manager< VPManager >.get_thread_id() ).push_back( conn_model ); + kernel::manager< ConnectionManager 
>.resize_connections(); } // end of parallel section } inline Node* ModelManager::get_proxy_node( size_t tid, size_t node_id ) { - const int model_id = kernel::manager< ModelRangeManager >().get_model_id( node_id ); + const int model_id = kernel::manager< ModelRangeManager >.get_model_id( node_id ); Node* proxy = proxy_nodes_[ tid ].at( model_id ); proxy->set_node_id_( node_id ); - proxy->set_vp( kernel::manager< VPManager >().node_id_to_vp( node_id ) ); + proxy->set_vp( kernel::manager< VPManager >.node_id_to_vp( node_id ) ); return proxy; } diff --git a/nestkernel/modelrange_manager.cpp b/nestkernel/modelrange_manager.cpp index 957ed0c5d9..fe14d2f7d8 100644 --- a/nestkernel/modelrange_manager.cpp +++ b/nestkernel/modelrange_manager.cpp @@ -113,7 +113,7 @@ ModelRangeManager::get_model_id( size_t node_id ) const nest::Model* nest::ModelRangeManager::get_model_of_node_id( size_t node_id ) { - return kernel::manager< ModelManager >().get_node_model( get_model_id( node_id ) ); + return kernel::manager< ModelManager >.get_node_model( get_model_id( node_id ) ); } const modelrange& diff --git a/nestkernel/module_manager.cpp b/nestkernel/module_manager.cpp index 5c2bcb44f4..48005f2cf1 100644 --- a/nestkernel/module_manager.cpp +++ b/nestkernel/module_manager.cpp @@ -116,7 +116,7 @@ ModuleManager::install( const std::string& name ) { // We cannot have connections without network elements, so we only need to check nodes. // Simulating an empty network causes no problems, so we don't have to check for that. - if ( kernel::manager< NodeManager >().size() > 0 ) + if ( kernel::manager< NodeManager >.size() > 0 ) { throw KernelException( "Network elements have been created, so external modules can no longer be imported. 
" diff --git a/nestkernel/mpi_manager.cpp b/nestkernel/mpi_manager.cpp index 1ea8714ff7..8a623facbb 100644 --- a/nestkernel/mpi_manager.cpp +++ b/nestkernel/mpi_manager.cpp @@ -91,8 +91,8 @@ nest::MPIManager::init_mpi( int*, char*** ) // use 2 processes entries (need at least two // entries per process to use flag of first entry as validity and // last entry to communicate end of communication) - kernel::manager< MPIManager >().set_buffer_size_target_data( 2 ); - kernel::manager< MPIManager >().set_buffer_size_spike_data( 2 ); + kernel::manager< MPIManager >.set_buffer_size_target_data( 2 ); + kernel::manager< MPIManager >.set_buffer_size_spike_data( 2 ); recv_counts_secondary_events_in_int_per_rank_.resize( 1, 0 ); recv_displacements_secondary_events_in_int_per_rank_.resize( 1, 0 ); @@ -113,9 +113,8 @@ nest::MPIManager::set_communicator( MPI_Comm global_comm ) // use at least 2 * number of processes entries (need at least two // entries per process to use flag of first entry as validity and // last entry to communicate end of communication) - kernel::manager< MPIManager >().set_buffer_size_target_data( - 2 * kernel::manager< MPIManager >().get_num_processes() ); - kernel::manager< MPIManager >().set_buffer_size_spike_data( 2 * kernel::manager< MPIManager >().get_num_processes() ); + kernel::manager< MPIManager >.set_buffer_size_target_data( 2 * kernel::manager< MPIManager >.get_num_processes() ); + kernel::manager< MPIManager >.set_buffer_size_spike_data( 2 * kernel::manager< MPIManager >.get_num_processes() ); } void @@ -127,9 +126,9 @@ nest::MPIManager::init_mpi( int* argc, char** argv[] ) if ( init == 0 ) { #ifdef HAVE_MUSIC - kernel::manager< MUSICManager >().init_music( argc, argv ); + kernel::manager< MUSICManager >.init_music( argc, argv ); // get a communicator from MUSIC - set_communicator( static_cast< MPI_Comm >( kernel::manager< MUSICManager >().communicator() ) ); + set_communicator( static_cast< MPI_Comm >( kernel::manager< MUSICManager 
>.communicator() ) ); #else /* #ifdef HAVE_MUSIC */ int provided_thread_level; MPI_Init_thread( argc, argv, MPI_THREAD_FUNNELED, &provided_thread_level ); @@ -287,7 +286,7 @@ nest::MPIManager::mpi_finalize( int exitcode ) { if ( exitcode == 0 ) { - kernel::manager< MUSICManager >().music_finalize(); // calls MPI_Finalize() + kernel::manager< MUSICManager >.music_finalize(); // calls MPI_Finalize() } else { diff --git a/nestkernel/music_event_handler.cpp b/nestkernel/music_event_handler.cpp index b0cd209b4f..6f8855e0c5 100644 --- a/nestkernel/music_event_handler.cpp +++ b/nestkernel/music_event_handler.cpp @@ -95,7 +95,7 @@ MusicEventHandler::publish_port() { if ( not published_ ) { - music_port_ = kernel::manager< MUSICManager >().get_music_setup()->publishEventInput( portname_ ); + music_port_ = kernel::manager< MUSICManager >.get_music_setup()->publishEventInput( portname_ ); // MUSIC wants seconds, NEST has miliseconds const double acceptable_latency_s = 0.001 * acceptable_latency_; diff --git a/nestkernel/music_rate_in_handler.cpp b/nestkernel/music_rate_in_handler.cpp index f70686aed5..40d6a189cc 100644 --- a/nestkernel/music_rate_in_handler.cpp +++ b/nestkernel/music_rate_in_handler.cpp @@ -85,7 +85,7 @@ MusicRateInHandler::publish_port() if ( not published_ ) { - MUSIC::Setup* s = kernel::manager< MUSICManager >().get_music_setup(); + MUSIC::Setup* s = kernel::manager< MUSICManager >.get_music_setup(); if ( s == 0 ) { throw MUSICSimulationHasRun( "" ); @@ -126,7 +126,7 @@ MusicRateInHandler::publish_port() void MusicRateInHandler::update( Time const&, const long, const long ) { - const size_t buffer_size = kernel::manager< ConnectionManager >().get_min_delay(); + const size_t buffer_size = kernel::manager< ConnectionManager >.get_min_delay(); std::vector< double > new_rates( buffer_size, 0.0 ); for ( size_t channel = 0; channel < channelmap_.size(); ++channel ) diff --git a/nestkernel/nest.cpp b/nestkernel/nest.cpp index 21d0fbe825..b5c1e72f0f 100644 --- 
a/nestkernel/nest.cpp +++ b/nestkernel/nest.cpp @@ -48,8 +48,8 @@ namespace nest void init_nest( int* argc, char** argv[] ) { - kernel::manager< MPIManager >().init_mpi( argc, argv ); - kernel::manager< KernelManager >().initialize(); + kernel::manager< MPIManager >.init_mpi( argc, argv ); + kernel::manager< KernelManager >.initialize(); } void @@ -65,54 +65,54 @@ install_module( const std::string& ) void reset_kernel() { - kernel::manager< KernelManager >().reset(); + kernel::manager< KernelManager >.reset(); } void register_logger_client( const deliver_logging_event_ptr client_callback ) { - kernel::manager< LoggingManager >().register_logging_client( client_callback ); + kernel::manager< LoggingManager >.register_logging_client( client_callback ); } void print_nodes_to_stream( std::ostream& ostr ) { - kernel::manager< NodeManager >().print( ostr ); + kernel::manager< NodeManager >.print( ostr ); } RngPtr get_rank_synced_rng() { - return kernel::manager< RandomManager >().get_rank_synced_rng(); + return kernel::manager< RandomManager >.get_rank_synced_rng(); } RngPtr get_vp_synced_rng( size_t tid ) { - return kernel::manager< RandomManager >().get_vp_synced_rng( tid ); + return kernel::manager< RandomManager >.get_vp_synced_rng( tid ); } RngPtr get_vp_specific_rng( size_t tid ) { - return kernel::manager< RandomManager >().get_vp_specific_rng( tid ); + return kernel::manager< RandomManager >.get_vp_specific_rng( tid ); } void set_kernel_status( const DictionaryDatum& dict ) { dict->clear_access_flags(); - kernel::manager< KernelManager >().set_status( dict ); + kernel::manager< KernelManager >.set_status( dict ); ALL_ENTRIES_ACCESSED( *dict, "SetKernelStatus", "Unread dictionary entries: " ); } DictionaryDatum get_kernel_status() { - assert( kernel::manager< KernelManager >().is_initialized() ); + assert( kernel::manager< KernelManager >.is_initialized() ); DictionaryDatum d( new Dictionary ); - kernel::manager< KernelManager >().get_status( d ); + 
kernel::manager< KernelManager >.get_status( d ); return d; } @@ -120,13 +120,13 @@ get_kernel_status() void set_node_status( const size_t node_id, const DictionaryDatum& dict ) { - kernel::manager< NodeManager >().set_status( node_id, dict ); + kernel::manager< NodeManager >.set_status( node_id, dict ); } DictionaryDatum get_node_status( const size_t node_id ) { - return kernel::manager< NodeManager >().get_status( node_id ); + return kernel::manager< NodeManager >.get_status( node_id ); } void @@ -141,7 +141,7 @@ set_connection_status( const ConnectionDatum& conn, const DictionaryDatum& dict dict->clear_access_flags(); - kernel::manager< ConnectionManager >().set_synapse_status( source_node_id, target_node_id, tid, syn_id, p, dict ); + kernel::manager< ConnectionManager >.set_synapse_status( source_node_id, target_node_id, tid, syn_id, p, dict ); ALL_ENTRIES_ACCESSED2( *dict, "SetStatus", @@ -153,7 +153,7 @@ set_connection_status( const ConnectionDatum& conn, const DictionaryDatum& dict DictionaryDatum get_connection_status( const ConnectionDatum& conn ) { - return kernel::manager< ConnectionManager >().get_synapse_status( conn.get_source_node_id(), + return kernel::manager< ConnectionManager >.get_synapse_status( conn.get_source_node_id(), conn.get_target_node_id(), conn.get_target_thread(), conn.get_synapse_model_id(), @@ -168,14 +168,14 @@ create( const Name& model_name, const size_t n_nodes ) throw RangeCheck(); } - const size_t model_id = kernel::manager< ModelManager >().get_node_model_id( model_name ); - return kernel::manager< NodeManager >().add_node( model_id, n_nodes ); + const size_t model_id = kernel::manager< ModelManager >.get_node_model_id( model_name ); + return kernel::manager< NodeManager >.add_node( model_id, n_nodes ); } NodeCollectionPTR get_nodes( const DictionaryDatum& params, const bool local_only ) { - return kernel::manager< NodeManager >().get_nodes( params, local_only ); + return kernel::manager< NodeManager >.get_nodes( params, 
local_only ); } void @@ -184,7 +184,7 @@ connect( NodeCollectionPTR sources, const DictionaryDatum& connectivity, const std::vector< DictionaryDatum >& synapse_params ) { - kernel::manager< ConnectionManager >().connect( sources, targets, connectivity, synapse_params ); + kernel::manager< ConnectionManager >.connect( sources, targets, connectivity, synapse_params ); } void @@ -195,7 +195,7 @@ connect_tripartite( NodeCollectionPTR sources, const DictionaryDatum& third_connectivity, const std::map< Name, std::vector< DictionaryDatum > >& synapse_specs ) { - kernel::manager< ConnectionManager >().connect_tripartite( + kernel::manager< ConnectionManager >.connect_tripartite( sources, targets, third, connectivity, third_connectivity, synapse_specs ); } @@ -209,7 +209,7 @@ connect_arrays( long* sources, size_t n, std::string syn_model ) { - kernel::manager< ConnectionManager >().connect_arrays( + kernel::manager< ConnectionManager >.connect_arrays( sources, targets, weights, delays, p_keys, p_values, n, syn_model ); } @@ -218,7 +218,7 @@ get_connections( const DictionaryDatum& dict ) { dict->clear_access_flags(); - ArrayDatum array = kernel::manager< ConnectionManager >().get_connections( dict ); + ArrayDatum array = kernel::manager< ConnectionManager >.get_connections( dict ); ALL_ENTRIES_ACCESSED( *dict, "GetConnections", "Unread dictionary entries: " ); @@ -231,8 +231,8 @@ disconnect( const ArrayDatum& conns ) for ( size_t conn_index = 0; conn_index < conns.size(); ++conn_index ) { const auto conn_datum = getValue< ConnectionDatum >( conns.get( conn_index ) ); - const auto target_node = kernel::manager< NodeManager >().get_node_or_proxy( conn_datum.get_target_node_id() ); - kernel::manager< SPManager >().disconnect( + const auto target_node = kernel::manager< NodeManager >.get_node_or_proxy( conn_datum.get_target_node_id() ); + kernel::manager< SPManager >.disconnect( conn_datum.get_source_node_id(), target_node, conn_datum.get_target_thread(), 
conn_datum.get_synapse_model_id() ); } } @@ -265,38 +265,38 @@ run( const double& time ) "of the simulation resolution." ); } - kernel::manager< SimulationManager >().run( t_sim ); + kernel::manager< SimulationManager >.run( t_sim ); } void prepare() { - kernel::manager< KernelManager >().prepare(); + kernel::manager< KernelManager >.prepare(); } void cleanup() { - kernel::manager< KernelManager >().cleanup(); + kernel::manager< KernelManager >.cleanup(); } void copy_model( const Name& oldmodname, const Name& newmodname, const DictionaryDatum& dict ) { - kernel::manager< ModelManager >().copy_model( oldmodname, newmodname, dict ); + kernel::manager< ModelManager >.copy_model( oldmodname, newmodname, dict ); } void set_model_defaults( const std::string component, const DictionaryDatum& dict ) { - if ( kernel::manager< ModelManager >().set_model_defaults( component, dict ) ) + if ( kernel::manager< ModelManager >.set_model_defaults( component, dict ) ) { return; } - if ( kernel::manager< IOManager >().is_valid_recording_backend( component ) ) + if ( kernel::manager< IOManager >.is_valid_recording_backend( component ) ) { - kernel::manager< IOManager >().set_recording_backend_status( component, dict ); + kernel::manager< IOManager >.set_recording_backend_status( component, dict ); return; } @@ -308,8 +308,8 @@ get_model_defaults( const std::string component ) { try { - const size_t model_id = kernel::manager< ModelManager >().get_node_model_id( component ); - return kernel::manager< ModelManager >().get_node_model( model_id )->get_status(); + const size_t model_id = kernel::manager< ModelManager >.get_node_model_id( component ); + return kernel::manager< ModelManager >.get_node_model( model_id )->get_status(); } catch ( UnknownModelName& ) { @@ -318,17 +318,17 @@ get_model_defaults( const std::string component ) try { - const size_t synapse_model_id = kernel::manager< ModelManager >().get_synapse_model_id( component ); - return kernel::manager< ModelManager 
>().get_connector_defaults( synapse_model_id ); + const size_t synapse_model_id = kernel::manager< ModelManager >.get_synapse_model_id( component ); + return kernel::manager< ModelManager >.get_connector_defaults( synapse_model_id ); } catch ( UnknownSynapseType& ) { // ignore errors; throw at the end of the function if that's reached } - if ( kernel::manager< IOManager >().is_valid_recording_backend( component ) ) + if ( kernel::manager< IOManager >.is_valid_recording_backend( component ) ) { - return kernel::manager< IOManager >().get_recording_backend_status( component ); + return kernel::manager< IOManager >.get_recording_backend_status( component ); } throw UnknownComponent( component ); @@ -368,7 +368,7 @@ apply( const ParameterDatum& param, const NodeCollectionDatum& nc ) RngPtr rng = get_rank_synced_rng(); for ( auto it = nc->begin(); it < nc->end(); ++it ) { - auto node = kernel::manager< NodeManager >().get_node_or_proxy( ( *it ).node_id ); + auto node = kernel::manager< NodeManager >.get_node_or_proxy( ( *it ).node_id ); result.push_back( param->value( rng, node ) ); } return result; diff --git a/nestkernel/nest.h b/nestkernel/nest.h index 3b61772b01..f52f381058 100644 --- a/nestkernel/nest.h +++ b/nestkernel/nest.h @@ -190,14 +190,14 @@ template < template < typename > class ConnectorModelT > void register_connection_model( const std::string& name ) { - kernel::manager< ModelManager >().register_connection_model< ConnectorModelT >( name ); + kernel::manager< ModelManager >.register_connection_model< ConnectorModelT >( name ); } template < typename NodeModelT > void register_node_model( const std::string& name, std::string deprecation_info ) { - kernel::manager< ModelManager >().register_node_model< NodeModelT >( name, deprecation_info ); + kernel::manager< ModelManager >.register_node_model< NodeModelT >( name, deprecation_info ); } } diff --git a/nestkernel/nestmodule.cpp b/nestkernel/nestmodule.cpp index 40ef15d704..db7826420e 100644 --- 
a/nestkernel/nestmodule.cpp +++ b/nestkernel/nestmodule.cpp @@ -375,7 +375,7 @@ NestModule::SetStatus_aaFunction::execute( SLIInterpreter* i ) const { ConnectionDatum con_id = getValue< ConnectionDatum >( conn_a[ con ] ); dict->clear_access_flags(); - kernel::manager< ConnectionManager >().set_synapse_status( con_id.get_source_node_id(), + kernel::manager< ConnectionManager >.set_synapse_status( con_id.get_source_node_id(), con_id.get_target_node_id(), con_id.get_target_thread(), con_id.get_synapse_model_id(), @@ -393,7 +393,7 @@ NestModule::SetStatus_aaFunction::execute( SLIInterpreter* i ) const DictionaryDatum dict = getValue< DictionaryDatum >( dict_a[ con ] ); ConnectionDatum con_id = getValue< ConnectionDatum >( conn_a[ con ] ); dict->clear_access_flags(); - kernel::manager< ConnectionManager >().set_synapse_status( con_id.get_source_node_id(), + kernel::manager< ConnectionManager >.set_synapse_status( con_id.get_source_node_id(), con_id.get_target_node_id(), con_id.get_target_thread(), con_id.get_synapse_model_id(), @@ -457,7 +457,7 @@ NestModule::GetStatus_CFunction::execute( SLIInterpreter* i ) const ConnectionDatum conn = getValue< ConnectionDatum >( i->OStack.pick( 0 ) ); - DictionaryDatum result_dict = kernel::manager< ConnectionManager >().get_synapse_status( conn.get_source_node_id(), + DictionaryDatum result_dict = kernel::manager< ConnectionManager >.get_synapse_status( conn.get_source_node_id(), conn.get_target_node_id(), conn.get_target_thread(), conn.get_synapse_model_id(), @@ -480,12 +480,11 @@ NestModule::GetStatus_aFunction::execute( SLIInterpreter* i ) const for ( size_t nt = 0; nt < n_results; ++nt ) { ConnectionDatum con_id = getValue< ConnectionDatum >( conns.get( nt ) ); - DictionaryDatum result_dict = - kernel::manager< ConnectionManager >().get_synapse_status( con_id.get_source_node_id(), - con_id.get_target_node_id(), - con_id.get_target_thread(), - con_id.get_synapse_model_id(), - con_id.get_port() ); + DictionaryDatum result_dict = 
kernel::manager< ConnectionManager >.get_synapse_status( con_id.get_source_node_id(), + con_id.get_target_node_id(), + con_id.get_target_thread(), + con_id.get_synapse_model_id(), + con_id.get_port() ); result.push_back( result_dict ); } @@ -558,7 +557,7 @@ NestModule::Install_sFunction::execute( SLIInterpreter* i ) const const std::string modulename = getValue< std::string >( i->OStack.pick( 0 ) ); - kernel::manager< ModuleManager >().install( modulename ); + kernel::manager< ModuleManager >.install( modulename ); i->OStack.pop(); i->EStack.pop(); @@ -630,7 +629,7 @@ NestModule::CopyModel_l_l_DFunction::execute( SLIInterpreter* i ) const const Name new_name = getValue< Name >( i->OStack.pick( 1 ) ); DictionaryDatum params = getValue< DictionaryDatum >( i->OStack.pick( 0 ) ); - kernel::manager< ModelManager >().copy_model( old_name, new_name, params ); + kernel::manager< ModelManager >.copy_model( old_name, new_name, params ); i->OStack.pop( 3 ); i->EStack.pop(); @@ -694,7 +693,7 @@ NestModule::Disconnect_g_g_D_DFunction::execute( SLIInterpreter* i ) const DictionaryDatum synapse_params = getValue< DictionaryDatum >( i->OStack.pick( 0 ) ); // dictionary access checking is handled by disconnect - kernel::manager< SPManager >().disconnect( sources, targets, connectivity, synapse_params ); + kernel::manager< SPManager >.disconnect( sources, targets, connectivity, synapse_params ); i->OStack.pop( 4 ); i->EStack.pop(); @@ -719,7 +718,7 @@ NestModule::Disconnect_aFunction::execute( SLIInterpreter* i ) const void NestModule::Connect_g_g_D_DFunction::execute( SLIInterpreter* i ) const { - kernel::manager< ConnectionManager >().sw_construction_connect.start(); + kernel::manager< ConnectionManager >.sw_construction_connect.start(); i->assert_stack_load( 4 ); @@ -729,18 +728,18 @@ NestModule::Connect_g_g_D_DFunction::execute( SLIInterpreter* i ) const DictionaryDatum synapse_params = getValue< DictionaryDatum >( i->OStack.pick( 0 ) ); // dictionary access checking is handled 
by connect - kernel::manager< ConnectionManager >().connect( sources, targets, connectivity, { synapse_params } ); + kernel::manager< ConnectionManager >.connect( sources, targets, connectivity, { synapse_params } ); i->OStack.pop( 4 ); i->EStack.pop(); - kernel::manager< ConnectionManager >().sw_construction_connect.stop(); + kernel::manager< ConnectionManager >.sw_construction_connect.stop(); } void NestModule::Connect_g_g_D_aFunction::execute( SLIInterpreter* i ) const { - kernel::manager< ConnectionManager >().sw_construction_connect.start(); + kernel::manager< ConnectionManager >.sw_construction_connect.start(); i->assert_stack_load( 4 ); @@ -756,19 +755,19 @@ NestModule::Connect_g_g_D_aFunction::execute( SLIInterpreter* i ) const } // dictionary access checking is handled by connect - kernel::manager< ConnectionManager >().connect( sources, targets, connectivity, synapse_params ); + kernel::manager< ConnectionManager >.connect( sources, targets, connectivity, synapse_params ); i->OStack.pop( 4 ); i->EStack.pop(); - kernel::manager< ConnectionManager >().sw_construction_connect.stop(); + kernel::manager< ConnectionManager >.sw_construction_connect.stop(); } void NestModule::ConnectTripartite_g_g_g_D_D_DFunction::execute( SLIInterpreter* i ) const { - kernel::manager< ConnectionManager >().sw_construction_connect.start(); + kernel::manager< ConnectionManager >.sw_construction_connect.start(); i->assert_stack_load( 6 ); @@ -799,26 +798,26 @@ NestModule::ConnectTripartite_g_g_g_D_D_DFunction::execute( SLIInterpreter* i ) i->OStack.pop( 6 ); i->EStack.pop(); - kernel::manager< ConnectionManager >().sw_construction_connect.stop(); + kernel::manager< ConnectionManager >.sw_construction_connect.stop(); } void NestModule::ConnectSonata_D_Function::execute( SLIInterpreter* i ) const { - kernel::manager< ConnectionManager >().sw_construction_connect.start(); + kernel::manager< ConnectionManager >.sw_construction_connect.start(); i->assert_stack_load( 2 ); 
DictionaryDatum graph_specs = getValue< DictionaryDatum >( i->OStack.pick( 1 ) ); const long hyberslab_size = getValue< long >( i->OStack.pick( 0 ) ); - kernel::manager< ConnectionManager >().connect_sonata( graph_specs, hyberslab_size ); + kernel::manager< ConnectionManager >.connect_sonata( graph_specs, hyberslab_size ); i->OStack.pop( 2 ); i->EStack.pop(); - kernel::manager< ConnectionManager >().sw_construction_connect.stop(); + kernel::manager< ConnectionManager >.sw_construction_connect.stop(); } /** @BeginDocumentation @@ -838,7 +837,7 @@ NestModule::ConnectSonata_D_Function::execute( SLIInterpreter* i ) const void NestModule::MemoryInfoFunction::execute( SLIInterpreter* i ) const { - kernel::manager< ModelManager >().memory_info(); + kernel::manager< ModelManager >.memory_info(); i->EStack.pop(); } @@ -863,21 +862,21 @@ NestModule::PrintNodesToStreamFunction::execute( SLIInterpreter* i ) const void NestModule::RankFunction::execute( SLIInterpreter* i ) const { - i->OStack.push( kernel::manager< MPIManager >().get_rank() ); + i->OStack.push( kernel::manager< MPIManager >.get_rank() ); i->EStack.pop(); } void NestModule::NumProcessesFunction::execute( SLIInterpreter* i ) const { - i->OStack.push( kernel::manager< MPIManager >().get_num_processes() ); + i->OStack.push( kernel::manager< MPIManager >.get_num_processes() ); i->EStack.pop(); } void NestModule::SyncProcessesFunction::execute( SLIInterpreter* i ) const { - kernel::manager< MPIManager >().synchronize(); + kernel::manager< MPIManager >.synchronize(); i->EStack.pop(); } @@ -892,11 +891,11 @@ NestModule::TimeCommunication_i_i_bFunction::execute( SLIInterpreter* i ) const double time = 0.0; if ( offgrid ) { - time = kernel::manager< MPIManager >().time_communicate_offgrid( num_bytes, samples ); + time = kernel::manager< MPIManager >.time_communicate_offgrid( num_bytes, samples ); } else { - time = kernel::manager< MPIManager >().time_communicate( num_bytes, samples ); + time = kernel::manager< MPIManager 
>.time_communicate( num_bytes, samples ); } i->OStack.pop( 3 ); @@ -914,7 +913,7 @@ NestModule::TimeCommunicationv_i_iFunction::execute( SLIInterpreter* i ) const double time = 0.0; - time = kernel::manager< MPIManager >().time_communicatev( num_bytes, samples ); + time = kernel::manager< MPIManager >.time_communicatev( num_bytes, samples ); i->OStack.pop( 2 ); i->OStack.push( time ); @@ -931,7 +930,7 @@ NestModule::TimeCommunicationAlltoall_i_iFunction::execute( SLIInterpreter* i ) double time = 0.0; - time = kernel::manager< MPIManager >().time_communicate_alltoall( num_bytes, samples ); + time = kernel::manager< MPIManager >.time_communicate_alltoall( num_bytes, samples ); i->OStack.pop( 2 ); i->OStack.push( time ); @@ -948,7 +947,7 @@ NestModule::TimeCommunicationAlltoallv_i_iFunction::execute( SLIInterpreter* i ) double time = 0.0; - time = kernel::manager< MPIManager >().time_communicate_alltoallv( num_bytes, samples ); + time = kernel::manager< MPIManager >.time_communicate_alltoallv( num_bytes, samples ); i->OStack.pop( 2 ); i->OStack.push( time ); @@ -958,7 +957,7 @@ NestModule::TimeCommunicationAlltoallv_i_iFunction::execute( SLIInterpreter* i ) void NestModule::ProcessorNameFunction::execute( SLIInterpreter* i ) const { - i->OStack.push( kernel::manager< MPIManager >().get_processor_name() ); + i->OStack.push( kernel::manager< MPIManager >.get_processor_name() ); i->EStack.pop(); } @@ -968,7 +967,7 @@ NestModule::MPIAbort_iFunction::execute( SLIInterpreter* i ) const { i->assert_stack_load( 1 ); long exitcode = getValue< long >( i->OStack.pick( 0 ) ); - kernel::manager< MPIManager >().mpi_abort( exitcode ); + kernel::manager< MPIManager >.mpi_abort( exitcode ); i->EStack.pop(); } #endif @@ -1291,7 +1290,7 @@ NestModule::SetAcceptableLatencyFunction::execute( SLIInterpreter* i ) const std::string port_name = getValue< std::string >( i->OStack.pick( 1 ) ); double latency = getValue< double >( i->OStack.pick( 0 ) ); - kernel::manager< MUSICManager 
>().set_music_in_port_acceptable_latency( port_name, latency ); + kernel::manager< MUSICManager >.set_music_in_port_acceptable_latency( port_name, latency ); i->OStack.pop( 2 ); i->EStack.pop(); @@ -1305,7 +1304,7 @@ NestModule::SetMaxBufferedFunction::execute( SLIInterpreter* i ) const std::string port_name = getValue< std::string >( i->OStack.pick( 1 ) ); int maxBuffered = getValue< long >( i->OStack.pick( 0 ) ); - kernel::manager< MUSICManager >().set_music_in_port_max_buffered( port_name, maxBuffered ); + kernel::manager< MUSICManager >.set_music_in_port_max_buffered( port_name, maxBuffered ); i->OStack.pop( 2 ); i->EStack.pop(); @@ -1316,14 +1315,14 @@ NestModule::SetMaxBufferedFunction::execute( SLIInterpreter* i ) const void NestModule::EnableStructuralPlasticity_Function::execute( SLIInterpreter* i ) const { - kernel::manager< SPManager >().enable_structural_plasticity(); + kernel::manager< SPManager >.enable_structural_plasticity(); i->EStack.pop(); } void NestModule::DisableStructuralPlasticity_Function::execute( SLIInterpreter* i ) const { - kernel::manager< SPManager >().disable_structural_plasticity(); + kernel::manager< SPManager >.disable_structural_plasticity(); i->EStack.pop(); } @@ -1334,7 +1333,7 @@ NestModule::SetStdpEps_dFunction::execute( SLIInterpreter* i ) const i->assert_stack_load( 1 ); const double stdp_eps = getValue< double >( i->OStack.top() ); - kernel::manager< ConnectionManager >().set_stdp_eps( stdp_eps ); + kernel::manager< ConnectionManager >.set_stdp_eps( stdp_eps ); i->OStack.pop(); i->EStack.pop(); @@ -1896,7 +1895,7 @@ NestModule::Sub_M_MFunction::execute( SLIInterpreter* i ) const void NestModule::ConnectLayers_g_g_DFunction::execute( SLIInterpreter* i ) const { - kernel::manager< ConnectionManager >().sw_construction_connect.start(); + kernel::manager< ConnectionManager >.sw_construction_connect.start(); i->assert_stack_load( 3 ); @@ -1909,7 +1908,7 @@ NestModule::ConnectLayers_g_g_DFunction::execute( SLIInterpreter* i ) 
const i->OStack.pop( 3 ); i->EStack.pop(); - kernel::manager< ConnectionManager >().sw_construction_connect.stop(); + kernel::manager< ConnectionManager >.sw_construction_connect.stop(); } void @@ -2189,7 +2188,7 @@ NestModule::init( SLIInterpreter* i ) Token statusd = i->baselookup( Name( "statusdict" ) ); DictionaryDatum dd = getValue< DictionaryDatum >( statusd ); dd->insert( Name( "kernelname" ), new StringDatum( "NEST" ) ); - dd->insert( Name( "is_mpi" ), new BoolDatum( kernel::manager< MPIManager >().is_mpi_used() ) ); + dd->insert( Name( "is_mpi" ), new BoolDatum( kernel::manager< MPIManager >.is_mpi_used() ) ); register_parameter< ConstantParameter >( "constant" ); register_parameter< UniformParameter >( "uniform" ); diff --git a/nestkernel/node.cpp b/nestkernel/node.cpp index 2fcb87dcf6..dd5499d328 100644 --- a/nestkernel/node.cpp +++ b/nestkernel/node.cpp @@ -116,14 +116,14 @@ Node::get_name() const return std::string( "UnknownNode" ); } - return kernel::manager< ModelManager >().get_node_model( model_id_ )->get_name(); + return kernel::manager< ModelManager >.get_node_model( model_id_ )->get_name(); } Model& Node::get_model_() const { assert( model_id_ >= 0 ); - return *kernel::manager< ModelManager >().get_node_model( model_id_ ); + return *kernel::manager< ModelManager >.get_node_model( model_id_ ); } DictionaryDatum @@ -151,7 +151,7 @@ Node::get_status_base() DictionaryDatum dict = get_status_dict_(); // add information available for all nodes - ( *dict )[ names::local ] = kernel::manager< NodeManager >().is_local_node( this ); + ( *dict )[ names::local ] = kernel::manager< NodeManager >.is_local_node( this ); ( *dict )[ names::model ] = LiteralDatum( get_name() ); ( *dict )[ names::model_id ] = get_model_id(); ( *dict )[ names::global_id ] = get_node_id(); diff --git a/nestkernel/node_collection.cpp b/nestkernel/node_collection.cpp index edb95aed9a..050cf9e571 100644 --- a/nestkernel/node_collection.cpp +++ b/nestkernel/node_collection.cpp @@ -63,24 
+63,24 @@ nc_const_iterator::nc_const_iterator( NodeCollectionPTR collection_ptr, : coll_ptr_( collection_ptr ) , element_idx_( offset ) , part_idx_( 0 ) - , step_( kind == NCIteratorKind::RANK_LOCAL ? std::lcm( stride, kernel::manager< MPIManager >().get_num_processes() ) + , step_( kind == NCIteratorKind::RANK_LOCAL ? std::lcm( stride, kernel::manager< MPIManager >.get_num_processes() ) : ( kind == NCIteratorKind::THREAD_LOCAL ? std::lcm( stride, - kernel::manager< VPManager >().get_num_virtual_processes() ) + kernel::manager< VPManager >.get_num_virtual_processes() ) : stride ) ) , kind_( kind ) , rank_or_vp_( kind == NCIteratorKind::RANK_LOCAL - ? kernel::manager< MPIManager >().get_rank() - : ( kind == NCIteratorKind::THREAD_LOCAL ? kernel::manager< VPManager >().get_vp() : invalid_thread ) ) + ? kernel::manager< MPIManager >.get_rank() + : ( kind == NCIteratorKind::THREAD_LOCAL ? kernel::manager< VPManager >.get_vp() : invalid_thread ) ) , primitive_collection_( &collection ) , composite_collection_( nullptr ) { assert( not collection_ptr.get() or collection_ptr.get() == &collection ); assert( element_idx_ <= collection.size() ); // allow == for end() - FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( + FULL_LOGGING_ONLY( kernel::manager< KernelManager >.write_to_dump( String::compose( "NCIT Prim ctor rk %1, thr %2, pix %3, eix %4, step %5, kind %6, rvp %7", - kernel::manager< MPIManager >().get_rank(), - kernel::manager< VPManager >().get_thread_id(), + kernel::manager< MPIManager >.get_rank(), + kernel::manager< VPManager >.get_thread_id(), part_idx_, element_idx_, step_, @@ -97,14 +97,14 @@ nc_const_iterator::nc_const_iterator( NodeCollectionPTR collection_ptr, : coll_ptr_( collection_ptr ) , element_idx_( offset ) , part_idx_( part ) - , step_( kind == NCIteratorKind::RANK_LOCAL ? std::lcm( stride, kernel::manager< MPIManager >().get_num_processes() ) + , step_( kind == NCIteratorKind::RANK_LOCAL ? 
std::lcm( stride, kernel::manager< MPIManager >.get_num_processes() ) : ( kind == NCIteratorKind::THREAD_LOCAL ? std::lcm( stride, - kernel::manager< VPManager >().get_num_virtual_processes() ) + kernel::manager< VPManager >.get_num_virtual_processes() ) : stride ) ) , kind_( kind ) , rank_or_vp_( kind == NCIteratorKind::RANK_LOCAL - ? kernel::manager< MPIManager >().get_rank() - : ( kind == NCIteratorKind::THREAD_LOCAL ? kernel::manager< VPManager >().get_vp() : invalid_thread ) ) + ? kernel::manager< MPIManager >.get_rank() + : ( kind == NCIteratorKind::THREAD_LOCAL ? kernel::manager< VPManager >.get_vp() : invalid_thread ) ) , primitive_collection_( nullptr ) , composite_collection_( &collection ) { @@ -113,10 +113,10 @@ nc_const_iterator::nc_const_iterator( NodeCollectionPTR collection_ptr, // Allow <= for end iterator assert( ( part < collection.parts_.size() and offset <= collection.parts_[ part ].size() ) ); - FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( + FULL_LOGGING_ONLY( kernel::manager< KernelManager >.write_to_dump( String::compose( "NCIT Comp ctor rk %1, thr %2, pix %3, eix %4, step %5, kind %6, rvp %7", - kernel::manager< MPIManager >().get_rank(), - kernel::manager< VPManager >().get_thread_id(), + kernel::manager< MPIManager >.get_rank(), + kernel::manager< VPManager >.get_thread_id(), part_idx_, element_idx_, step_, @@ -225,21 +225,21 @@ nc_const_iterator::advance_local_iter_to_new_part_( size_t n ) { case NCIteratorKind::RANK_LOCAL: { - const size_t num_ranks = kernel::manager< MPIManager >().get_num_processes(); - const size_t current_rank = kernel::manager< MPIManager >().get_rank(); + const size_t num_ranks = kernel::manager< MPIManager >.get_num_processes(); + const size_t current_rank = kernel::manager< MPIManager >.get_rank(); std::tie( part_idx_, element_idx_ ) = composite_collection_->specific_local_begin_( num_ranks, current_rank, part_idx_, element_idx_, NodeCollectionComposite::gid_to_rank_ ); - 
FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( String::compose( - "ACIL rk %1, pix %2, eix %3", kernel::manager< MPIManager >().get_rank(), part_idx_, element_idx_ ) ); ) + FULL_LOGGING_ONLY( kernel::manager< KernelManager >.write_to_dump( String::compose( + "ACIL rk %1, pix %2, eix %3", kernel::manager< MPIManager >.get_rank(), part_idx_, element_idx_ ) ); ) break; } case NCIteratorKind::THREAD_LOCAL: { - const size_t num_vps = kernel::manager< VPManager >().get_num_virtual_processes(); + const size_t num_vps = kernel::manager< VPManager >.get_num_virtual_processes(); const size_t current_vp = - kernel::manager< VPManager >().thread_to_vp( kernel::manager< VPManager >().get_thread_id() ); + kernel::manager< VPManager >.thread_to_vp( kernel::manager< VPManager >.get_thread_id() ); std::tie( part_idx_, element_idx_ ) = composite_collection_->specific_local_begin_( num_vps, current_vp, part_idx_, element_idx_, NodeCollectionComposite::gid_to_vp_ ); @@ -291,9 +291,9 @@ nc_const_iterator::operator*() const { if ( not composite_collection_->valid_idx_( part_idx_, element_idx_ ) ) { - FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( + FULL_LOGGING_ONLY( kernel::manager< KernelManager >.write_to_dump( String::compose( "nci::op* comp err rk %1, lp %2, le %3, pix %4, eix %5, end_pix %6, end_eix %7", - kernel::manager< MPIManager >().get_rank(), + kernel::manager< MPIManager >.get_rank(), composite_collection_->last_part_, composite_collection_->last_elem_, part_idx_, @@ -314,7 +314,7 @@ nc_const_iterator::operator*() const } NodeCollection::NodeCollection() - : fingerprint_( kernel::manager< KernelManager >().get_fingerprint() ) + : fingerprint_( kernel::manager< KernelManager >.get_fingerprint() ) { } @@ -403,7 +403,7 @@ NodeCollection::create_( const std::vector< size_t >& node_ids ) { size_t current_first = node_ids[ 0 ]; size_t current_last = current_first; - size_t current_model = kernel::manager< ModelRangeManager 
>().get_model_id( node_ids[ 0 ] ); + size_t current_model = kernel::manager< ModelRangeManager >.get_model_id( node_ids[ 0 ] ); std::vector< NodeCollectionPrimitive > parts; @@ -416,7 +416,7 @@ NodeCollection::create_( const std::vector< size_t >& node_ids ) } old_node_id = *node_id; - const size_t next_model = kernel::manager< ModelRangeManager >().get_model_id( *node_id ); + const size_t next_model = kernel::manager< ModelRangeManager >.get_model_id( *node_id ); if ( next_model == current_model and *node_id == ( current_last + 1 ) ) { @@ -449,7 +449,7 @@ NodeCollection::create_( const std::vector< size_t >& node_ids ) bool NodeCollection::valid() const { - return fingerprint_ == kernel::manager< KernelManager >().get_fingerprint(); + return fingerprint_ == kernel::manager< KernelManager >.get_fingerprint(); } void @@ -471,7 +471,7 @@ NodeCollectionPrimitive::NodeCollectionPrimitive( size_t first, , last_( last ) , model_id_( model_id ) , metadata_( meta ) - , nodes_have_no_proxies_( not kernel::manager< ModelManager >().get_node_model( model_id_ )->has_proxies() ) + , nodes_have_no_proxies_( not kernel::manager< ModelManager >.get_node_model( model_id_ )->has_proxies() ) { assert( first_ <= last_ ); assert_consistent_model_ids_( model_id_ ); @@ -482,7 +482,7 @@ NodeCollectionPrimitive::NodeCollectionPrimitive( size_t first, size_t last, siz , last_( last ) , model_id_( model_id ) , metadata_( nullptr ) - , nodes_have_no_proxies_( not kernel::manager< ModelManager >().get_node_model( model_id_ )->has_proxies() ) + , nodes_have_no_proxies_( not kernel::manager< ModelManager >.get_node_model( model_id_ )->has_proxies() ) { assert( first_ <= last_ ); } @@ -496,18 +496,18 @@ NodeCollectionPrimitive::NodeCollectionPrimitive( size_t first, size_t last ) assert( first_ <= last_ ); // find the model_id - const auto first_model_id = kernel::manager< ModelRangeManager >().get_model_id( first ); + const auto first_model_id = kernel::manager< ModelRangeManager >.get_model_id( 
first ); const auto init_index = first + 1; for ( size_t node_id = init_index; node_id <= last; ++node_id ) { - const auto model_id = kernel::manager< ModelRangeManager >().get_model_id( node_id ); + const auto model_id = kernel::manager< ModelRangeManager >.get_model_id( node_id ); if ( model_id != first_model_id ) { throw BadProperty( "model ids does not match" ); } } model_id_ = first_model_id; - nodes_have_no_proxies_ = not kernel::manager< ModelManager >().get_node_model( model_id_ )->has_proxies(); + nodes_have_no_proxies_ = not kernel::manager< ModelManager >.get_node_model( model_id_ )->has_proxies(); } NodeCollectionPrimitive::NodeCollectionPrimitive() @@ -535,7 +535,7 @@ NodeCollection::to_array( const std::string& selection ) const // We need to defined zero explicitly here, otherwise push_back() does strange things const size_t zero = 0; node_ids.push_back( zero ); - node_ids.push_back( kernel::manager< VPManager >().get_thread_id() ); + node_ids.push_back( kernel::manager< VPManager >.get_thread_id() ); node_ids.push_back( zero ); const auto end_it = end(); @@ -648,10 +648,10 @@ NodeCollectionPrimitive::operator+( NodeCollectionPTR rhs ) const NodeCollection::const_iterator NodeCollectionPrimitive::rank_local_begin( NodeCollectionPTR cp ) const { - const size_t num_processes = kernel::manager< MPIManager >().get_num_processes(); - const size_t rank = kernel::manager< MPIManager >().get_rank(); + const size_t num_processes = kernel::manager< MPIManager >.get_num_processes(); + const size_t rank = kernel::manager< MPIManager >.get_rank(); const size_t first_elem_rank = - kernel::manager< MPIManager >().get_process_id_of_vp( kernel::manager< VPManager >().node_id_to_vp( first_ ) ); + kernel::manager< MPIManager >.get_process_id_of_vp( kernel::manager< VPManager >.node_id_to_vp( first_ ) ); const size_t elem_idx = ( rank - first_elem_rank + num_processes ) % num_processes; if ( elem_idx > size() ) // Too few node IDs to be shared among all MPI processes. 
@@ -667,10 +667,9 @@ NodeCollectionPrimitive::rank_local_begin( NodeCollectionPTR cp ) const NodeCollection::const_iterator NodeCollectionPrimitive::thread_local_begin( NodeCollectionPTR cp ) const { - const size_t num_vps = kernel::manager< VPManager >().get_num_virtual_processes(); - const size_t current_vp = - kernel::manager< VPManager >().thread_to_vp( kernel::manager< VPManager >().get_thread_id() ); - const size_t vp_first_node = kernel::manager< VPManager >().node_id_to_vp( first_ ); + const size_t num_vps = kernel::manager< VPManager >.get_num_virtual_processes(); + const size_t current_vp = kernel::manager< VPManager >.thread_to_vp( kernel::manager< VPManager >.get_thread_id() ); + const size_t vp_first_node = kernel::manager< VPManager >.node_id_to_vp( first_ ); const size_t offset = ( current_vp - vp_first_node + num_vps ) % num_vps; if ( offset >= size() ) // Too few node IDs to be shared among all vps. @@ -737,7 +736,7 @@ void NodeCollectionPrimitive::print_primitive( std::ostream& out ) const { const std::string model = - model_id_ != invalid_index ? kernel::manager< ModelManager >().get_node_model( model_id_ )->get_name() : "none"; + model_id_ != invalid_index ? 
kernel::manager< ModelManager >.get_node_model( model_id_ )->get_name() : "none"; out << "model=" << model << ", size=" << size(); @@ -768,12 +767,12 @@ NodeCollectionPrimitive::assert_consistent_model_ids_( const size_t expected_mod { for ( size_t node_id = first_; node_id <= last_; ++node_id ) { - const auto model_id = kernel::manager< ModelRangeManager >().get_model_id( node_id ); + const auto model_id = kernel::manager< ModelRangeManager >.get_model_id( node_id ); if ( model_id != expected_model_id ) { - const auto node_model = kernel::manager< ModelRangeManager >().get_model_of_node_id( model_id )->get_name(); + const auto node_model = kernel::manager< ModelRangeManager >.get_model_of_node_id( model_id )->get_name(); const auto expected_model = - kernel::manager< ModelRangeManager >().get_model_of_node_id( expected_model_id )->get_name(); + kernel::manager< ModelRangeManager >.get_model_of_node_id( expected_model_id )->get_name(); const auto message = "All nodes must have the same model (node with ID " + std::to_string( node_id ) + " has model " + node_model + ", expected " + expected_model + ")"; throw BadProperty( message ); @@ -1134,11 +1133,11 @@ NodeCollectionComposite::specific_local_begin_( size_t period, elem_idx += first_elem; } - FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( + FULL_LOGGING_ONLY( kernel::manager< KernelManager >.write_to_dump( String::compose( "SPLB rk %1, thr %2, phase_first %3, offs %4, stp %5, sto %6," " pix %7, lp %8, le %9, primsz %10, nprts: %11, this: %12", - kernel::manager< MPIManager >().get_rank(), - kernel::manager< VPManager >().get_thread_id(), + kernel::manager< MPIManager >.get_rank(), + kernel::manager< VPManager >.get_thread_id(), phase_first_node, offset, first_part, @@ -1182,20 +1181,20 @@ NodeCollectionComposite::specific_local_begin_( size_t period, size_t NodeCollectionComposite::gid_to_vp_( size_t gid ) { - return kernel::manager< VPManager >().node_id_to_vp( gid ); + return 
kernel::manager< VPManager >.node_id_to_vp( gid ); } size_t NodeCollectionComposite::gid_to_rank_( size_t gid ) { - return kernel::manager< MPIManager >().get_process_id_of_vp( kernel::manager< VPManager >().node_id_to_vp( gid ) ); + return kernel::manager< MPIManager >.get_process_id_of_vp( kernel::manager< VPManager >.node_id_to_vp( gid ) ); } NodeCollection::const_iterator NodeCollectionComposite::rank_local_begin( NodeCollectionPTR cp ) const { - const size_t num_ranks = kernel::manager< MPIManager >().get_num_processes(); - const size_t current_rank = kernel::manager< MPIManager >().get_rank(); + const size_t num_ranks = kernel::manager< MPIManager >.get_num_processes(); + const size_t current_rank = kernel::manager< MPIManager >.get_rank(); const auto [ part_index, part_offset ] = specific_local_begin_( num_ranks, current_rank, first_part_, first_elem_, gid_to_rank_ ); @@ -1217,9 +1216,8 @@ NodeCollectionComposite::rank_local_begin( NodeCollectionPTR cp ) const NodeCollection::const_iterator NodeCollectionComposite::thread_local_begin( NodeCollectionPTR cp ) const { - const size_t num_vps = kernel::manager< VPManager >().get_num_virtual_processes(); - const size_t current_vp = - kernel::manager< VPManager >().thread_to_vp( kernel::manager< VPManager >().get_thread_id() ); + const size_t num_vps = kernel::manager< VPManager >.get_num_virtual_processes(); + const size_t current_vp = kernel::manager< VPManager >.thread_to_vp( kernel::manager< VPManager >.get_thread_id() ); const auto [ part_index, part_offset ] = specific_local_begin_( num_vps, current_vp, first_part_, first_elem_, gid_to_vp_ ); @@ -1256,9 +1254,9 @@ NodeCollectionComposite::slice( size_t start, size_t end, size_t stride ) const "InvalidNodeCollection: note that ResetKernel invalidates all previously created NodeCollections." 
); } - FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( "Calling NCC from slice()" ); ) + FULL_LOGGING_ONLY( kernel::manager< KernelManager >.write_to_dump( "Calling NCC from slice()" ); ) const auto new_composite = NodeCollectionComposite( *this, start, end, stride ); - FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( "Calling NCC from slice() --- DONE" ); ) + FULL_LOGGING_ONLY( kernel::manager< KernelManager >.write_to_dump( "Calling NCC from slice() --- DONE" ); ) if ( stride == 1 and new_composite.first_part_ == new_composite.last_part_ ) { @@ -1267,7 +1265,7 @@ NodeCollectionComposite::slice( size_t start, size_t end, size_t stride ) const new_composite.first_elem_, new_composite.last_elem_ + 1 ); } - FULL_LOGGING_ONLY( kernel::manager< KernelManager >().write_to_dump( + FULL_LOGGING_ONLY( kernel::manager< KernelManager >.write_to_dump( String::compose( "NewComposite: fp %1, fe %2, lp %3, le %4, sz %5, strd %6", new_composite.first_part_, new_composite.first_elem_, @@ -1420,7 +1418,7 @@ NodeCollectionComposite::print_me( std::ostream& out ) const { // Need to count the primitive, so can't start at begin() out << "\n" + space - << "model=" << kernel::manager< ModelManager >().get_node_model( first_in_primitive.model_id )->get_name() + << "model=" << kernel::manager< ModelManager >.get_node_model( first_in_primitive.model_id )->get_name() << ", size=" << primitive_size << ", "; if ( primitive_size == 1 ) { @@ -1449,7 +1447,7 @@ NodeCollectionComposite::print_me( std::ostream& out ) const // Need to also print the last primitive out << "\n" + space - << "model=" << kernel::manager< ModelManager >().get_node_model( first_in_primitive.model_id )->get_name() + << "model=" << kernel::manager< ModelManager >.get_node_model( first_in_primitive.model_id )->get_name() << ", size=" << primitive_size << ", "; if ( primitive_size == 1 ) { diff --git a/nestkernel/node_manager.cpp b/nestkernel/node_manager.cpp index 
2fac11c5ce..db348b5987 100644 --- a/nestkernel/node_manager.cpp +++ b/nestkernel/node_manager.cpp @@ -72,8 +72,8 @@ NodeManager::initialize( const bool adjust_number_of_threads_or_rng_only ) { // explicitly force construction of wfr_nodes_vec_ to ensure consistent state wfr_network_size_ = 0; - local_nodes_.resize( kernel::manager< VPManager >().get_num_threads() ); - num_thread_local_devices_.resize( kernel::manager< VPManager >().get_num_threads(), 0 ); + local_nodes_.resize( kernel::manager< VPManager >.get_num_threads() ); + num_thread_local_devices_.resize( kernel::manager< VPManager >.get_num_threads(), 0 ); ensure_valid_thread_local_ids(); if ( not adjust_number_of_threads_or_rng_only ) @@ -113,7 +113,7 @@ NodeManager::add_node( size_t model_id, long n ) throw BadProperty(); } - Model* model = kernel::manager< ModelManager >().get_node_model( model_id ); + Model* model = kernel::manager< ModelManager >.get_node_model( model_id ); assert( model ); model->deprecation_warning( "Create" ); @@ -128,10 +128,10 @@ NodeManager::add_node( size_t model_id, long n ) throw KernelException( "OutOfMemory" ); } - kernel::manager< ModelRangeManager >().add_range( model_id, min_node_id, max_node_id ); + kernel::manager< ModelRangeManager >.add_range( model_id, min_node_id, max_node_id ); // clear any exceptions from previous call - std::vector< std::shared_ptr< WrappedThreadException > >( kernel::manager< VPManager >().get_num_threads() ) + std::vector< std::shared_ptr< WrappedThreadException > >( kernel::manager< VPManager >.get_num_threads() ) .swap( exceptions_raised_ ); auto nc_ptr = NodeCollectionPTR( new NodeCollectionPrimitive( min_node_id, max_node_id, model_id ) ); @@ -151,7 +151,7 @@ NodeManager::add_node( size_t model_id, long n ) } // check if any exceptions have been raised - for ( size_t t = 0; t < kernel::manager< VPManager >().get_num_threads(); ++t ) + for ( size_t t = 0; t < kernel::manager< VPManager >.get_num_threads(); ++t ) { if ( exceptions_raised_.at( 
t ).get() ) { @@ -163,7 +163,7 @@ NodeManager::add_node( size_t model_id, long n ) // successfully if ( model->is_off_grid() ) { - kernel::manager< EventDeliveryManager >().set_off_grid_communication( true ); + kernel::manager< EventDeliveryManager >.set_off_grid_communication( true ); LOG( M_INFO, "NodeManager::add_node", "Neuron models emitting precisely timed spikes exist: " @@ -174,12 +174,12 @@ NodeManager::add_node( size_t model_id, long n ) // resize the target table for delivery of events to devices to make sure the first dimension // matches the number of local nodes and the second dimension matches number of synapse types - kernel::manager< ConnectionManager >().resize_target_table_devices_to_number_of_neurons(); + kernel::manager< ConnectionManager >.resize_target_table_devices_to_number_of_neurons(); #pragma omp parallel { // must be called in parallel context to properly configure per-thread data structures - kernel::manager< ConnectionManager >().resize_target_table_devices_to_number_of_synapse_types(); + kernel::manager< ConnectionManager >.resize_target_table_devices_to_number_of_synapse_types(); } sw_construction_create_.stop(); @@ -190,7 +190,7 @@ NodeManager::add_node( size_t model_id, long n ) void NodeManager::add_neurons_( Model& model, size_t min_node_id, size_t max_node_id ) { - const size_t num_vps = kernel::manager< VPManager >().get_num_virtual_processes(); + const size_t num_vps = kernel::manager< VPManager >.get_num_virtual_processes(); // Upper limit for number of neurons per thread; in practice, either // max_new_per_thread-1 or max_new_per_thread nodes will be created. 
const size_t max_new_per_thread = @@ -198,7 +198,7 @@ NodeManager::add_neurons_( Model& model, size_t min_node_id, size_t max_node_id #pragma omp parallel { - const size_t t = kernel::manager< VPManager >().get_thread_id(); + const size_t t = kernel::manager< VPManager >.get_thread_id(); try { @@ -206,8 +206,8 @@ NodeManager::add_neurons_( Model& model, size_t min_node_id, size_t max_node_id // Need to find smallest node ID with: // - node ID local to this vp // - node_id >= min_node_id - const size_t vp = kernel::manager< VPManager >().thread_to_vp( t ); - const size_t min_node_id_vp = kernel::manager< VPManager >().node_id_to_vp( min_node_id ); + const size_t vp = kernel::manager< VPManager >.thread_to_vp( t ); + const size_t min_node_id_vp = kernel::manager< VPManager >.node_id_to_vp( min_node_id ); size_t node_id = min_node_id + ( num_vps + vp - min_node_id_vp ) % num_vps; @@ -241,7 +241,7 @@ NodeManager::add_devices_( Model& model, size_t min_node_id, size_t max_node_id #pragma omp parallel { - const size_t t = kernel::manager< VPManager >().get_thread_id(); + const size_t t = kernel::manager< VPManager >.get_thread_id(); try { model.reserve_additional( t, n_per_thread ); @@ -255,7 +255,7 @@ NodeManager::add_devices_( Model& model, size_t min_node_id, size_t max_node_id node->set_node_id_( node_id ); node->set_model_id( model.get_model_id() ); node->set_thread( t ); - node->set_vp( kernel::manager< VPManager >().thread_to_vp( t ) ); + node->set_vp( kernel::manager< VPManager >.thread_to_vp( t ) ); node->set_local_device_id( num_thread_local_devices_[ t ] - 1 ); node->set_initialized(); @@ -277,7 +277,7 @@ NodeManager::add_music_nodes_( Model& model, size_t min_node_id, size_t max_node { #pragma omp parallel { - const size_t t = kernel::manager< VPManager >().get_thread_id(); + const size_t t = kernel::manager< VPManager >.get_thread_id(); try { if ( t == 0 ) @@ -291,7 +291,7 @@ NodeManager::add_music_nodes_( Model& model, size_t min_node_id, size_t max_node 
node->set_node_id_( node_id ); node->set_model_id( model.get_model_id() ); node->set_thread( 0 ); - node->set_vp( kernel::manager< VPManager >().thread_to_vp( 0 ) ); + node->set_vp( kernel::manager< VPManager >.thread_to_vp( 0 ) ); node->set_local_device_id( num_thread_local_devices_[ t ] - 1 ); node->set_initialized(); @@ -348,10 +348,10 @@ NodeManager::get_nodes( const DictionaryDatum& params, const bool local_only ) if ( params->empty() ) { std::vector< std::vector< long > > nodes_on_thread; - nodes_on_thread.resize( kernel::manager< VPManager >().get_num_threads() ); + nodes_on_thread.resize( kernel::manager< VPManager >.get_num_threads() ); #pragma omp parallel { - size_t tid = kernel::manager< VPManager >().get_thread_id(); + size_t tid = kernel::manager< VPManager >.get_thread_id(); for ( auto node : get_local_nodes( tid ) ) { @@ -368,7 +368,7 @@ NodeManager::get_nodes( const DictionaryDatum& params, const bool local_only ) } else { - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { // Select those nodes fulfilling the key/value pairs of the dictionary for ( auto node : get_local_nodes( tid ) ) @@ -400,7 +400,7 @@ NodeManager::get_nodes( const DictionaryDatum& params, const bool local_only ) if ( not local_only ) { std::vector< long > globalnodes; - kernel::manager< MPIManager >().communicate( nodes, globalnodes ); + kernel::manager< MPIManager >.communicate( nodes, globalnodes ); for ( size_t i = 0; i < globalnodes.size(); ++i ) { @@ -427,21 +427,21 @@ NodeManager::get_nodes( const DictionaryDatum& params, const bool local_only ) bool NodeManager::is_local_node( Node* n ) const { - return kernel::manager< VPManager >().is_local_vp( n->get_vp() ); + return kernel::manager< VPManager >.is_local_vp( n->get_vp() ); } bool NodeManager::is_local_node_id( size_t node_id ) const { - const size_t vp = kernel::manager< VPManager 
>().node_id_to_vp( node_id ); - return kernel::manager< VPManager >().is_local_vp( vp ); + const size_t vp = kernel::manager< VPManager >.node_id_to_vp( node_id ); + return kernel::manager< VPManager >.is_local_vp( vp ); } size_t NodeManager::get_max_num_local_nodes() const { return static_cast< size_t >( - ceil( static_cast< double >( size() ) / kernel::manager< VPManager >().get_num_virtual_processes() ) ); + ceil( static_cast< double >( size() ) / kernel::manager< VPManager >.get_num_virtual_processes() ) ); } size_t @@ -453,13 +453,13 @@ NodeManager::get_num_thread_local_devices( size_t t ) const Node* NodeManager::get_node_or_proxy( size_t node_id, size_t t ) { - assert( t < kernel::manager< VPManager >().get_num_threads() ); + assert( t < kernel::manager< VPManager >.get_num_threads() ); assert( node_id <= size() ); Node* node = local_nodes_[ t ].get_node_by_node_id( node_id ); if ( not node ) { - return kernel::manager< ModelManager >().get_proxy_node( t, node_id ); + return kernel::manager< ModelManager >.get_proxy_node( t, node_id ); } return node; @@ -470,17 +470,17 @@ NodeManager::get_node_or_proxy( size_t node_id ) { assert( 0 < node_id and node_id <= size() ); - size_t vp = kernel::manager< VPManager >().node_id_to_vp( node_id ); - if ( not kernel::manager< VPManager >().is_local_vp( vp ) ) + size_t vp = kernel::manager< VPManager >.node_id_to_vp( node_id ); + if ( not kernel::manager< VPManager >.is_local_vp( vp ) ) { - return kernel::manager< ModelManager >().get_proxy_node( 0, node_id ); + return kernel::manager< ModelManager >.get_proxy_node( 0, node_id ); } - size_t t = kernel::manager< VPManager >().vp_to_thread( vp ); + size_t t = kernel::manager< VPManager >.vp_to_thread( vp ); Node* node = local_nodes_[ t ].get_node_by_node_id( node_id ); if ( not node ) { - return kernel::manager< ModelManager >().get_proxy_node( t, node_id ); + return kernel::manager< ModelManager >.get_proxy_node( t, node_id ); } return node; @@ -489,13 +489,13 @@ 
NodeManager::get_node_or_proxy( size_t node_id ) Node* NodeManager::get_mpi_local_node_or_device_head( size_t node_id ) { - size_t t = kernel::manager< VPManager >().vp_to_thread( kernel::manager< VPManager >().node_id_to_vp( node_id ) ); + size_t t = kernel::manager< VPManager >.vp_to_thread( kernel::manager< VPManager >.node_id_to_vp( node_id ) ); Node* node = local_nodes_[ t ].get_node_by_node_id( node_id ); if ( not node ) { - return kernel::manager< ModelManager >().get_proxy_node( t, node_id ); + return kernel::manager< ModelManager >.get_proxy_node( t, node_id ); } if ( not node->has_proxies() ) { @@ -508,7 +508,7 @@ NodeManager::get_mpi_local_node_or_device_head( size_t node_id ) std::vector< Node* > NodeManager::get_thread_siblings( size_t node_id ) const { - size_t num_threads = kernel::manager< VPManager >().get_num_threads(); + size_t num_threads = kernel::manager< VPManager >.get_num_threads(); std::vector< Node* > siblings( num_threads ); for ( size_t t = 0; t < num_threads; ++t ) { @@ -552,9 +552,9 @@ NodeManager::ensure_valid_thread_local_ids() // We clear the existing wfr_nodes_vec_ and then rebuild it. 
wfr_nodes_vec_.clear(); - wfr_nodes_vec_.resize( kernel::manager< VPManager >().get_num_threads() ); + wfr_nodes_vec_.resize( kernel::manager< VPManager >.get_num_threads() ); - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { wfr_nodes_vec_[ tid ].clear(); @@ -584,7 +584,7 @@ NodeManager::ensure_valid_thread_local_ids() // step, because gather_events() has to be done in an // openmp single section wfr_is_used_ = false; - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { if ( wfr_nodes_vec_[ tid ].size() > 0 ) { @@ -600,7 +600,7 @@ NodeManager::destruct_nodes_() { #pragma omp parallel { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); for ( auto node : local_nodes_[ tid ] ) { delete node.get_node(); @@ -639,7 +639,7 @@ NodeManager::prepare_node_( Node* n ) void NodeManager::prepare_nodes() { - assert( kernel::manager< KernelManager >().is_initialized() ); + assert( kernel::manager< KernelManager >.is_initialized() ); // We initialize the buffers of each node and calibrate it. @@ -647,11 +647,11 @@ NodeManager::prepare_nodes() size_t num_active_wfr_nodes = 0; // counts nodes that use waveform relaxation std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised( - kernel::manager< VPManager >().get_num_threads() ); + kernel::manager< VPManager >.get_num_threads() ); #pragma omp parallel reduction( + : num_active_nodes, num_active_wfr_nodes ) { - size_t t = kernel::manager< VPManager >().get_thread_id(); + size_t t = kernel::manager< VPManager >.get_thread_id(); // We prepare nodes in a parallel region. Therefore, we need to catch // exceptions here and then handle them after the parallel region. 
@@ -678,7 +678,7 @@ NodeManager::prepare_nodes() } // omp parallel // check if any exceptions have been raised - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { if ( exceptions_raised.at( tid ).get() ) { @@ -705,7 +705,7 @@ NodeManager::post_run_cleanup() { #pragma omp parallel { - size_t t = kernel::manager< VPManager >().get_thread_id(); + size_t t = kernel::manager< VPManager >.get_thread_id(); SparseNodeArray::const_iterator n; for ( n = local_nodes_[ t ].begin(); n != local_nodes_[ t ].end(); ++n ) { @@ -719,7 +719,7 @@ NodeManager::finalize_nodes() { #pragma omp parallel { - size_t tid = kernel::manager< VPManager >().get_thread_id(); + size_t tid = kernel::manager< VPManager >.get_thread_id(); SparseNodeArray::const_iterator n; for ( n = local_nodes_[ tid ].begin(); n != local_nodes_[ tid ].end(); ++n ) { @@ -731,15 +731,15 @@ NodeManager::finalize_nodes() void NodeManager::check_wfr_use() { - wfr_is_used_ = kernel::manager< MPIManager >().any_true( wfr_is_used_ ); + wfr_is_used_ = kernel::manager< MPIManager >.any_true( wfr_is_used_ ); - GapJunctionEvent::set_coeff_length( kernel::manager< ConnectionManager >().get_min_delay() - * ( kernel::manager< SimulationManager >().get_wfr_interpolation_order() + 1 ) ); - InstantaneousRateConnectionEvent::set_coeff_length( kernel::manager< ConnectionManager >().get_min_delay() ); - DelayedRateConnectionEvent::set_coeff_length( kernel::manager< ConnectionManager >().get_min_delay() ); - DiffusionConnectionEvent::set_coeff_length( kernel::manager< ConnectionManager >().get_min_delay() ); - LearningSignalConnectionEvent::set_coeff_length( kernel::manager< ConnectionManager >().get_min_delay() ); - SICEvent::set_coeff_length( kernel::manager< ConnectionManager >().get_min_delay() ); + GapJunctionEvent::set_coeff_length( kernel::manager< ConnectionManager >.get_min_delay() + * ( kernel::manager< 
SimulationManager >.get_wfr_interpolation_order() + 1 ) ); + InstantaneousRateConnectionEvent::set_coeff_length( kernel::manager< ConnectionManager >.get_min_delay() ); + DelayedRateConnectionEvent::set_coeff_length( kernel::manager< ConnectionManager >.get_min_delay() ); + DiffusionConnectionEvent::set_coeff_length( kernel::manager< ConnectionManager >.get_min_delay() ); + LearningSignalConnectionEvent::set_coeff_length( kernel::manager< ConnectionManager >.get_min_delay() ); + SICEvent::set_coeff_length( kernel::manager< ConnectionManager >.get_min_delay() ); } void @@ -749,13 +749,13 @@ NodeManager::print( std::ostream& out ) const const double max_node_id_width = std::floor( std::log10( max_node_id ) ); const double node_id_range_width = 6 + 2 * max_node_id_width; - for ( std::vector< modelrange >::const_iterator it = kernel::manager< ModelRangeManager >().begin(); - it != kernel::manager< ModelRangeManager >().end(); + for ( std::vector< modelrange >::const_iterator it = kernel::manager< ModelRangeManager >.begin(); + it != kernel::manager< ModelRangeManager >.end(); ++it ) { const size_t first_node_id = it->get_first_node_id(); const size_t last_node_id = it->get_last_node_id(); - const Model* mod = kernel::manager< ModelManager >().get_node_model( it->get_model_id() ); + const Model* mod = kernel::manager< ModelManager >.get_node_model( it->get_model_id() ); std::stringstream node_id_range_strs; node_id_range_strs << std::setw( max_node_id_width + 1 ) << first_node_id; @@ -765,7 +765,7 @@ NodeManager::print( std::ostream& out ) const } out << std::setw( node_id_range_width ) << std::left << node_id_range_strs.str() << " " << mod->get_name(); - if ( it + 1 != kernel::manager< ModelRangeManager >().end() ) + if ( it + 1 != kernel::manager< ModelRangeManager >.end() ) { out << std::endl; } @@ -775,7 +775,7 @@ NodeManager::print( std::ostream& out ) const void NodeManager::set_status( size_t node_id, const DictionaryDatum& d ) { - for ( size_t t = 0; t < 
kernel::manager< VPManager >().get_num_threads(); ++t ) + for ( size_t t = 0; t < kernel::manager< VPManager >.get_num_threads(); ++t ) { Node* node = local_nodes_[ t ].get_node_by_node_id( node_id ); if ( node ) diff --git a/nestkernel/parameter.cpp b/nestkernel/parameter.cpp index 518a204f5e..08a5da62a5 100644 --- a/nestkernel/parameter.cpp +++ b/nestkernel/parameter.cpp @@ -91,15 +91,15 @@ NormalParameter::NormalParameter( const DictionaryDatum& d ) normal_distribution::param_type param( mean_, std_ ); dist.param( param ); assert( normal_dists_.size() == 0 ); - normal_dists_.resize( kernel::manager< VPManager >().get_num_threads(), dist ); + normal_dists_.resize( kernel::manager< VPManager >.get_num_threads(), dist ); } double NormalParameter::value( RngPtr rng, Node* node ) { const auto tid = node - ? kernel::manager< VPManager >().vp_to_thread( kernel::manager< VPManager >().node_id_to_vp( node->get_node_id() ) ) - : kernel::manager< VPManager >().get_thread_id(); + ? kernel::manager< VPManager >.vp_to_thread( kernel::manager< VPManager >.node_id_to_vp( node->get_node_id() ) ) + : kernel::manager< VPManager >.get_thread_id(); return normal_dists_[ tid ]( rng ); } @@ -118,15 +118,15 @@ LognormalParameter::LognormalParameter( const DictionaryDatum& d ) const lognormal_distribution::param_type param( mean_, std_ ); dist.param( param ); assert( lognormal_dists_.size() == 0 ); - lognormal_dists_.resize( kernel::manager< VPManager >().get_num_threads(), dist ); + lognormal_dists_.resize( kernel::manager< VPManager >.get_num_threads(), dist ); } double LognormalParameter::value( RngPtr rng, Node* node ) { const auto tid = node - ? kernel::manager< VPManager >().vp_to_thread( kernel::manager< VPManager >().node_id_to_vp( node->get_node_id() ) ) - : kernel::manager< VPManager >().get_thread_id(); + ? 
kernel::manager< VPManager >.vp_to_thread( kernel::manager< VPManager >.node_id_to_vp( node->get_node_id() ) ) + : kernel::manager< VPManager >.get_thread_id(); return lognormal_dists_[ tid ]( rng ); } @@ -138,7 +138,7 @@ NodePosParameter::get_node_pos_( Node* node ) const { throw KernelException( "NodePosParameter: not node" ); } - NodeCollectionPTR nc = kernel::manager< NodeManager >().node_id_to_node_collection( node ); + NodeCollectionPTR nc = kernel::manager< NodeManager >.node_id_to_node_collection( node ); if ( not nc.get() ) { throw KernelException( "NodePosParameter: not nc" ); diff --git a/nestkernel/per_thread_bool_indicator.cpp b/nestkernel/per_thread_bool_indicator.cpp index 6448879c65..2cd55f298f 100644 --- a/nestkernel/per_thread_bool_indicator.cpp +++ b/nestkernel/per_thread_bool_indicator.cpp @@ -49,7 +49,7 @@ PerThreadBoolIndicator::operator[]( const size_t tid ) void PerThreadBoolIndicator::initialize( const size_t num_threads, const bool status ) { - kernel::manager< VPManager >().assert_single_threaded(); + kernel::manager< VPManager >.assert_single_threaded(); per_thread_status_.clear(); per_thread_status_.resize( num_threads, BoolIndicatorUInt64( status ) ); size_ = num_threads; @@ -66,7 +66,7 @@ PerThreadBoolIndicator::initialize( const size_t num_threads, const bool status bool PerThreadBoolIndicator::all_false() const { - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().start(); // We need two barriers here to ensure that no thread can continue and change the result // before all threads have determined the result. 
#pragma omp barrier @@ -75,42 +75,42 @@ PerThreadBoolIndicator::all_false() const bool ret = ( are_true_ == 0 ); #pragma omp barrier - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().stop(); return ret; } bool PerThreadBoolIndicator::all_true() const { - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier bool ret = ( are_true_ == size_ ); #pragma omp barrier - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().stop(); return ret; } bool PerThreadBoolIndicator::any_false() const { - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier bool ret = ( are_true_ < size_ ); #pragma omp barrier - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().stop(); return ret; } bool PerThreadBoolIndicator::any_true() const { - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier bool ret = ( are_true_ > 0 ); #pragma omp barrier - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().stop(); return ret; } diff --git a/nestkernel/proxynode.cpp b/nestkernel/proxynode.cpp index 
7ccde535bc..f91a95bbd3 100644 --- a/nestkernel/proxynode.cpp +++ b/nestkernel/proxynode.cpp @@ -46,56 +46,56 @@ proxynode::proxynode( size_t node_id, size_t model_id, size_t vp ) size_t proxynode::send_test_event( Node& target, size_t receptor_type, synindex syn_id, bool dummy_target ) { - Model* model = kernel::manager< ModelManager >().get_node_model( get_model_id() ); + Model* model = kernel::manager< ModelManager >.get_node_model( get_model_id() ); return model->send_test_event( target, receptor_type, syn_id, dummy_target ); } void proxynode::sends_secondary_event( GapJunctionEvent& ge ) { - kernel::manager< ModelManager >().get_node_model( get_model_id() )->sends_secondary_event( ge ); + kernel::manager< ModelManager >.get_node_model( get_model_id() )->sends_secondary_event( ge ); } void proxynode::sends_secondary_event( InstantaneousRateConnectionEvent& re ) { - kernel::manager< ModelManager >().get_node_model( get_model_id() )->sends_secondary_event( re ); + kernel::manager< ModelManager >.get_node_model( get_model_id() )->sends_secondary_event( re ); } void proxynode::sends_secondary_event( DiffusionConnectionEvent& de ) { - kernel::manager< ModelManager >().get_node_model( get_model_id() )->sends_secondary_event( de ); + kernel::manager< ModelManager >.get_node_model( get_model_id() )->sends_secondary_event( de ); } void proxynode::sends_secondary_event( DelayedRateConnectionEvent& re ) { - kernel::manager< ModelManager >().get_node_model( get_model_id() )->sends_secondary_event( re ); + kernel::manager< ModelManager >.get_node_model( get_model_id() )->sends_secondary_event( re ); } void proxynode::sends_secondary_event( LearningSignalConnectionEvent& re ) { - kernel::manager< ModelManager >().get_node_model( get_model_id() )->sends_secondary_event( re ); + kernel::manager< ModelManager >.get_node_model( get_model_id() )->sends_secondary_event( re ); } void proxynode::sends_secondary_event( SICEvent& sic ) { - kernel::manager< ModelManager 
>().get_node_model( get_model_id() )->sends_secondary_event( sic ); + kernel::manager< ModelManager >.get_node_model( get_model_id() )->sends_secondary_event( sic ); } nest::SignalType proxynode::sends_signal() const { - return kernel::manager< ModelManager >().get_node_model( get_model_id() )->sends_signal(); + return kernel::manager< ModelManager >.get_node_model( get_model_id() )->sends_signal(); } void proxynode::get_status( DictionaryDatum& d ) const { - const Model* model = kernel::manager< ModelManager >().get_node_model( model_id_ ); + const Model* model = kernel::manager< ModelManager >.get_node_model( model_id_ ); const Name element_type = model->get_prototype().get_element_type(); ( *d )[ names::element_type ] = LiteralDatum( element_type ); } diff --git a/nestkernel/random_manager.cpp b/nestkernel/random_manager.cpp index 94ff239dc8..8b53770dc8 100644 --- a/nestkernel/random_manager.cpp +++ b/nestkernel/random_manager.cpp @@ -87,13 +87,13 @@ nest::RandomManager::initialize( const bool adjust_number_of_threads_or_rng_only // Create new RNGs of the currently used RNG type. 
rank_synced_rng_ = rng_types_[ current_rng_type_ ]->create( { base_seed_, RANK_SYNCED_SEEDER_ } ); - vp_synced_rngs_.resize( kernel::manager< VPManager >().get_num_threads() ); - vp_specific_rngs_.resize( kernel::manager< VPManager >().get_num_threads() ); + vp_synced_rngs_.resize( kernel::manager< VPManager >.get_num_threads() ); + vp_specific_rngs_.resize( kernel::manager< VPManager >.get_num_threads() ); #pragma omp parallel { - const auto tid = kernel::manager< VPManager >().get_thread_id(); - const std::uint32_t vp = kernel::manager< VPManager >().get_vp(); // type required for rng initializer + const auto tid = kernel::manager< VPManager >.get_thread_id(); + const std::uint32_t vp = kernel::manager< VPManager >.get_vp(); // type required for rng initializer vp_synced_rngs_[ tid ] = rng_types_[ current_rng_type_ ]->create( { base_seed_, THREAD_SYNCED_SEEDER_ } ); vp_specific_rngs_[ tid ] = rng_types_[ current_rng_type_ ]->create( { base_seed_, THREAD_SPECIFIC_SEEDER_, vp } ); } @@ -189,7 +189,7 @@ nest::RandomManager::check_rng_synchrony() const for ( auto n = 0; n < NUM_ROUNDS; ++n ) { const auto r = rank_synced_rng_->drand(); - if ( not kernel::manager< MPIManager >().equal_cross_ranks( r ) ) + if ( not kernel::manager< MPIManager >.equal_cross_ranks( r ) ) { throw KernelException( "Rank-synchronized random number generators are out of sync." ); } @@ -198,7 +198,7 @@ nest::RandomManager::check_rng_synchrony() const // We check thread-synchrony under all circumstances to keep the code simple. 
for ( auto n = 0; n < NUM_ROUNDS; ++n ) { - const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >.get_num_threads(); double local_min = std::numeric_limits< double >::max(); double local_max = std::numeric_limits< double >::min(); for ( size_t t = 0; t < num_threads; ++t ) @@ -214,7 +214,7 @@ nest::RandomManager::check_rng_synchrony() const local_min = -std::numeric_limits< double >::infinity(); } - if ( not kernel::manager< MPIManager >().equal_cross_ranks( local_min ) ) + if ( not kernel::manager< MPIManager >.equal_cross_ranks( local_min ) ) { throw KernelException( "Thread-synchronized random number generators are out of sync." ); } diff --git a/nestkernel/recording_backend_ascii.cpp b/nestkernel/recording_backend_ascii.cpp index 9294c78660..b0474b0177 100644 --- a/nestkernel/recording_backend_ascii.cpp +++ b/nestkernel/recording_backend_ascii.cpp @@ -49,7 +49,7 @@ nest::RecordingBackendASCII::~RecordingBackendASCII() throw() void nest::RecordingBackendASCII::initialize() { - data_map tmp( kernel::manager< VPManager >().get_num_threads() ); + data_map tmp( kernel::manager< VPManager >.get_num_threads() ); device_data_.swap( tmp ); } @@ -160,8 +160,8 @@ nest::RecordingBackendASCII::write( const RecordingDevice& device, const std::string nest::RecordingBackendASCII::compute_vp_node_id_string_( const RecordingDevice& device ) const { - const double num_vps = kernel::manager< VPManager >().get_num_virtual_processes(); - const double num_nodes = kernel::manager< NodeManager >().size(); + const double num_vps = kernel::manager< VPManager >.get_num_virtual_processes(); + const double num_nodes = kernel::manager< NodeManager >.size(); const int vp_digits = static_cast< int >( std::floor( std::log10( num_vps ) ) + 1 ); const int node_id_digits = static_cast< int >( std::floor( std::log10( num_nodes ) ) + 1 ); @@ -255,7 +255,7 @@ nest::RecordingBackendASCII::DeviceData::open_file() 
std::string filename = compute_filename_(); std::ifstream test( filename.c_str() ); - if ( test.good() and not kernel::manager< IOManager >().overwrite_files() ) + if ( test.good() and not kernel::manager< IOManager >.overwrite_files() ) { std::string msg = String::compose( "The file '%1' already exists and overwriting files is disabled. To overwrite files, set " @@ -348,7 +348,7 @@ nest::RecordingBackendASCII::DeviceData::set_status( const DictionaryDatum& d ) bool time_in_steps = false; if ( updateValue< bool >( d, names::time_in_steps, time_in_steps ) ) { - if ( kernel::manager< SimulationManager >().has_been_simulated() ) + if ( kernel::manager< SimulationManager >.has_been_simulated() ) { throw BadProperty( "Property time_in_steps cannot be set after Simulate has been called." ); } @@ -360,7 +360,7 @@ nest::RecordingBackendASCII::DeviceData::set_status( const DictionaryDatum& d ) std::string nest::RecordingBackendASCII::DeviceData::compute_filename_() const { - std::string data_path = kernel::manager< IOManager >().get_data_path(); + std::string data_path = kernel::manager< IOManager >.get_data_path(); if ( not data_path.empty() and not( data_path[ data_path.size() - 1 ] == '/' ) ) { data_path += '/'; @@ -372,7 +372,7 @@ nest::RecordingBackendASCII::DeviceData::compute_filename_() const label = modelname_; } - std::string data_prefix = kernel::manager< IOManager >().get_data_prefix(); + std::string data_prefix = kernel::manager< IOManager >.get_data_prefix(); return data_path + data_prefix + label + vp_node_id_string_ + "." 
+ file_extension_; } diff --git a/nestkernel/recording_backend_memory.cpp b/nestkernel/recording_backend_memory.cpp index bef37d88e3..a91c114486 100644 --- a/nestkernel/recording_backend_memory.cpp +++ b/nestkernel/recording_backend_memory.cpp @@ -36,7 +36,7 @@ nest::RecordingBackendMemory::~RecordingBackendMemory() throw() void nest::RecordingBackendMemory::initialize() { - device_data_map tmp( kernel::manager< VPManager >().get_num_threads() ); + device_data_map tmp( kernel::manager< VPManager >.get_num_threads() ); device_data_.swap( tmp ); } @@ -265,7 +265,7 @@ nest::RecordingBackendMemory::DeviceData::set_status( const DictionaryDatum& d ) bool time_in_steps = false; if ( updateValue< bool >( d, names::time_in_steps, time_in_steps ) ) { - if ( kernel::manager< SimulationManager >().has_been_simulated() ) + if ( kernel::manager< SimulationManager >.has_been_simulated() ) { throw BadProperty( "Property time_in_steps cannot be set after Simulate has been called." ); } diff --git a/nestkernel/recording_backend_mpi.cpp b/nestkernel/recording_backend_mpi.cpp index 2b4a18f8f4..c34a9be4cf 100644 --- a/nestkernel/recording_backend_mpi.cpp +++ b/nestkernel/recording_backend_mpi.cpp @@ -46,7 +46,7 @@ nest::RecordingBackendMPI::~RecordingBackendMPI() throw() void nest::RecordingBackendMPI::initialize() { - auto nthreads = kernel::manager< VPManager >().get_num_threads(); + auto nthreads = kernel::manager< VPManager >.get_num_threads(); std::vector< std::vector< std::vector< std::array< double, 3 > > > > empty_vector( nthreads ); buffer_.swap( empty_vector ); device_map devices( nthreads ); @@ -132,14 +132,14 @@ nest::RecordingBackendMPI::prepare() } prepared_ = true; size_t thread_id_master = 0; -#pragma omp parallel default( none ) shared( thread_id_master ) +#pragma omp parallel shared( thread_id_master ) { #pragma omp master { // Create the connection with MPI // 1) take all the ports of the connections // get port and update the list of devices - thread_id_master = 
kernel::manager< VPManager >().get_thread_id(); + thread_id_master = kernel::manager< VPManager >.get_thread_id(); } } int count_max = 0; @@ -190,10 +190,10 @@ nest::RecordingBackendMPI::prepare() msg << "Connect to " << it_comm.first.data() << "\n"; LOG( M_INFO, "MPI Record connect", msg.str() ); } -#pragma omp parallel default( none ) shared( thread_id_master ) +#pragma omp parallel shared( thread_id_master ) { // Update all the threads - size_t thread_id = kernel::manager< VPManager >().get_thread_id(); + size_t thread_id = kernel::manager< VPManager >.get_thread_id(); if ( thread_id != thread_id_master ) { for ( auto& it_device : devices_[ thread_id ] ) @@ -293,7 +293,7 @@ nest::RecordingBackendMPI::cleanup() } // clear map of device commMap_.clear(); - size_t thread_id_master = kernel::manager< VPManager >().get_thread_id(); + size_t thread_id_master = kernel::manager< VPManager >.get_thread_id(); for ( auto& it_device : devices_[ thread_id_master ] ) { std::get< 0 >( it_device.second ) = -1; @@ -329,7 +329,7 @@ nest::RecordingBackendMPI::write( const RecordingDevice& device, const std::vector< long >& ) { // For each event send a message through the right MPI communicator - const size_t thread_id = kernel::manager< VPManager >().get_thread_id(); + const size_t thread_id = kernel::manager< VPManager >.get_thread_id(); const size_t sender = event.get_sender_node_id(); const size_t recorder = device.get_node_id(); const Time stamp = event.get_stamp(); @@ -388,12 +388,12 @@ nest::RecordingBackendMPI::get_port( const size_t index_node, const std::string& // path of the file : path+label+id+.txt // (file contains only one line with name of the port ) std::ostringstream basename; - const std::string& path = kernel::manager< IOManager >().get_data_path(); + const std::string& path = kernel::manager< IOManager >.get_data_path(); if ( not path.empty() ) { basename << path << '/'; } - basename << kernel::manager< IOManager >().get_data_prefix(); + basename << 
kernel::manager< IOManager >.get_data_prefix(); if ( not label.empty() ) { diff --git a/nestkernel/recording_backend_screen.cpp b/nestkernel/recording_backend_screen.cpp index 73e3722537..9cc04457e4 100644 --- a/nestkernel/recording_backend_screen.cpp +++ b/nestkernel/recording_backend_screen.cpp @@ -31,7 +31,7 @@ void nest::RecordingBackendScreen::initialize() { - device_data_map tmp( kernel::manager< VPManager >().get_num_threads() ); + device_data_map tmp( kernel::manager< VPManager >.get_num_threads() ); device_data_.swap( tmp ); } diff --git a/nestkernel/recording_backend_sionlib.cpp b/nestkernel/recording_backend_sionlib.cpp index 38c544e31a..8071e40050 100644 --- a/nestkernel/recording_backend_sionlib.cpp +++ b/nestkernel/recording_backend_sionlib.cpp @@ -60,7 +60,7 @@ nest::RecordingBackendSIONlib::~RecordingBackendSIONlib() throw() void nest::RecordingBackendSIONlib::initialize() { - device_map devices( kernel::manager< VPManager >().get_num_threads() ); + device_map devices( kernel::manager< VPManager >.get_num_threads() ); devices_.swap( devices ); } @@ -169,11 +169,11 @@ nest::RecordingBackendSIONlib::open_files_() WrappedThreadException* we = nullptr; // This code is executed in a parallel region (opened above)! 
- const size_t t = kernel::manager< VPManager >().get_thread_id(); - const size_t task = kernel::manager< VPManager >().thread_to_vp( t ); + const size_t t = kernel::manager< VPManager >.get_thread_id(); + const size_t task = kernel::manager< VPManager >.thread_to_vp( t ); if ( not task ) { - t_start_ = kernel::manager< SimulationManager >().get_time().get_ms(); + t_start_ = kernel::manager< SimulationManager >.get_time().get_ms(); } // set n_rec counters to zero in every device on every thread @@ -198,7 +198,7 @@ nest::RecordingBackendSIONlib::open_files_() std::string filename = build_filename_(); std::ifstream test( filename.c_str() ); - if ( test.good() & not kernel::manager< IOManager >().overwrite_files() ) + if ( test.good() & not kernel::manager< IOManager >.overwrite_files() ) { std::string msg = String::compose( "The device file '%1' exists already and will not be overwritten. " @@ -217,12 +217,12 @@ nest::RecordingBackendSIONlib::open_files_() #endif /* BG_MULTIFILE */ sion_int32 fs_block_size = -1; sion_int64 sion_chunksize = P_.sion_chunksize_; - int rank = kernel::manager< MPIManager >().get_rank(); + int rank = kernel::manager< MPIManager >.get_rank(); file.sid = sion_paropen_ompi( filename.c_str(), P_.sion_collective_ ? 
"bw,cmerge,collsize=-1" : "bw", &n_files, - kernel::manager< MPIManager >().get_communicator(), + kernel::manager< MPIManager >.get_communicator(), &local_comm, &sion_chunksize, &fs_block_size, @@ -272,8 +272,8 @@ nest::RecordingBackendSIONlib::close_files_() #pragma omp parallel { - const size_t t = kernel::manager< VPManager >().get_thread_id(); - const size_t task = kernel::manager< VPManager >().thread_to_vp( t ); + const size_t t = kernel::manager< VPManager >.get_thread_id(); + const size_t task = kernel::manager< VPManager >.thread_to_vp( t ); assert( ( files_.find( task ) != files_.end() ) and "initialize() was not called before calling cleanup()" ); @@ -307,7 +307,7 @@ nest::RecordingBackendSIONlib::close_files_() // accumulate number of recorded data points over all ranks unsigned long n_rec_total = 0; MPI_Reduce( - &n_rec, &n_rec_total, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, kernel::manager< MPIManager >().get_communicator() ); + &n_rec, &n_rec_total, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, kernel::manager< MPIManager >.get_communicator() ); assert( sizeof( unsigned long ) <= sizeof( sion_uint64 ) ); it->second.info.n_rec = static_cast< sion_uint64 >( n_rec_total ); } @@ -328,7 +328,7 @@ nest::RecordingBackendSIONlib::close_files_() sion_int64 info_pos; } data_end = { info_blk, info_pos }; - double t_end = kernel::manager< SimulationManager >().get_time().get_ms(); + double t_end = kernel::manager< SimulationManager >.get_time().get_ms(); double resolution = Time::get_resolution().get_ms(); sion_fwrite( &t_start_, sizeof( double ), 1, file.sid ); @@ -521,12 +521,12 @@ const std::string nest::RecordingBackendSIONlib::build_filename_() const { std::ostringstream basename; - const std::string& path = kernel::manager< IOManager >().get_data_path(); + const std::string& path = kernel::manager< IOManager >.get_data_path(); if ( not path.empty() ) { basename << path << '/'; } - basename << kernel::manager< IOManager >().get_data_prefix(); + basename << kernel::manager< 
IOManager >.get_data_prefix(); return basename.str() + P_.filename_; } @@ -676,8 +676,8 @@ nest::RecordingBackendSIONlib::post_step_hook() return; } - const size_t t = kernel::manager< VPManager >().get_thread_id(); - const size_t task = kernel::manager< VPManager >().thread_to_vp( t ); + const size_t t = kernel::manager< VPManager >.get_thread_id(); + const size_t task = kernel::manager< VPManager >.thread_to_vp( t ); FileEntry& file = files_[ task ]; SIONBuffer& buffer = file.buffer; diff --git a/nestkernel/recording_device.cpp b/nestkernel/recording_device.cpp index 0a2dd8ba3b..40241bba81 100644 --- a/nestkernel/recording_device.cpp +++ b/nestkernel/recording_device.cpp @@ -46,7 +46,7 @@ nest::RecordingDevice::RecordingDevice( const RecordingDevice& rd ) void nest::RecordingDevice::set_initialized_() { - kernel::manager< IOManager >().enroll_recorder( P_.record_to_, *this, backend_params_ ); + kernel::manager< IOManager >.enroll_recorder( P_.record_to_, *this, backend_params_ ); } void @@ -54,8 +54,7 @@ nest::RecordingDevice::pre_run_hook( const std::vector< Name >& double_value_nam const std::vector< Name >& long_value_names ) { Device::pre_run_hook(); - kernel::manager< IOManager >().set_recording_value_names( - P_.record_to_, *this, double_value_names, long_value_names ); + kernel::manager< IOManager >.set_recording_value_names( P_.record_to_, *this, double_value_names, long_value_names ); } const std::string& @@ -85,7 +84,7 @@ nest::RecordingDevice::Parameters_::set( const DictionaryDatum& d ) std::string record_to; if ( updateValue< std::string >( d, names::record_to, record_to ) ) { - if ( not kernel::manager< IOManager >().is_valid_recording_backend( record_to ) ) + if ( not kernel::manager< IOManager >.is_valid_recording_backend( record_to ) ) { std::string msg = String::compose( "Unknown recording backend '%1'", record_to ); throw BadProperty( msg ); @@ -126,7 +125,7 @@ nest::RecordingDevice::State_::set( const DictionaryDatum& d ) void 
nest::RecordingDevice::set_status( const DictionaryDatum& d ) { - if ( kernel::manager< SimulationManager >().has_been_prepared() ) + if ( kernel::manager< SimulationManager >.has_been_prepared() ) { throw BadProperty( "Recorder parameters cannot be changed while inside a Prepare/Run/Cleanup context." ); } @@ -152,7 +151,7 @@ nest::RecordingDevice::set_status( const DictionaryDatum& d ) } } - kernel::manager< IOManager >().check_recording_backend_device_status( ptmp.record_to_, backend_params ); + kernel::manager< IOManager >.check_recording_backend_device_status( ptmp.record_to_, backend_params ); // cache all properties accessed by the backend in private member backend_params_->clear(); @@ -167,7 +166,7 @@ nest::RecordingDevice::set_status( const DictionaryDatum& d ) } else { - kernel::manager< IOManager >().enroll_recorder( ptmp.record_to_, *this, d ); + kernel::manager< IOManager >.enroll_recorder( ptmp.record_to_, *this, d ); } // if we get here, temporaries contain consistent set of properties @@ -188,7 +187,7 @@ nest::RecordingDevice::get_status( DictionaryDatum& d ) const if ( get_node_id() == 0 ) // this is a model prototype, not an actual instance { // first get the defaults from the backend - kernel::manager< IOManager >().get_recording_backend_device_defaults( P_.record_to_, d ); + kernel::manager< IOManager >.get_recording_backend_device_defaults( P_.record_to_, d ); // then overwrite with cached parameters for ( auto kv_pair = backend_params_->begin(); kv_pair != backend_params_->end(); ++kv_pair ) @@ -198,7 +197,7 @@ nest::RecordingDevice::get_status( DictionaryDatum& d ) const } else { - kernel::manager< IOManager >().get_recording_backend_device_status( P_.record_to_, *this, d ); + kernel::manager< IOManager >.get_recording_backend_device_status( P_.record_to_, *this, d ); } } @@ -215,6 +214,6 @@ nest::RecordingDevice::write( const Event& event, const std::vector< double >& double_values, const std::vector< long >& long_values ) { - 
kernel::manager< IOManager >().write( P_.record_to_, *this, event, double_values, long_values ); + kernel::manager< IOManager >.write( P_.record_to_, *this, event, double_values, long_values ); S_.n_events_++; } diff --git a/nestkernel/ring_buffer.cpp b/nestkernel/ring_buffer.cpp index b4b8cba504..36e3879cec 100644 --- a/nestkernel/ring_buffer.cpp +++ b/nestkernel/ring_buffer.cpp @@ -25,7 +25,7 @@ nest::RingBuffer::RingBuffer() : buffer_( - kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(), + kernel::manager< ConnectionManager >.get_min_delay() + kernel::manager< ConnectionManager >.get_max_delay(), 0.0 ) { } @@ -34,7 +34,7 @@ void nest::RingBuffer::resize() { size_t size = - kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(); + kernel::manager< ConnectionManager >.get_min_delay() + kernel::manager< ConnectionManager >.get_max_delay(); if ( buffer_.size() != size ) { buffer_.resize( size ); @@ -52,7 +52,7 @@ nest::RingBuffer::clear() nest::MultRBuffer::MultRBuffer() : buffer_( - kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(), + kernel::manager< ConnectionManager >.get_min_delay() + kernel::manager< ConnectionManager >.get_max_delay(), 0.0 ) { } @@ -61,7 +61,7 @@ void nest::MultRBuffer::resize() { size_t size = - kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(); + kernel::manager< ConnectionManager >.get_min_delay() + kernel::manager< ConnectionManager >.get_max_delay(); if ( buffer_.size() != size ) { buffer_.resize( size ); @@ -78,7 +78,7 @@ nest::MultRBuffer::clear() nest::ListRingBuffer::ListRingBuffer() : buffer_( - kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay() ) + kernel::manager< ConnectionManager >.get_min_delay() + kernel::manager< 
ConnectionManager >.get_max_delay() ) { } @@ -86,7 +86,7 @@ void nest::ListRingBuffer::resize() { size_t size = - kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(); + kernel::manager< ConnectionManager >.get_min_delay() + kernel::manager< ConnectionManager >.get_max_delay(); if ( buffer_.size() != size ) { buffer_.resize( size ); diff --git a/nestkernel/ring_buffer.h b/nestkernel/ring_buffer.h index 96c104a8e7..e48f7fbe7c 100644 --- a/nestkernel/ring_buffer.h +++ b/nestkernel/ring_buffer.h @@ -171,7 +171,7 @@ inline double RingBuffer::get_value( const long offs ) { assert( 0 <= offs and static_cast< size_t >( offs ) < buffer_.size() ); - assert( offs < kernel::manager< ConnectionManager >().get_min_delay() ); + assert( offs < kernel::manager< ConnectionManager >.get_min_delay() ); // offs == 0 is beginning of slice, but we have to // take modulo into account when indexing @@ -185,7 +185,7 @@ inline double RingBuffer::get_value_wfr_update( const long offs ) { assert( 0 <= offs and static_cast< size_t >( offs ) < buffer_.size() ); - assert( offs < kernel::manager< ConnectionManager >().get_min_delay() ); + assert( offs < kernel::manager< ConnectionManager >.get_min_delay() ); // offs == 0 is beginning of slice, but we have to // take modulo into account when indexing @@ -197,7 +197,7 @@ RingBuffer::get_value_wfr_update( const long offs ) inline size_t RingBuffer::get_index_( const long d ) const { - const long idx = kernel::manager< EventDeliveryManager >().get_modulo( d ); + const long idx = kernel::manager< EventDeliveryManager >.get_modulo( d ); assert( 0 <= idx ); assert( static_cast< size_t >( idx ) < buffer_.size() ); return idx; @@ -267,7 +267,7 @@ inline double MultRBuffer::get_value( const long offs ) { assert( 0 <= offs and static_cast< size_t >( offs ) < buffer_.size() ); - assert( offs < kernel::manager< ConnectionManager >().get_min_delay() ); + assert( offs < kernel::manager< 
ConnectionManager >.get_min_delay() ); // offs == 0 is beginning of slice, but we have to // take modulo into account when indexing @@ -280,7 +280,7 @@ MultRBuffer::get_value( const long offs ) inline size_t MultRBuffer::get_index_( const long d ) const { - const long idx = kernel::manager< EventDeliveryManager >().get_modulo( d ); + const long idx = kernel::manager< EventDeliveryManager >.get_modulo( d ); assert( 0 <= idx and static_cast< size_t >( idx ) < buffer_.size() ); return idx; } @@ -348,7 +348,7 @@ inline std::list< double >& ListRingBuffer::get_list( const long offs ) { assert( 0 <= offs and static_cast< size_t >( offs ) < buffer_.size() ); - assert( offs < kernel::manager< ConnectionManager >().get_min_delay() ); + assert( offs < kernel::manager< ConnectionManager >.get_min_delay() ); // offs == 0 is beginning of slice, but we have to // take modulo into account when indexing @@ -359,7 +359,7 @@ ListRingBuffer::get_list( const long offs ) inline size_t ListRingBuffer::get_index_( const long d ) const { - const long idx = kernel::manager< EventDeliveryManager >().get_modulo( d ); + const long idx = kernel::manager< EventDeliveryManager >.get_modulo( d ); assert( 0 <= idx ); assert( static_cast< size_t >( idx ) < buffer_.size() ); return idx; @@ -426,7 +426,7 @@ MultiChannelInputBuffer< num_channels >::size() const template < unsigned int num_channels > MultiChannelInputBuffer< num_channels >::MultiChannelInputBuffer() : buffer_( - kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(), + kernel::manager< ConnectionManager >.get_min_delay() + kernel::manager< ConnectionManager >.get_max_delay(), std::array< double, num_channels >() ) { } @@ -436,7 +436,7 @@ void MultiChannelInputBuffer< num_channels >::resize() { const size_t size = - kernel::manager< ConnectionManager >().get_min_delay() + kernel::manager< ConnectionManager >().get_max_delay(); + kernel::manager< ConnectionManager >.get_min_delay() 
+ kernel::manager< ConnectionManager >.get_max_delay(); if ( buffer_.size() != size ) { buffer_.resize( size, std::array< double, num_channels >() ); diff --git a/nestkernel/secondary_event.h b/nestkernel/secondary_event.h index b6163fb5db..f7b340545b 100644 --- a/nestkernel/secondary_event.h +++ b/nestkernel/secondary_event.h @@ -476,7 +476,7 @@ template < typename DataType, typename Subclass > void DataSecondaryEvent< DataType, Subclass >::add_syn_id( const synindex synid ) { - kernel::manager< VPManager >().assert_thread_parallel(); + kernel::manager< VPManager >.assert_thread_parallel(); // This is done during connection model cloning, which happens thread-parallel. // To not risk trashing the set data structure, we let only master register the @@ -493,7 +493,7 @@ template < typename DataType, typename Subclass > void DataSecondaryEvent< DataType, Subclass >::set_coeff_length( const size_t coeff_length ) { - kernel::manager< VPManager >().assert_single_threaded(); + kernel::manager< VPManager >.assert_single_threaded(); coeff_length_ = coeff_length; } diff --git a/nestkernel/send_buffer_position.cpp b/nestkernel/send_buffer_position.cpp index 4c76e59d21..b34a34f3d6 100644 --- a/nestkernel/send_buffer_position.cpp +++ b/nestkernel/send_buffer_position.cpp @@ -26,12 +26,12 @@ #include "send_buffer_position.h" nest::SendBufferPosition::SendBufferPosition() - : begin_( kernel::manager< MPIManager >().get_num_processes(), 0 ) - , end_( kernel::manager< MPIManager >().get_num_processes(), 0 ) - , idx_( kernel::manager< MPIManager >().get_num_processes(), 0 ) + : begin_( kernel::manager< MPIManager >.get_num_processes(), 0 ) + , end_( kernel::manager< MPIManager >.get_num_processes(), 0 ) + , idx_( kernel::manager< MPIManager >.get_num_processes(), 0 ) { - const size_t num_procs = kernel::manager< MPIManager >().get_num_processes(); - const size_t send_recv_count_per_rank = kernel::manager< MPIManager >().get_send_recv_count_spike_data_per_rank(); + const size_t 
num_procs = kernel::manager< MPIManager >.get_num_processes(); + const size_t send_recv_count_per_rank = kernel::manager< MPIManager >.get_send_recv_count_spike_data_per_rank(); for ( size_t rank = 0; rank < num_procs; ++rank ) { diff --git a/nestkernel/simulation_manager.cpp b/nestkernel/simulation_manager.cpp index eb14b15959..8061d0f645 100644 --- a/nestkernel/simulation_manager.cpp +++ b/nestkernel/simulation_manager.cpp @@ -177,7 +177,7 @@ nest::SimulationManager::set_status( const DictionaryDatum& d ) from_step_ = 0; slice_ = 0; // clear all old spikes - kernel::manager< EventDeliveryManager >().configure_spike_data_buffers(); + kernel::manager< EventDeliveryManager >.configure_spike_data_buffers(); } } @@ -194,7 +194,7 @@ nest::SimulationManager::set_status( const DictionaryDatum& d ) if ( tics_per_ms_updated or res_updated ) { std::vector< std::string > errors; - if ( kernel::manager< NodeManager >().size() > 0 ) + if ( kernel::manager< NodeManager >.size() > 0 ) { errors.push_back( "Nodes have already been created" ); } @@ -202,7 +202,7 @@ nest::SimulationManager::set_status( const DictionaryDatum& d ) { errors.push_back( "Network has been simulated" ); } - if ( kernel::manager< ModelManager >().are_model_defaults_modified() ) + if ( kernel::manager< ModelManager >.are_model_defaults_modified() ) { errors.push_back( "Model defaults were modified" ); } @@ -241,8 +241,8 @@ nest::SimulationManager::set_status( const DictionaryDatum& d ) // adjust to new resolution clock_.calibrate(); // adjust delays in the connection system to new resolution - kernel::manager< ConnectionManager >().calibrate( time_converter ); - kernel::manager< ModelManager >().calibrate( time_converter ); + kernel::manager< ConnectionManager >.calibrate( time_converter ); + kernel::manager< ModelManager >.calibrate( time_converter ); std::string msg = String::compose( "Tics per ms and resolution changed from %1 tics and %2 ms to %3 tics and %4 ms.", @@ -278,8 +278,8 @@ 
nest::SimulationManager::set_status( const DictionaryDatum& d ) Time::set_resolution( resd ); clock_.calibrate(); // adjust to new resolution // adjust delays in the connection system to new resolution - kernel::manager< ConnectionManager >().calibrate( time_converter ); - kernel::manager< ModelManager >().calibrate( time_converter ); + kernel::manager< ConnectionManager >.calibrate( time_converter ); + kernel::manager< ModelManager >.calibrate( time_converter ); std::string msg = String::compose( "Temporal resolution changed from %1 to %2 ms.", old_res, resd ); LOG( M_INFO, "SimulationManager::set_status", msg ); @@ -305,7 +305,7 @@ nest::SimulationManager::set_status( const DictionaryDatum& d ) bool wfr; if ( updateValue< bool >( d, names::use_wfr, wfr ) ) { - if ( kernel::manager< NodeManager >().size() > 0 ) + if ( kernel::manager< NodeManager >.size() > 0 ) { LOG( M_ERROR, "SimulationManager::set_status", @@ -339,7 +339,7 @@ nest::SimulationManager::set_status( const DictionaryDatum& d ) "relaxation is disabled. Set use_wfr to true first." 
); throw KernelException(); } - else if ( kernel::manager< ConnectionManager >().get_num_connections() != 0 ) + else if ( kernel::manager< ConnectionManager >.get_num_connections() != 0 ) { LOG( M_ERROR, "SimulationManager::set_status", @@ -507,7 +507,7 @@ nest::SimulationManager::get_status( DictionaryDatum& d ) void nest::SimulationManager::prepare() { - assert( kernel::manager< KernelManager >().is_initialized() ); + assert( kernel::manager< KernelManager >.is_initialized() ); if ( prepared_ ) { @@ -528,7 +528,7 @@ nest::SimulationManager::prepare() // reset profiling timers reset_timers_for_dynamics(); - kernel::manager< EventDeliveryManager >().reset_timers_for_dynamics(); + kernel::manager< EventDeliveryManager >.reset_timers_for_dynamics(); t_real_ = 0; t_slice_begin_ = timeval(); // set to timeval{0, 0} as unset flag @@ -536,39 +536,39 @@ nest::SimulationManager::prepare() // find shortest and longest delay across all MPI processes // this call sets the member variables - kernel::manager< ConnectionManager >().update_delay_extrema_(); - kernel::manager< EventDeliveryManager >().init_moduli(); + kernel::manager< ConnectionManager >.update_delay_extrema_(); + kernel::manager< EventDeliveryManager >.init_moduli(); // if at the beginning of a simulation, set up spike buffers if ( not simulated_ ) { - kernel::manager< EventDeliveryManager >().configure_spike_data_buffers(); + kernel::manager< EventDeliveryManager >.configure_spike_data_buffers(); } - kernel::manager< NodeManager >().ensure_valid_thread_local_ids(); - kernel::manager< NodeManager >().prepare_nodes(); + kernel::manager< NodeManager >.ensure_valid_thread_local_ids(); + kernel::manager< NodeManager >.prepare_nodes(); // we have to do enter_runtime after prepare_nodes, since we use // calibrate to map the ports of MUSIC devices, which has to be done // before enter_runtime if ( not simulated_ ) // only enter the runtime mode once { - double tick = Time::get_resolution().get_ms() * kernel::manager< 
ConnectionManager >().get_min_delay(); - kernel::manager< MUSICManager >().enter_runtime( tick ); + double tick = Time::get_resolution().get_ms() * kernel::manager< ConnectionManager >.get_min_delay(); + kernel::manager< MUSICManager >.enter_runtime( tick ); } prepared_ = true; // check whether waveform relaxation is used on any MPI process; // needs to be called before update_connection_intrastructure_since // it resizes coefficient arrays for secondary events - kernel::manager< NodeManager >().check_wfr_use(); + kernel::manager< NodeManager >.check_wfr_use(); - if ( kernel::manager< NodeManager >().have_nodes_changed() - or kernel::manager< ConnectionManager >().connections_have_changed() ) + if ( kernel::manager< NodeManager >.have_nodes_changed() + or kernel::manager< ConnectionManager >.connections_have_changed() ) { #pragma omp parallel { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); update_connection_infrastructure( tid ); } // of omp parallel } @@ -620,7 +620,7 @@ nest::SimulationManager::run( Time const& t ) { assert_valid_simtime( t ); - kernel::manager< RandomManager >().check_rng_synchrony(); + kernel::manager< RandomManager >.check_rng_synchrony(); if ( not prepared_ ) { @@ -637,10 +637,10 @@ nest::SimulationManager::run( Time const& t ) return; } - kernel::manager< IOManager >().pre_run_hook(); + kernel::manager< IOManager >.pre_run_hook(); // Reset local spike counters within event_delivery_manager - kernel::manager< EventDeliveryManager >().reset_counters(); + kernel::manager< EventDeliveryManager >.reset_counters(); sw_simulate_.start(); @@ -648,14 +648,14 @@ nest::SimulationManager::run( Time const& t ) // of a simulation, it has been reset properly elsewhere. If // a simulation was ended and is now continued, from_step_ will // have the proper value. to_step_ is set as in advance_time(). 
- to_step_ = std::min( from_step_ + to_do_, kernel::manager< ConnectionManager >().get_min_delay() ); + to_step_ = std::min( from_step_ + to_do_, kernel::manager< ConnectionManager >.get_min_delay() ); // Warn about possible inconsistencies, see #504. // This test cannot come any earlier, because we first need to compute // min_delay_ // above. - if ( t.get_steps() % kernel::manager< ConnectionManager >().get_min_delay() != 0 ) + if ( t.get_steps() % kernel::manager< ConnectionManager >.get_min_delay() != 0 ) { LOG( M_WARNING, "SimulationManager::run", @@ -669,8 +669,8 @@ nest::SimulationManager::run( Time const& t ) call_update_(); - kernel::manager< IOManager >().post_run_hook(); - kernel::manager< RandomManager >().check_rng_synchrony(); + kernel::manager< IOManager >.post_run_hook(); + kernel::manager< RandomManager >.check_rng_synchrony(); sw_simulate_.stop(); } @@ -691,30 +691,30 @@ nest::SimulationManager::cleanup() return; } - kernel::manager< NodeManager >().finalize_nodes(); + kernel::manager< NodeManager >.finalize_nodes(); prepared_ = false; } void nest::SimulationManager::call_update_() { - assert( kernel::manager< KernelManager >().is_initialized() and not inconsistent_state_ ); + assert( kernel::manager< KernelManager >.is_initialized() and not inconsistent_state_ ); std::ostringstream os; double t_sim = to_do_ * Time::get_resolution().get_ms(); - size_t num_active_nodes = kernel::manager< NodeManager >().get_num_active_nodes(); + size_t num_active_nodes = kernel::manager< NodeManager >.get_num_active_nodes(); os << "Number of local nodes: " << num_active_nodes << std::endl; os << "Simulation time (ms): " << t_sim; #ifdef _OPENMP - os << std::endl << "Number of OpenMP threads: " << kernel::manager< VPManager >().get_num_threads(); + os << std::endl << "Number of OpenMP threads: " << kernel::manager< VPManager >.get_num_threads(); #else os << std::endl << "Not using OpenMP"; #endif #ifdef HAVE_MPI - os << std::endl << "Number of MPI processes: " << 
kernel::manager< MPIManager >().get_num_processes(); + os << std::endl << "Number of MPI processes: " << kernel::manager< MPIManager >.get_num_processes(); #else os << std::endl << "Not using MPI"; #endif @@ -746,7 +746,7 @@ nest::SimulationManager::call_update_() std::cout << std::endl; } - kernel::manager< MPIManager >().synchronize(); + kernel::manager< MPIManager >.synchronize(); LOG( M_INFO, "SimulationManager::run", "Simulation finished." ); } @@ -760,10 +760,10 @@ nest::SimulationManager::update_connection_infrastructure( const size_t tid ) sw_communicate_prepare_.start(); - kernel::manager< ConnectionManager >().sort_connections( tid ); + kernel::manager< ConnectionManager >.sort_connections( tid ); sw_gather_target_data_.start(); - kernel::manager< ConnectionManager >().restructure_connection_tables( tid ); - kernel::manager< ConnectionManager >().collect_compressed_spike_data( tid ); + kernel::manager< ConnectionManager >.restructure_connection_tables( tid ); + kernel::manager< ConnectionManager >.collect_compressed_spike_data( tid ); sw_gather_target_data_.stop(); get_omp_synchronization_construction_stopwatch().start(); @@ -772,22 +772,22 @@ nest::SimulationManager::update_connection_infrastructure( const size_t tid ) #pragma omp single { - kernel::manager< ConnectionManager >().compute_target_data_buffer_size(); - kernel::manager< EventDeliveryManager >().resize_send_recv_buffers_target_data(); + kernel::manager< ConnectionManager >.compute_target_data_buffer_size(); + kernel::manager< EventDeliveryManager >.resize_send_recv_buffers_target_data(); // check whether primary and secondary connections exists on any // compute node - kernel::manager< ConnectionManager >().sync_has_primary_connections(); - kernel::manager< ConnectionManager >().check_secondary_connections_exist(); + kernel::manager< ConnectionManager >.sync_has_primary_connections(); + kernel::manager< ConnectionManager >.check_secondary_connections_exist(); } - if ( kernel::manager< 
ConnectionManager >().secondary_connections_exist() ) + if ( kernel::manager< ConnectionManager >.secondary_connections_exist() ) { get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier get_omp_synchronization_construction_stopwatch().stop(); - kernel::manager< ConnectionManager >().compute_compressed_secondary_recv_buffer_positions( tid ); + kernel::manager< ConnectionManager >.compute_compressed_secondary_recv_buffer_positions( tid ); get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier @@ -795,8 +795,8 @@ nest::SimulationManager::update_connection_infrastructure( const size_t tid ) #pragma omp single { - kernel::manager< MPIManager >().communicate_recv_counts_secondary_events(); - kernel::manager< EventDeliveryManager >().configure_secondary_buffers(); + kernel::manager< MPIManager >.communicate_recv_counts_secondary_events(); + kernel::manager< EventDeliveryManager >.configure_secondary_buffers(); } } @@ -804,25 +804,25 @@ nest::SimulationManager::update_connection_infrastructure( const size_t tid ) // communicate connection information from postsynaptic to // presynaptic side - if ( kernel::manager< ConnectionManager >().use_compressed_spikes() ) + if ( kernel::manager< ConnectionManager >.use_compressed_spikes() ) { #pragma omp barrier #pragma omp single { - kernel::manager< ConnectionManager >().initialize_iteration_state(); // could possibly be combined with s'th above + kernel::manager< ConnectionManager >.initialize_iteration_state(); // could possibly be combined with s'th above } - kernel::manager< EventDeliveryManager >().gather_target_data_compressed( tid ); + kernel::manager< EventDeliveryManager >.gather_target_data_compressed( tid ); } else { - kernel::manager< EventDeliveryManager >().gather_target_data( tid ); + kernel::manager< EventDeliveryManager >.gather_target_data( tid ); } sw_gather_target_data_.stop(); - if ( kernel::manager< ConnectionManager >().secondary_connections_exist() ) + if ( 
kernel::manager< ConnectionManager >.secondary_connections_exist() ) { - kernel::manager< ConnectionManager >().compress_secondary_send_buffer_pos( tid ); + kernel::manager< ConnectionManager >.compress_secondary_send_buffer_pos( tid ); } get_omp_synchronization_construction_stopwatch().start(); @@ -830,9 +830,9 @@ nest::SimulationManager::update_connection_infrastructure( const size_t tid ) get_omp_synchronization_construction_stopwatch().stop(); #pragma omp single { - kernel::manager< ConnectionManager >().clear_compressed_spike_data_map(); - kernel::manager< NodeManager >().set_have_nodes_changed( false ); - kernel::manager< ConnectionManager >().unset_connections_have_changed(); + kernel::manager< ConnectionManager >.clear_compressed_spike_data_map(); + kernel::manager< NodeManager >.set_have_nodes_changed( false ); + kernel::manager< ConnectionManager >.unset_connections_have_changed(); } sw_communicate_prepare_.stop(); } @@ -855,12 +855,12 @@ nest::SimulationManager::update_() bool update_time_limit_exceeded = false; std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised( - kernel::manager< VPManager >().get_num_threads() ); + kernel::manager< VPManager >.get_num_threads() ); // parallel section begins #pragma omp parallel { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); // We update in a parallel region. Therefore, we need to catch // exceptions here and then handle them after the parallel region. @@ -883,18 +883,18 @@ nest::SimulationManager::update_() // reach target neurons before spikes are propagated through eprop synapses. // This sequence safeguards the gradient computation from missing critical information // from the time step preceding the arrival of the spike triggering the weight update. 
- if ( kernel::manager< ConnectionManager >().secondary_connections_exist() ) + if ( kernel::manager< ConnectionManager >.secondary_connections_exist() ) { sw_deliver_secondary_data_.start(); - kernel::manager< EventDeliveryManager >().deliver_secondary_events( tid, false ); + kernel::manager< EventDeliveryManager >.deliver_secondary_events( tid, false ); sw_deliver_secondary_data_.stop(); } - if ( kernel::manager< ConnectionManager >().has_primary_connections() ) + if ( kernel::manager< ConnectionManager >.has_primary_connections() ) { sw_deliver_spike_data_.start(); // Deliver spikes from receive buffer to ring buffers. - kernel::manager< EventDeliveryManager >().deliver_events( tid ); + kernel::manager< EventDeliveryManager >.deliver_events( tid ); sw_deliver_spike_data_.stop(); } @@ -919,11 +919,11 @@ nest::SimulationManager::update_() // MUSIC *before* MUSIC time is advanced if ( slice_ > 0 ) { - kernel::manager< MUSICManager >().advance_music_time(); + kernel::manager< MUSICManager >.advance_music_time(); } // the following could be made thread-safe - kernel::manager< MUSICManager >().update_music_event_handlers( clock_, from_step_, to_step_ ); + kernel::manager< MUSICManager >.update_music_event_handlers( clock_, from_step_, to_step_ ); } // end of master section, all threads have to synchronize at this point #pragma omp barrier @@ -933,8 +933,8 @@ nest::SimulationManager::update_() // preliminary update of nodes that use waveform relaxtion, only // necessary if secondary connections exist and any node uses // wfr - if ( kernel::manager< ConnectionManager >().secondary_connections_exist() - and kernel::manager< NodeManager >().wfr_is_used() ) + if ( kernel::manager< ConnectionManager >.secondary_connections_exist() + and kernel::manager< NodeManager >.wfr_is_used() ) { #pragma omp single { @@ -945,15 +945,15 @@ nest::SimulationManager::update_() // needs to be done in omp single since to_step_ is a scheduler // variable old_to_step = to_step_; - if ( 
to_step_ < kernel::manager< ConnectionManager >().get_min_delay() ) + if ( to_step_ < kernel::manager< ConnectionManager >.get_min_delay() ) { - to_step_ = kernel::manager< ConnectionManager >().get_min_delay(); + to_step_ = kernel::manager< ConnectionManager >.get_min_delay(); } } bool max_iterations_reached = true; const std::vector< Node* >& thread_local_wfr_nodes = - kernel::manager< NodeManager >().get_wfr_nodes_on_thread( tid ); + kernel::manager< NodeManager >.get_wfr_nodes_on_thread( tid ); for ( long n = 0; n < wfr_max_iterations_; ++n ) { bool done_p = true; @@ -988,7 +988,7 @@ nest::SimulationManager::update_() } // gather SecondaryEvents (e.g. GapJunctionEvents) - kernel::manager< EventDeliveryManager >().gather_secondary_events( done_all ); + kernel::manager< EventDeliveryManager >.gather_secondary_events( done_all ); // reset done and done_all //(needs to be in the single threaded part) @@ -998,7 +998,7 @@ nest::SimulationManager::update_() // deliver SecondaryEvents generated during wfr_update // returns the done value over all threads - done_p = kernel::manager< EventDeliveryManager >().deliver_secondary_events( tid, true ); + done_p = kernel::manager< EventDeliveryManager >.deliver_secondary_events( tid, true ); if ( done_p ) { @@ -1022,14 +1022,14 @@ nest::SimulationManager::update_() } // of if(wfr_is_used) // end of preliminary update - if ( kernel::manager< SPManager >().is_structural_plasticity_enabled() + if ( kernel::manager< SPManager >.is_structural_plasticity_enabled() and ( std::fmod( Time( Time::step( clock_.get_steps() + from_step_ ) ).get_ms(), - kernel::manager< SPManager >().get_structural_plasticity_update_interval() ) + kernel::manager< SPManager >.get_structural_plasticity_update_interval() ) == 0 ) ) { #pragma omp barrier - for ( SparseNodeArray::const_iterator i = kernel::manager< NodeManager >().get_local_nodes( tid ).begin(); - i != kernel::manager< NodeManager >().get_local_nodes( tid ).end(); + for ( 
SparseNodeArray::const_iterator i = kernel::manager< NodeManager >.get_local_nodes( tid ).begin(); + i != kernel::manager< NodeManager >.get_local_nodes( tid ).end(); ++i ) { Node* node = i->get_node(); @@ -1040,11 +1040,11 @@ nest::SimulationManager::update_() get_omp_synchronization_simulation_stopwatch().stop(); #pragma omp single { - kernel::manager< SPManager >().update_structural_plasticity(); + kernel::manager< SPManager >.update_structural_plasticity(); } // Remove 10% of the vacant elements - for ( SparseNodeArray::const_iterator i = kernel::manager< NodeManager >().get_local_nodes( tid ).begin(); - i != kernel::manager< NodeManager >().get_local_nodes( tid ).end(); + for ( SparseNodeArray::const_iterator i = kernel::manager< NodeManager >.get_local_nodes( tid ).begin(); + i != kernel::manager< NodeManager >.get_local_nodes( tid ).end(); ++i ) { Node* node = i->get_node(); @@ -1060,7 +1060,7 @@ nest::SimulationManager::update_() } // of structural plasticity sw_update_.start(); - const SparseNodeArray& thread_local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); + const SparseNodeArray& thread_local_nodes = kernel::manager< NodeManager >.get_local_nodes( tid ); for ( SparseNodeArray::const_iterator n = thread_local_nodes.begin(); n != thread_local_nodes.end(); ++n ) { @@ -1083,18 +1083,18 @@ nest::SimulationManager::update_() #pragma omp master { // gather and deliver only at end of slice, i.e., end of min_delay step - if ( to_step_ == kernel::manager< ConnectionManager >().get_min_delay() ) + if ( to_step_ == kernel::manager< ConnectionManager >.get_min_delay() ) { - if ( kernel::manager< ConnectionManager >().has_primary_connections() ) + if ( kernel::manager< ConnectionManager >.has_primary_connections() ) { sw_gather_spike_data_.start(); - kernel::manager< EventDeliveryManager >().gather_spike_data(); + kernel::manager< EventDeliveryManager >.gather_spike_data(); sw_gather_spike_data_.stop(); } - if ( kernel::manager< ConnectionManager 
>().secondary_connections_exist() ) + if ( kernel::manager< ConnectionManager >.secondary_connections_exist() ) { sw_gather_secondary_data_.start(); - kernel::manager< EventDeliveryManager >().gather_secondary_events( true ); + kernel::manager< EventDeliveryManager >.gather_secondary_events( true ); sw_gather_secondary_data_.stop(); } } @@ -1120,7 +1120,7 @@ nest::SimulationManager::update_() // if block to avoid omp barrier if SIONLIB is not used #ifdef HAVE_SIONLIB - kernel::manager< IOManager >().post_step_hook(); + kernel::manager< IOManager >.post_step_hook(); // enforce synchronization after post-step activities of the recording backends get_omp_synchronization_simulation_stopwatch().start(); #pragma omp barrier @@ -1138,8 +1138,8 @@ nest::SimulationManager::update_() } while ( to_do_ > 0 and not update_time_limit_exceeded and not exceptions_raised.at( tid ) ); // End of the slice, we update the number of synaptic elements - for ( SparseNodeArray::const_iterator i = kernel::manager< NodeManager >().get_local_nodes( tid ).begin(); - i != kernel::manager< NodeManager >().get_local_nodes( tid ).end(); + for ( SparseNodeArray::const_iterator i = kernel::manager< NodeManager >.get_local_nodes( tid ).begin(); + i != kernel::manager< NodeManager >.get_local_nodes( tid ).end(); ++i ) { Node* node = i->get_node(); @@ -1160,7 +1160,7 @@ nest::SimulationManager::update_() } // check if any exceptions have been raised - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { if ( exceptions_raised.at( tid ).get() ) { @@ -1178,11 +1178,11 @@ nest::SimulationManager::advance_time_() to_do_ -= to_step_ - from_step_; // advance clock, update modulos, slice counter only if slice completed - if ( to_step_ == kernel::manager< ConnectionManager >().get_min_delay() ) + if ( to_step_ == kernel::manager< ConnectionManager >.get_min_delay() ) { - clock_ += 
Time::step( kernel::manager< ConnectionManager >().get_min_delay() ); + clock_ += Time::step( kernel::manager< ConnectionManager >.get_min_delay() ); ++slice_; - kernel::manager< EventDeliveryManager >().update_moduli(); + kernel::manager< EventDeliveryManager >.update_moduli(); from_step_ = 0; } else @@ -1192,17 +1192,17 @@ nest::SimulationManager::advance_time_() long end_sim = from_step_ + to_do_; - if ( kernel::manager< ConnectionManager >().get_min_delay() < end_sim ) + if ( kernel::manager< ConnectionManager >.get_min_delay() < end_sim ) { // update to end of time slice - to_step_ = kernel::manager< ConnectionManager >().get_min_delay(); + to_step_ = kernel::manager< ConnectionManager >.get_min_delay(); } else { to_step_ = end_sim; // update to end of simulation time } - assert( to_step_ - from_step_ <= kernel::manager< ConnectionManager >().get_min_delay() ); + assert( to_step_ - from_step_ <= kernel::manager< ConnectionManager >.get_min_delay() ); } void @@ -1235,5 +1235,5 @@ nest::SimulationManager::print_progress_() nest::Time const nest::SimulationManager::get_previous_slice_origin() const { - return clock_ - Time::step( kernel::manager< ConnectionManager >().get_min_delay() ); + return clock_ - Time::step( kernel::manager< ConnectionManager >.get_min_delay() ); } diff --git a/nestkernel/slice_ring_buffer.cpp b/nestkernel/slice_ring_buffer.cpp index 7ed103b5d9..f4bda5ac46 100644 --- a/nestkernel/slice_ring_buffer.cpp +++ b/nestkernel/slice_ring_buffer.cpp @@ -39,9 +39,9 @@ void nest::SliceRingBuffer::resize() { long newsize = - static_cast< long >( std::ceil( static_cast< double >( kernel::manager< ConnectionManager >().get_min_delay() - + kernel::manager< ConnectionManager >().get_max_delay() ) - / kernel::manager< ConnectionManager >().get_min_delay() ) ); + static_cast< long >( std::ceil( static_cast< double >( kernel::manager< ConnectionManager >.get_min_delay() + + kernel::manager< ConnectionManager >.get_max_delay() ) + / kernel::manager< 
ConnectionManager >.get_min_delay() ) ); if ( queue_.size() != static_cast< unsigned long >( newsize ) ) { queue_.resize( newsize ); @@ -70,7 +70,7 @@ void nest::SliceRingBuffer::prepare_delivery() { // vector to deliver from in this slice - deliver_ = &( queue_[ kernel::manager< EventDeliveryManager >().get_slice_modulo( 0 ) ] ); + deliver_ = &( queue_[ kernel::manager< EventDeliveryManager >.get_slice_modulo( 0 ) ] ); // sort events, first event last std::sort( deliver_->begin(), deliver_->end(), std::greater< SpikeInfo >() ); @@ -80,7 +80,7 @@ void nest::SliceRingBuffer::discard_events() { // vector to deliver from in this slice - deliver_ = &( queue_[ kernel::manager< EventDeliveryManager >().get_slice_modulo( 0 ) ] ); + deliver_ = &( queue_[ kernel::manager< EventDeliveryManager >.get_slice_modulo( 0 ) ] ); deliver_->clear(); } diff --git a/nestkernel/slice_ring_buffer.h b/nestkernel/slice_ring_buffer.h index 9576e57301..b4f6843d09 100644 --- a/nestkernel/slice_ring_buffer.h +++ b/nestkernel/slice_ring_buffer.h @@ -158,7 +158,7 @@ class SliceRingBuffer inline void SliceRingBuffer::add_spike( const long rel_delivery, const long stamp, const double ps_offset, const double weight ) { - const long idx = kernel::manager< EventDeliveryManager >().get_slice_modulo( rel_delivery ); + const long idx = kernel::manager< EventDeliveryManager >.get_slice_modulo( rel_delivery ); assert( static_cast< size_t >( idx ) < queue_.size() ); assert( ps_offset >= 0 ); diff --git a/nestkernel/sonata_connector.cpp b/nestkernel/sonata_connector.cpp index 8d7e4b7c97..7932f09ee8 100644 --- a/nestkernel/sonata_connector.cpp +++ b/nestkernel/sonata_connector.cpp @@ -404,7 +404,7 @@ SonataConnector::connect_chunk_( const hsize_t hyperslab_size, const hsize_t off } std::vector< std::shared_ptr< WrappedThreadException > > exceptions_raised_( - kernel::manager< VPManager >().get_num_threads() ); + kernel::manager< VPManager >.get_num_threads() ); // Retrieve the correct NodeCollections const 
auto nest_nodes = getValue< DictionaryDatum >( graph_specs_->lookup( "nodes" ) ); @@ -441,7 +441,7 @@ SonataConnector::connect_chunk_( const hsize_t hyperslab_size, const hsize_t off #pragma omp parallel { - const auto tid = kernel::manager< VPManager >().get_thread_id(); + const auto tid = kernel::manager< VPManager >.get_thread_id(); RngPtr rng = get_vp_specific_rng( tid ); try @@ -453,7 +453,7 @@ SonataConnector::connect_chunk_( const hsize_t hyperslab_size, const hsize_t off const auto sonata_tgt_id = tgt_node_id_data_subset[ i ]; const size_t tnode_id = ( *( tnode_begin + sonata_tgt_id ) ).node_id; - if ( not kernel::manager< VPManager >().is_node_id_vp_local( tnode_id ) ) + if ( not kernel::manager< VPManager >.is_node_id_vp_local( tnode_id ) ) { continue; } @@ -461,7 +461,7 @@ SonataConnector::connect_chunk_( const hsize_t hyperslab_size, const hsize_t off const auto sonata_src_id = src_node_id_data_subset[ i ]; const size_t snode_id = ( *( snode_begin + sonata_src_id ) ).node_id; - Node* target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id, tid ); + Node* target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id, tid ); const size_t target_thread = target->get_thread(); const auto edge_type_id = edge_type_id_data_subset[ i ]; @@ -472,7 +472,7 @@ SonataConnector::connect_chunk_( const hsize_t hyperslab_size, const hsize_t off get_synapse_params_( snode_id, *target, target_thread, rng, edge_type_id ); - kernel::manager< ConnectionManager >().connect( snode_id, + kernel::manager< ConnectionManager >.connect( snode_id, target, target_thread, edge_type_id_2_syn_model_.at( edge_type_id ), @@ -492,7 +492,7 @@ SonataConnector::connect_chunk_( const hsize_t hyperslab_size, const hsize_t off } // end parallel region // Check if any exceptions have been raised - for ( size_t thr = 0; thr < kernel::manager< VPManager >().get_num_threads(); ++thr ) + for ( size_t thr = 0; thr < kernel::manager< VPManager >.get_num_threads(); ++thr ) { if ( 
exceptions_raised_.at( thr ).get() ) { @@ -574,7 +574,7 @@ SonataConnector::create_edge_type_id_2_syn_spec_( DictionaryDatum edge_params ) const auto syn_name = getValue< std::string >( ( *d )[ "synapse_model" ] ); // The following call will throw "UnknownSynapseType" if syn_name is not naming a known model - const size_t synapse_model_id = kernel::manager< ModelManager >().get_synapse_model_id( syn_name ); + const size_t synapse_model_id = kernel::manager< ModelManager >.get_synapse_model_id( syn_name ); set_synapse_params_( d, synapse_model_id, type_id ); edge_type_id_2_syn_model_[ type_id ] = synapse_model_id; @@ -584,7 +584,7 @@ SonataConnector::create_edge_type_id_2_syn_spec_( DictionaryDatum edge_params ) void SonataConnector::set_synapse_params_( DictionaryDatum syn_dict, size_t synapse_model_id, int type_id ) { - DictionaryDatum syn_defaults = kernel::manager< ModelManager >().get_connector_defaults( synapse_model_id ); + DictionaryDatum syn_defaults = kernel::manager< ModelManager >.get_connector_defaults( synapse_model_id ); ConnParameterMap synapse_params; for ( Dictionary::const_iterator default_it = syn_defaults->begin(); default_it != syn_defaults->end(); ++default_it ) @@ -599,13 +599,13 @@ SonataConnector::set_synapse_params_( DictionaryDatum syn_dict, size_t synapse_m { synapse_params[ param_name ] = std::shared_ptr< ConnParameter >( - ConnParameter::create( ( *syn_dict )[ param_name ], kernel::manager< VPManager >().get_num_threads() ) ); + ConnParameter::create( ( *syn_dict )[ param_name ], kernel::manager< VPManager >.get_num_threads() ) ); } } // Now create dictionary with dummy values that we will use to pass settings to the synapses created. We // create it here once to avoid re-creating the object over and over again. 
- edge_type_id_2_param_dicts_[ type_id ].resize( kernel::manager< VPManager >().get_num_threads(), nullptr ); + edge_type_id_2_param_dicts_[ type_id ].resize( kernel::manager< VPManager >.get_num_threads(), nullptr ); edge_type_id_2_syn_spec_[ type_id ] = synapse_params; // TODO: Once NEST is SLIless, the below loop over threads should be parallelizable. In order to parallelize, the @@ -613,7 +613,7 @@ SonataConnector::set_synapse_params_( DictionaryDatum syn_dict, size_t synapse_m // region. Currently, creation of NumericDatum objects is not thread-safe because sli::pool memory is a static // member variable; thus is also the new operator a static member function. // Note that this also applies to the equivalent loop in conn_builder.cpp - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { edge_type_id_2_param_dicts_[ type_id ][ tid ] = new Dictionary; diff --git a/nestkernel/source_table.cpp b/nestkernel/source_table.cpp index 38f6da87b5..ec81454064 100644 --- a/nestkernel/source_table.cpp +++ b/nestkernel/source_table.cpp @@ -41,7 +41,7 @@ void nest::SourceTable::initialize() { assert( sizeof( Source ) == 8 ); - const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >.get_num_threads(); sources_.resize( num_threads ); is_cleared_.initialize( num_threads, false ); saved_entry_point_.initialize( num_threads, false ); @@ -51,7 +51,7 @@ nest::SourceTable::initialize() #pragma omp parallel { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); sources_.at( tid ).resize( 0 ); resize_sources(); compressible_sources_.at( tid ).resize( 0 ); @@ -93,7 +93,7 @@ nest::SourceTablePosition nest::SourceTable::find_maximal_position() const { SourceTablePosition max_position( -1, -1, -1 ); - for ( 
size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { if ( max_position < saved_positions_[ tid ] ) { @@ -152,7 +152,7 @@ nest::SourceTable::clean( const size_t tid ) size_t nest::SourceTable::get_node_id( const size_t tid, const synindex syn_id, const size_t lcid ) const { - if ( not kernel::manager< ConnectionManager >().get_keep_source_table() ) + if ( not kernel::manager< ConnectionManager >.get_keep_source_table() ) { throw KernelException( "Cannot use SourceTable::get_node_id when get_keep_source_table is false" ); } @@ -211,7 +211,7 @@ nest::SourceTable::compute_buffer_pos_for_unique_secondary_sources( const size_t // targets on the same process, but different threads for ( size_t syn_id = 0; syn_id < sources_[ tid ].size(); ++syn_id ) { - const ConnectorModel& conn_model = kernel::manager< ModelManager >().get_connection_model( syn_id, tid ); + const ConnectorModel& conn_model = kernel::manager< ModelManager >.get_connection_model( syn_id, tid ); const bool is_primary = conn_model.has_property( ConnectionModelProperties::IS_PRIMARY ); if ( not is_primary ) @@ -227,25 +227,25 @@ nest::SourceTable::compute_buffer_pos_for_unique_secondary_sources( const size_t } } } - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().start(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().start(); #pragma omp barrier - kernel::manager< SimulationManager >().get_omp_synchronization_construction_stopwatch().stop(); + kernel::manager< SimulationManager >.get_omp_synchronization_construction_stopwatch().stop(); #pragma omp single { // compute receive buffer positions for all unique pairs of source // node ID and synapse-type id on this MPI rank std::vector< int > recv_counts_secondary_events_in_int_per_rank( - kernel::manager< MPIManager >().get_num_processes(), 0 ); + kernel::manager< 
MPIManager >.get_num_processes(), 0 ); for ( std::set< std::pair< size_t, size_t > >::const_iterator cit = ( *unique_secondary_source_node_id_syn_id ).begin(); cit != ( *unique_secondary_source_node_id_syn_id ).end(); ++cit ) { - const size_t source_rank = kernel::manager< MPIManager >().get_process_id_of_node_id( cit->first ); + const size_t source_rank = kernel::manager< MPIManager >.get_process_id_of_node_id( cit->first ); const size_t event_size = - kernel::manager< ModelManager >().get_secondary_event_prototype( cit->second, tid ).size(); + kernel::manager< ModelManager >.get_secondary_event_prototype( cit->second, tid ).size(); buffer_pos_of_source_node_id_syn_id.insert( std::make_pair( pack_source_node_id_and_syn_id( cit->first, cit->second ), @@ -261,7 +261,7 @@ nest::SourceTable::compute_buffer_pos_for_unique_secondary_sources( const size_t ++recv_count; } - kernel::manager< MPIManager >().set_recv_counts_secondary_events_in_int_per_rank( + kernel::manager< MPIManager >.set_recv_counts_secondary_events_in_int_per_rank( recv_counts_secondary_events_in_int_per_rank ); delete unique_secondary_source_node_id_syn_id; } // of omp single @@ -270,9 +270,9 @@ nest::SourceTable::compute_buffer_pos_for_unique_secondary_sources( const size_t void nest::SourceTable::resize_sources() { - kernel::manager< VPManager >().assert_thread_parallel(); - sources_.at( kernel::manager< VPManager >().get_thread_id() ) - .resize( kernel::manager< ModelManager >().get_num_connection_models() ); + kernel::manager< VPManager >.assert_thread_parallel(); + sources_.at( kernel::manager< VPManager >.get_thread_id() ) + .resize( kernel::manager< ModelManager >.get_num_connection_models() ); } bool @@ -280,7 +280,7 @@ nest::SourceTable::source_should_be_processed_( const size_t rank_start, const size_t rank_end, const Source& source ) const { - const size_t source_rank = kernel::manager< MPIManager >().get_process_id_of_node_id( source.get_node_id() ); + const size_t source_rank = 
kernel::manager< MPIManager >.get_process_id_of_node_id( source.get_node_id() ); return not( source.is_processed() or source.is_disabled() @@ -321,14 +321,14 @@ nest::SourceTable::populate_target_data_fields_( const SourceTablePosition& curr const size_t source_rank, TargetData& next_target_data ) const { - assert( not kernel::manager< ConnectionManager >().use_compressed_spikes() ); // handled elsewhere + assert( not kernel::manager< ConnectionManager >.use_compressed_spikes() ); // handled elsewhere const auto node_id = current_source.get_node_id(); // set values of next_target_data - next_target_data.set_source_lid( kernel::manager< VPManager >().node_id_to_lid( node_id ) ); + next_target_data.set_source_lid( kernel::manager< VPManager >.node_id_to_lid( node_id ) ); next_target_data.set_source_tid( - kernel::manager< VPManager >().vp_to_thread( kernel::manager< VPManager >().node_id_to_vp( node_id ) ) ); + kernel::manager< VPManager >.vp_to_thread( kernel::manager< VPManager >.node_id_to_vp( node_id ) ) ); next_target_data.reset_marker(); if ( current_source.is_primary() ) // primary connection, i.e., chemical synapses @@ -349,9 +349,9 @@ nest::SourceTable::populate_target_data_fields_( const SourceTablePosition& curr // the source rank will write to the buffer position relative to // the first position from the absolute position in the receive // buffer - const size_t relative_recv_buffer_pos = kernel::manager< ConnectionManager >().get_secondary_recv_buffer_position( + const size_t relative_recv_buffer_pos = kernel::manager< ConnectionManager >.get_secondary_recv_buffer_position( current_position.tid, current_position.syn_id, current_position.lcid ) - - kernel::manager< MPIManager >().get_recv_displacement_secondary_events_in_int( source_rank ); + - kernel::manager< MPIManager >.get_recv_displacement_secondary_events_in_int( source_rank ); SecondaryTargetDataFields& secondary_fields = next_target_data.secondary_data; secondary_fields.set_recv_buffer_pos( 
relative_recv_buffer_pos ); @@ -396,7 +396,7 @@ nest::SourceTable::get_next_target_data( const size_t tid, // we need to set a marker stating whether the entry following this // entry, if existent, has the same source - kernel::manager< ConnectionManager >().set_source_has_more_targets( current_position.tid, + kernel::manager< ConnectionManager >.set_source_has_more_targets( current_position.tid, current_position.syn_id, current_position.lcid, next_entry_has_same_source_( current_position, current_source ) ); @@ -413,7 +413,7 @@ nest::SourceTable::get_next_target_data( const size_t tid, // communicated via MPI, so we prepare to return the relevant data // set the source rank - source_rank = kernel::manager< MPIManager >().get_process_id_of_node_id( current_source.get_node_id() ); + source_rank = kernel::manager< MPIManager >.get_process_id_of_node_id( current_source.get_node_id() ); if ( not populate_target_data_fields_( current_position, current_source, source_rank, next_target_data ) ) { @@ -436,7 +436,7 @@ nest::SourceTable::resize_compressible_sources() { compressible_sources_[ tid ].clear(); compressible_sources_[ tid ].resize( - kernel::manager< ModelManager >().get_num_connection_models(), std::map< size_t, SpikeData >() ); + kernel::manager< ModelManager >.get_num_connection_models(), std::map< size_t, SpikeData >() ); } } @@ -459,13 +459,13 @@ nest::SourceTable::collect_compressible_sources( const size_t tid ) ++lcid; while ( ( lcid < syn_sources.size() ) and ( syn_sources[ lcid ].get_node_id() == old_source_node_id ) ) { - kernel::manager< ConnectionManager >().set_source_has_more_targets( tid, syn_id, lcid - 1, true ); + kernel::manager< ConnectionManager >.set_source_has_more_targets( tid, syn_id, lcid - 1, true ); ++lcid; } // Mark last connection in sequence as not having successor. This is essential if connections are // delete, e.g., by structural plasticity, because we do not globally reset the more_targets flag. 
assert( lcid - 1 < syn_sources.size() ); - kernel::manager< ConnectionManager >().set_source_has_more_targets( tid, syn_id, lcid - 1, false ); + kernel::manager< ConnectionManager >.set_source_has_more_targets( tid, syn_id, lcid - 1, false ); } } } @@ -478,11 +478,11 @@ nest::SourceTable::dump_sources() const { for ( size_t lcid = 0; lcid < sources_[ tid ][ syn_id ].size(); ++lcid ) { - kernel::manager< KernelManager >().write_to_dump( String::compose( "src : r%1 t%2 s%3 tg%4 l%5 tt%6", - kernel::manager< MPIManager >().get_rank(), - kernel::manager< VPManager >().get_thread_id(), + kernel::manager< KernelManager >.write_to_dump( String::compose( "src : r%1 t%2 s%3 tg%4 l%5 tt%6", + kernel::manager< MPIManager >.get_rank(), + kernel::manager< VPManager >.get_thread_id(), sources_[ tid ][ syn_id ][ lcid ].get_node_id(), - kernel::manager< ConnectionManager >().get_target_node_id( tid, syn_id, lcid ), + kernel::manager< ConnectionManager >.get_target_node_id( tid, syn_id, lcid ), lcid, tid ) ); } @@ -499,9 +499,9 @@ nest::SourceTable::dump_compressible_sources() const { for ( const auto& entry : compressible_sources_[ tid ][ syn_id ] ) { - kernel::manager< KernelManager >().write_to_dump( String::compose( "csrc : r%1 t%2 s%3 l%4 tt%5", - kernel::manager< MPIManager >().get_rank(), - kernel::manager< VPManager >().get_thread_id(), + kernel::manager< KernelManager >.write_to_dump( String::compose( "csrc : r%1 t%2 s%3 l%4 tt%5", + kernel::manager< MPIManager >.get_rank(), + kernel::manager< VPManager >.get_thread_id(), entry.first, entry.second.get_lcid(), entry.second.get_tid() ) ); @@ -514,7 +514,7 @@ void nest::SourceTable::fill_compressed_spike_data( std::vector< std::vector< std::vector< SpikeData > > >& compressed_spike_data ) { - const size_t num_synapse_models = kernel::manager< ModelManager >().get_num_connection_models(); + const size_t num_synapse_models = kernel::manager< ModelManager >.get_num_connection_models(); compressed_spike_data.clear(); 
compressed_spike_data.resize( num_synapse_models ); compressed_spike_data_map_.clear(); @@ -527,7 +527,7 @@ nest::SourceTable::fill_compressed_spike_data( // TODO: I believe that at this point compressible_sources_ is ordered by source gid. // Maybe one can exploit that to avoid searching with find() below. - for ( synindex syn_id = 0; syn_id < kernel::manager< ModelManager >().get_num_connection_models(); ++syn_id ) + for ( synindex syn_id = 0; syn_id < kernel::manager< ModelManager >.get_num_connection_models(); ++syn_id ) { for ( size_t target_thread = 0; target_thread < static_cast< size_t >( compressible_sources_.size() ); ++target_thread ) @@ -541,7 +541,7 @@ nest::SourceTable::fill_compressed_spike_data( // Set up entry for new source const auto new_source_index = compressed_spike_data[ syn_id ].size(); - compressed_spike_data[ syn_id ].emplace_back( kernel::manager< VPManager >().get_num_threads(), + compressed_spike_data[ syn_id ].emplace_back( kernel::manager< VPManager >.get_num_threads(), SpikeData( invalid_targetindex, invalid_synindex, invalid_lcid, 0 ) ); compressed_spike_data_map_[ syn_id ].insert( @@ -571,9 +571,9 @@ nest::SourceTable::dump_compressed_spike_data( : compressed_spike_data_map_ ) { for ( const auto& entry : tab ) { - kernel::manager< KernelManager >().write_to_dump( String::compose( "csdm : r%1 t%2 s%3 sx%4 tt%5", - kernel::manager< MPIManager >().get_rank(), - kernel::manager< VPManager >().get_thread_id(), + kernel::manager< KernelManager >.write_to_dump( String::compose( "csdm : r%1 t%2 s%3 sx%4 tt%5", + kernel::manager< MPIManager >.get_rank(), + kernel::manager< VPManager >.get_thread_id(), entry.first, entry.second.get_source_index(), entry.second.get_target_thread() ) ); @@ -586,9 +586,9 @@ nest::SourceTable::dump_compressed_spike_data( { for ( size_t tx = 0; tx < tab[ six ].size(); ++tx ) { - kernel::manager< KernelManager >().write_to_dump( String::compose( "csd : r%1 t%2 six%3 tx%4 l%5 tt%6", - kernel::manager< MPIManager 
>().get_rank(), - kernel::manager< VPManager >().get_thread_id(), + kernel::manager< KernelManager >.write_to_dump( String::compose( "csd : r%1 t%2 six%3 tx%4 l%5 tt%6", + kernel::manager< MPIManager >.get_rank(), + kernel::manager< VPManager >.get_thread_id(), six, tx, tab[ six ][ tx ].get_lcid(), diff --git a/nestkernel/sp_manager.cpp b/nestkernel/sp_manager.cpp index 8522fe7fa7..e80f0e7af4 100644 --- a/nestkernel/sp_manager.cpp +++ b/nestkernel/sp_manager.cpp @@ -102,7 +102,7 @@ SPManager::get_status( DictionaryDatum& d ) def< std::string >( sp_synapse, names::pre_synaptic_element, ( *i )->get_pre_synaptic_element_name() ); def< std::string >( sp_synapse, names::post_synaptic_element, ( *i )->get_post_synaptic_element_name() ); const std::string model = - kernel::manager< ModelManager >().get_connection_model( ( *i )->get_synapse_model(), 0 ).get_name(); + kernel::manager< ModelManager >.get_connection_model( ( *i )->get_synapse_model(), 0 ).get_name(); def< std::string >( sp_synapse, names::synapse_model, model ); def< bool >( sp_synapse, names::allow_autapses, ( *i )->allows_autapses() ); def< bool >( sp_synapse, names::allow_multapses, ( *i )->allows_multapses() ); @@ -164,7 +164,7 @@ SPManager::set_status( const DictionaryDatum& d ) // check that the user defined the min and max delay properly, if the // default delay is not used. 
if ( not conn_builder->get_default_delay() - and not kernel::manager< ConnectionManager >().get_user_set_delay_extrema() ) + and not kernel::manager< ConnectionManager >.get_user_set_delay_extrema() ) { throw BadProperty( "Structural Plasticity: to use different delays for synapses you must " @@ -205,11 +205,11 @@ SPManager::builder_max_delay() const void SPManager::disconnect( const size_t snode_id, Node* target, size_t target_thread, const size_t syn_id ) { - Node* const source = kernel::manager< NodeManager >().get_node_or_proxy( snode_id ); + Node* const source = kernel::manager< NodeManager >.get_node_or_proxy( snode_id ); // normal nodes and devices with proxies if ( target->has_proxies() ) { - kernel::manager< ConnectionManager >().disconnect( target_thread, syn_id, snode_id, target->get_node_id() ); + kernel::manager< ConnectionManager >.disconnect( target_thread, syn_id, snode_id, target->get_node_id() ); } else if ( target->local_receiver() ) // normal devices { @@ -220,10 +220,10 @@ SPManager::disconnect( const size_t snode_id, Node* target, size_t target_thread if ( ( source->get_thread() != target_thread ) and ( source->has_proxies() ) ) { target_thread = source->get_thread(); - target = kernel::manager< NodeManager >().get_node_or_proxy( target->get_node_id(), target_thread ); + target = kernel::manager< NodeManager >.get_node_or_proxy( target->get_node_id(), target_thread ); } - kernel::manager< ConnectionManager >().disconnect( target_thread, syn_id, snode_id, target->get_node_id() ); + kernel::manager< ConnectionManager >.disconnect( target_thread, syn_id, snode_id, target->get_node_id() ); } else // globally receiving devices iterate over all target threads { @@ -232,12 +232,12 @@ SPManager::disconnect( const size_t snode_id, Node* target, size_t target_thread { return; } - const size_t n_threads = kernel::manager< VPManager >().get_num_threads(); + const size_t n_threads = kernel::manager< VPManager >.get_num_threads(); for ( size_t t = 0; t < 
n_threads; t++ ) { - target = kernel::manager< NodeManager >().get_node_or_proxy( target->get_node_id(), t ); + target = kernel::manager< NodeManager >.get_node_or_proxy( target->get_node_id(), t ); target_thread = target->get_thread(); - kernel::manager< ConnectionManager >().disconnect( target_thread, syn_id, snode_id, target->get_node_id() ); + kernel::manager< ConnectionManager >.disconnect( target_thread, syn_id, snode_id, target->get_node_id() ); } } } @@ -248,12 +248,12 @@ SPManager::disconnect( NodeCollectionPTR sources, DictionaryDatum& conn_spec, DictionaryDatum& syn_spec ) { - if ( kernel::manager< ConnectionManager >().connections_have_changed() ) + if ( kernel::manager< ConnectionManager >.connections_have_changed() ) { #pragma omp parallel { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); - kernel::manager< SimulationManager >().update_connection_infrastructure( tid ); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); + kernel::manager< SimulationManager >.update_connection_infrastructure( tid ); } } @@ -267,7 +267,7 @@ SPManager::disconnect( NodeCollectionPTR sources, } const std::string rule_name = ( *conn_spec )[ names::rule ]; - if ( not kernel::manager< ConnectionManager >().valid_connection_rule( rule_name ) ) + if ( not kernel::manager< ConnectionManager >.valid_connection_rule( rule_name ) ) { throw BadProperty( "Unknown connectivity rule: " + rule_name ); } @@ -278,9 +278,9 @@ SPManager::disconnect( NodeCollectionPTR sources, for ( std::vector< SPBuilder* >::const_iterator i = sp_conn_builders_.begin(); i != sp_conn_builders_.end(); i++ ) { std::string synModel = getValue< std::string >( syn_spec, names::synapse_model ); - if ( ( *i )->get_synapse_model() == kernel::manager< ModelManager >().get_synapse_model_id( synModel ) ) + if ( ( *i )->get_synapse_model() == kernel::manager< ModelManager >.get_synapse_model_id( synModel ) ) { - cb = kernel::manager< ConnectionManager >().get_conn_builder( rule_name, 
+ cb = kernel::manager< ConnectionManager >.get_conn_builder( rule_name, sources, targets, /* third_out */ nullptr, @@ -293,7 +293,7 @@ SPManager::disconnect( NodeCollectionPTR sources, } else { - cb = kernel::manager< ConnectionManager >().get_conn_builder( rule_name, + cb = kernel::manager< ConnectionManager >.get_conn_builder( rule_name, sources, targets, /* third_out */ nullptr, @@ -307,7 +307,7 @@ SPManager::disconnect( NodeCollectionPTR sources, ALL_ENTRIES_ACCESSED( *syn_spec, "Connect", "Unread dictionary entries: " ); // Set flag before calling cb->disconnect() in case exception is thrown after some connections have been removed. - kernel::manager< ConnectionManager >().set_connections_have_changed(); + kernel::manager< ConnectionManager >.set_connections_have_changed(); cb->disconnect(); delete cb; @@ -349,8 +349,8 @@ SPManager::update_structural_plasticity( SPBuilder* sp_builder ) sp_builder->get_pre_synaptic_element_name(), pre_vacant_id, pre_vacant_n, pre_deleted_id, pre_deleted_n ); // Communicate the number of deleted pre-synaptic elements - kernel::manager< MPIManager >().communicate( pre_deleted_id, pre_deleted_id_global, displacements ); - kernel::manager< MPIManager >().communicate( pre_deleted_n, pre_deleted_n_global, displacements ); + kernel::manager< MPIManager >.communicate( pre_deleted_id, pre_deleted_id_global, displacements ); + kernel::manager< MPIManager >.communicate( pre_deleted_n, pre_deleted_n_global, displacements ); if ( pre_deleted_id_global.size() > 0 ) { @@ -367,8 +367,8 @@ SPManager::update_structural_plasticity( SPBuilder* sp_builder ) get_synaptic_elements( sp_builder->get_post_synaptic_element_name(), post_vacant_id, post_vacant_n, post_deleted_id, post_deleted_n ); // Communicate the number of deleted postsynaptic elements - kernel::manager< MPIManager >().communicate( post_deleted_id, post_deleted_id_global, displacements ); - kernel::manager< MPIManager >().communicate( post_deleted_n, post_deleted_n_global, 
displacements ); + kernel::manager< MPIManager >.communicate( post_deleted_id, post_deleted_id_global, displacements ); + kernel::manager< MPIManager >.communicate( post_deleted_n, post_deleted_n_global, displacements ); if ( post_deleted_id_global.size() > 0 ) { @@ -384,10 +384,10 @@ SPManager::update_structural_plasticity( SPBuilder* sp_builder ) } // Communicate vacant elements - kernel::manager< MPIManager >().communicate( pre_vacant_id, pre_vacant_id_global, displacements ); - kernel::manager< MPIManager >().communicate( pre_vacant_n, pre_vacant_n_global, displacements ); - kernel::manager< MPIManager >().communicate( post_vacant_id, post_vacant_id_global, displacements ); - kernel::manager< MPIManager >().communicate( post_vacant_n, post_vacant_n_global, displacements ); + kernel::manager< MPIManager >.communicate( pre_vacant_id, pre_vacant_id_global, displacements ); + kernel::manager< MPIManager >.communicate( pre_vacant_n, pre_vacant_n_global, displacements ); + kernel::manager< MPIManager >.communicate( post_vacant_id, post_vacant_id_global, displacements ); + kernel::manager< MPIManager >.communicate( post_vacant_n, post_vacant_n_global, displacements ); bool synapses_created = false; if ( pre_vacant_id_global.size() > 0 and post_vacant_id_global.size() > 0 ) @@ -397,7 +397,7 @@ SPManager::update_structural_plasticity( SPBuilder* sp_builder ) } if ( synapses_created or post_deleted_id.size() > 0 or pre_deleted_id.size() > 0 ) { - kernel::manager< ConnectionManager >().set_connections_have_changed(); + kernel::manager< ConnectionManager >.set_connections_have_changed(); } } @@ -457,7 +457,7 @@ SPManager::delete_synapses_from_pre( const std::vector< size_t >& pre_deleted_id std::vector< size_t >::const_iterator id_it; std::vector< int >::iterator n_it; - kernel::manager< ConnectionManager >().get_targets( pre_deleted_id, synapse_model, se_post_name, connectivity ); + kernel::manager< ConnectionManager >.get_targets( pre_deleted_id, synapse_model, 
se_post_name, connectivity ); id_it = pre_deleted_id.begin(); n_it = pre_deleted_n.begin(); @@ -465,7 +465,7 @@ SPManager::delete_synapses_from_pre( const std::vector< size_t >& pre_deleted_id for ( ; id_it != pre_deleted_id.end() and n_it != pre_deleted_n.end(); id_it++, n_it++, connectivity_it++ ) { // Communicate the list of targets - kernel::manager< MPIManager >().communicate( *connectivity_it, global_targets, displacements ); + kernel::manager< MPIManager >.communicate( *connectivity_it, global_targets, displacements ); // shuffle only the first n items, n is the number of deleted synaptic // elements if ( -( *n_it ) > static_cast< int >( global_targets.size() ) ) @@ -489,10 +489,10 @@ SPManager::delete_synapse( const size_t snode_id, const std::string se_post_name ) { // get thread id - const size_t tid = kernel::manager< VPManager >().get_thread_id(); - if ( kernel::manager< NodeManager >().is_local_node_id( snode_id ) ) + const size_t tid = kernel::manager< VPManager >.get_thread_id(); + if ( kernel::manager< NodeManager >.is_local_node_id( snode_id ) ) { - Node* const source = kernel::manager< NodeManager >().get_node_or_proxy( snode_id ); + Node* const source = kernel::manager< NodeManager >.get_node_or_proxy( snode_id ); const size_t source_thread = source->get_thread(); if ( tid == source_thread ) { @@ -500,13 +500,13 @@ SPManager::delete_synapse( const size_t snode_id, } } - if ( kernel::manager< NodeManager >().is_local_node_id( tnode_id ) ) + if ( kernel::manager< NodeManager >.is_local_node_id( tnode_id ) ) { - Node* const target = kernel::manager< NodeManager >().get_node_or_proxy( tnode_id ); + Node* const target = kernel::manager< NodeManager >.get_node_or_proxy( tnode_id ); const size_t target_thread = target->get_thread(); if ( tid == target_thread ) { - kernel::manager< ConnectionManager >().disconnect( tid, syn_id, snode_id, tnode_id ); + kernel::manager< ConnectionManager >.disconnect( tid, syn_id, snode_id, tnode_id ); 
target->connect_synaptic_element( se_post_name, -1 ); } @@ -535,7 +535,7 @@ SPManager::delete_synapses_from_post( std::vector< size_t >& post_deleted_id, std::vector< int >::iterator n_it; // Retrieve the connected sources - kernel::manager< ConnectionManager >().get_sources( post_deleted_id, synapse_model, connectivity ); + kernel::manager< ConnectionManager >.get_sources( post_deleted_id, synapse_model, connectivity ); id_it = post_deleted_id.begin(); n_it = post_deleted_n.begin(); @@ -544,7 +544,7 @@ SPManager::delete_synapses_from_post( std::vector< size_t >& post_deleted_id, for ( ; id_it != post_deleted_id.end() and n_it != post_deleted_n.end(); id_it++, n_it++, connectivity_it++ ) { // Communicate the list of sources - kernel::manager< MPIManager >().communicate( *connectivity_it, global_sources, displacements ); + kernel::manager< MPIManager >.communicate( *connectivity_it, global_sources, displacements ); // shuffle only the first n items, n is the number of deleted synaptic // elements if ( -( *n_it ) > static_cast< int >( global_sources.size() ) ) @@ -572,7 +572,7 @@ nest::SPManager::get_synaptic_elements( std::string se_name, size_t n_deleted_id = 0; size_t node_id; int n; - size_t n_nodes = kernel::manager< NodeManager >().size(); + size_t n_nodes = kernel::manager< NodeManager >.size(); se_vacant_id.clear(); se_vacant_n.clear(); se_deleted_id.clear(); @@ -589,9 +589,9 @@ nest::SPManager::get_synaptic_elements( std::string se_name, std::vector< int >::iterator deleted_n_it = se_deleted_n.begin(); SparseNodeArray::const_iterator node_it; - for ( size_t tid = 0; tid < kernel::manager< VPManager >().get_num_threads(); ++tid ) + for ( size_t tid = 0; tid < kernel::manager< VPManager >.get_num_threads(); ++tid ) { - const SparseNodeArray& local_nodes = kernel::manager< NodeManager >().get_local_nodes( tid ); + const SparseNodeArray& local_nodes = kernel::manager< NodeManager >.get_local_nodes( tid ); SparseNodeArray::const_iterator node_it; for ( node_it = 
local_nodes.begin(); node_it < local_nodes.end(); node_it++ ) { @@ -674,17 +674,17 @@ nest::SPManager::global_shuffle( std::vector< size_t >& v, size_t n ) void nest::SPManager::enable_structural_plasticity() { - if ( kernel::manager< VPManager >().get_num_threads() > 1 ) + if ( kernel::manager< VPManager >.get_num_threads() > 1 ) { throw KernelException( "Structural plasticity can not be used with multiple threads" ); } - if ( not kernel::manager< ConnectionManager >().get_keep_source_table() ) + if ( not kernel::manager< ConnectionManager >.get_keep_source_table() ) { throw KernelException( "Structural plasticity can not be enabled if keep_source_table has been " "set to false." ); } - if ( not kernel::manager< ConnectionManager >().use_compressed_spikes() ) + if ( not kernel::manager< ConnectionManager >.use_compressed_spikes() ) { throw KernelException( "Structural plasticity can not be enabled if use_compressed_spikes " diff --git a/nestkernel/sparse_node_array.cpp b/nestkernel/sparse_node_array.cpp index 1b8a459793..5307e2d017 100644 --- a/nestkernel/sparse_node_array.cpp +++ b/nestkernel/sparse_node_array.cpp @@ -86,8 +86,7 @@ nest::SparseNodeArray::add_local_node( Node& node ) left_side_has_proxies_ = node.has_proxies(); // we now know which scale applies on which side of the split - const double proxy_scale = - 1.0 / static_cast< double >( kernel::manager< VPManager >().get_num_virtual_processes() ); + const double proxy_scale = 1.0 / static_cast< double >( kernel::manager< VPManager >.get_num_virtual_processes() ); if ( left_side_has_proxies_ ) { left_scale_ = proxy_scale; diff --git a/nestkernel/spatial.cpp b/nestkernel/spatial.cpp index 4dd27c6fbe..02a1ac9167 100644 --- a/nestkernel/spatial.cpp +++ b/nestkernel/spatial.cpp @@ -92,7 +92,7 @@ get_position( NodeCollectionPTR layer_nc ) { size_t node_id = ( *it ).node_id; - if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( 
node_id ) ) { throw KernelException( "GetPosition is currently implemented for local nodes only." ); } @@ -109,12 +109,12 @@ get_position( NodeCollectionPTR layer_nc ) std::vector< double > get_position( const size_t node_id ) { - if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( node_id ) ) { throw KernelException( "GetPosition is currently implemented for local nodes only." ); } - NodeCollectionPTR nc = kernel::manager< NodeManager >().node_id_to_node_collection( node_id ); + NodeCollectionPTR nc = kernel::manager< NodeManager >.node_id_to_node_collection( node_id ); NodeCollectionMetadataPTR meta = nc->get_metadata(); if ( not meta ) @@ -149,7 +149,7 @@ displacement( NodeCollectionPTR layer_to_nc, NodeCollectionPTR layer_from_nc ) if ( layer_from_nc->size() == 1 ) { size_t node_id = layer_from_nc->operator[]( 0 ); - if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( node_id ) ) { throw KernelException( "Displacement is currently implemented for local nodes only." ); } @@ -168,7 +168,7 @@ displacement( NodeCollectionPTR layer_to_nc, NodeCollectionPTR layer_from_nc ) for ( NodeCollection::const_iterator it = layer_from_nc->begin(); it < layer_from_nc->end(); ++it ) { size_t node_id = ( *it ).node_id; - if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( node_id ) ) { throw KernelException( "Displacement is currently implemented for local nodes only." 
); } @@ -203,7 +203,7 @@ displacement( NodeCollectionPTR layer_nc, const ArrayDatum point ) for ( NodeCollection::const_iterator it = layer_nc->begin(); it != layer_nc->end(); ++it ) { size_t node_id = ( *it ).node_id; - if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( node_id ) ) { throw KernelException( "Displacement is currently implemented for local nodes only." ); } @@ -241,7 +241,7 @@ distance( NodeCollectionPTR layer_to_nc, NodeCollectionPTR layer_from_nc ) if ( layer_from_nc->size() == 1 ) { size_t node_id = layer_from_nc->operator[]( 0 ); - if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( node_id ) ) { throw KernelException( "Distance is currently implemented for local nodes only." ); } @@ -260,7 +260,7 @@ distance( NodeCollectionPTR layer_to_nc, NodeCollectionPTR layer_from_nc ) for ( NodeCollection::const_iterator it = layer_from_nc->begin(); it < layer_from_nc->end(); ++it ) { size_t node_id = ( *it ).node_id; - if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( node_id ) ) { throw KernelException( "Distance is currently implemented for local nodes only." ); } @@ -295,7 +295,7 @@ distance( NodeCollectionPTR layer_nc, const ArrayDatum point ) for ( NodeCollection::const_iterator it = layer_nc->begin(); it < layer_nc->end(); ++it ) { size_t node_id = ( *it ).node_id; - if ( not kernel::manager< NodeManager >().is_local_node_id( node_id ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( node_id ) ) { throw KernelException( "Distance is currently implemented for local nodes only." 
); } @@ -333,13 +333,13 @@ distance( const ArrayDatum conns ) size_t trgt = conn_id.get_target_node_id(); - if ( not kernel::manager< NodeManager >().is_local_node_id( trgt ) ) + if ( not kernel::manager< NodeManager >.is_local_node_id( trgt ) ) { throw KernelException( "Distance is currently implemented for local nodes only." ); } - NodeCollectionPTR trgt_nc = kernel::manager< NodeManager >().node_id_to_node_collection( trgt ); + NodeCollectionPTR trgt_nc = kernel::manager< NodeManager >.node_id_to_node_collection( trgt ); NodeCollectionMetadataPTR meta = trgt_nc->get_metadata(); // distance is NaN if source, target is not spatially distributed @@ -406,7 +406,7 @@ connect_layers( NodeCollectionPTR source_nc, NodeCollectionPTR target_nc, const ALL_ENTRIES_ACCESSED( *connection_dict, "nest::CreateLayers", "Unread dictionary entries: " ); // Set flag before calling source->connect() in case exception is thrown after some connections have been created. - kernel::manager< ConnectionManager >().set_connections_have_changed(); + kernel::manager< ConnectionManager >.set_connections_have_changed(); source->connect( source_nc, target, target_nc, connector ); } diff --git a/nestkernel/stimulation_backend_mpi.cpp b/nestkernel/stimulation_backend_mpi.cpp index 7fd4469198..1d1ff10242 100644 --- a/nestkernel/stimulation_backend_mpi.cpp +++ b/nestkernel/stimulation_backend_mpi.cpp @@ -49,7 +49,7 @@ nest::StimulationBackendMPI::~StimulationBackendMPI() noexcept void nest::StimulationBackendMPI::initialize() { - auto nthreads = kernel::manager< VPManager >().get_num_threads(); + auto nthreads = kernel::manager< VPManager >.get_num_threads(); device_map devices( nthreads ); devices_.swap( devices ); } @@ -117,7 +117,7 @@ nest::StimulationBackendMPI::prepare() } // need to be run only by the master thread : it is the case because this part is not running in parallel - size_t thread_id_master = kernel::manager< VPManager >().get_thread_id(); + size_t thread_id_master = 
kernel::manager< VPManager >.get_thread_id(); // Create the connection with MPI // 1) take all the ports of the connections. Get port and update the list of device only for master for ( auto& it_device : devices_[ thread_id_master ] ) @@ -132,7 +132,7 @@ nest::StimulationBackendMPI::prepare() // it's not a new communicator comm = std::get< 0 >( comm_it->second ); // add the id of the device if there are a connection with the device. - if ( kernel::manager< ConnectionManager >().get_device_connected( + if ( kernel::manager< ConnectionManager >.get_device_connected( thread_id_master, it_device.second.second->get_local_device_id() ) ) { std::get< 1 >( comm_it->second )->push_back( it_device.second.second->get_node_id() ); @@ -147,11 +147,11 @@ nest::StimulationBackendMPI::prepare() comm = new MPI_Comm; auto vector_id_device = new std::vector< int >; // vector of ID device for the rank int* vector_nb_device_th { - new int[ kernel::manager< VPManager >().get_num_threads() ] {} + new int[ kernel::manager< VPManager >.get_num_threads() ] {} }; // number of device by thread - std::fill_n( vector_nb_device_th, kernel::manager< VPManager >().get_num_threads(), 0 ); + std::fill_n( vector_nb_device_th, kernel::manager< VPManager >.get_num_threads(), 0 ); // add the id of the device if there is a connection with the device. 
- if ( kernel::manager< ConnectionManager >().get_device_connected( + if ( kernel::manager< ConnectionManager >.get_device_connected( thread_id_master, it_device.second.second->get_local_device_id() ) ) { vector_id_device->push_back( it_device.second.second->get_node_id() ); @@ -165,7 +165,7 @@ nest::StimulationBackendMPI::prepare() } // Add the id of device of the other thread in the vector_id_device and update the count of all device - for ( size_t id_thread = 0; id_thread < kernel::manager< VPManager >().get_num_threads(); id_thread++ ) + for ( size_t id_thread = 0; id_thread < kernel::manager< VPManager >.get_num_threads(); id_thread++ ) { // don't do it again for the master thread if ( id_thread != thread_id_master ) @@ -173,7 +173,7 @@ nest::StimulationBackendMPI::prepare() for ( auto& it_device : devices_[ id_thread ] ) { // add the id of the device if there is a connection with the device. - if ( kernel::manager< ConnectionManager >().get_device_connected( + if ( kernel::manager< ConnectionManager >.get_device_connected( id_thread, it_device.second.second->get_local_device_id() ) ) { std::string port_name; @@ -282,7 +282,7 @@ nest::StimulationBackendMPI::cleanup() } // clear map of devices commMap_.clear(); - size_t thread_id_master = kernel::manager< VPManager >().get_thread_id(); + size_t thread_id_master = kernel::manager< VPManager >.get_thread_id(); for ( auto& it_device : devices_[ thread_id_master ] ) { it_device.second.first = nullptr; @@ -318,12 +318,12 @@ nest::StimulationBackendMPI::get_port( const size_t index_node, const std::strin // (file contains only one line with name of the port) std::ostringstream basename; // get the path from the kernel - const std::string& path = kernel::manager< IOManager >().get_data_path(); + const std::string& path = kernel::manager< IOManager >.get_data_path(); if ( not path.empty() ) { basename << path << '/'; } - basename << kernel::manager< IOManager >().get_data_prefix(); + basename << kernel::manager< 
IOManager >.get_data_prefix(); // add the path from the label of the device if ( not label.empty() ) @@ -386,7 +386,7 @@ nest::StimulationBackendMPI::update_device( int* array_index, if ( data.first[ 0 ] != 0 ) { // if there are some data - size_t thread_id = kernel::manager< VPManager >().get_thread_id(); + size_t thread_id = kernel::manager< VPManager >.get_thread_id(); int index_id_device = 0; // the index for the array of device in the data // get the first id of the device for the current thread // if the thread_id == 0, the index_id_device equals 0 diff --git a/nestkernel/stimulation_device.cpp b/nestkernel/stimulation_device.cpp index dcdd771cee..6bed78c28c 100644 --- a/nestkernel/stimulation_device.cpp +++ b/nestkernel/stimulation_device.cpp @@ -79,7 +79,7 @@ nest::StimulationDevice::pre_run_hook() void nest::StimulationDevice::set_initialized_() { - kernel::manager< IOManager >().enroll_stimulator( P_.stimulus_source_, *this, backend_params_ ); + kernel::manager< IOManager >.enroll_stimulator( P_.stimulus_source_, *this, backend_params_ ); } const std::string& @@ -111,7 +111,7 @@ nest::StimulationDevice::Parameters_::set( const DictionaryDatum& d ) if ( updateValue< std::string >( d, names::stimulus_source, stimulus_source ) ) { - if ( not kernel::manager< IOManager >().is_valid_stimulation_backend( stimulus_source ) ) + if ( not kernel::manager< IOManager >.is_valid_stimulation_backend( stimulus_source ) ) { std::string msg = String::compose( "Unknown input backend '%1'", stimulus_source ); throw BadProperty( msg ); @@ -155,7 +155,7 @@ nest::StimulationDevice::set_status( const DictionaryDatum& d ) } else { - kernel::manager< IOManager >().enroll_stimulator( ptmp.stimulus_source_, *this, d ); + kernel::manager< IOManager >.enroll_stimulator( ptmp.stimulus_source_, *this, d ); } // if we get here, temporaries contain consistent set of properties diff --git a/nestkernel/stopwatch.h b/nestkernel/stopwatch.h index aae04768ad..af8a064936 100644 --- 
a/nestkernel/stopwatch.h +++ b/nestkernel/stopwatch.h @@ -528,10 +528,10 @@ Stopwatch< detailed_timer, std::enable_if_t< use_threaded_timers and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::start() { - kernel::manager< VPManager >().assert_thread_parallel(); + kernel::manager< VPManager >.assert_thread_parallel(); - walltime_timers_[ kernel::manager< VPManager >().get_thread_id() ].start(); - cputime_timers_[ kernel::manager< VPManager >().get_thread_id() ].start(); + walltime_timers_[ kernel::manager< VPManager >.get_thread_id() ].start(); + cputime_timers_[ kernel::manager< VPManager >.get_thread_id() ].start(); } template < StopwatchGranularity detailed_timer > @@ -541,10 +541,10 @@ Stopwatch< detailed_timer, std::enable_if_t< use_threaded_timers and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::stop() { - kernel::manager< VPManager >().assert_thread_parallel(); + kernel::manager< VPManager >.assert_thread_parallel(); - walltime_timers_[ kernel::manager< VPManager >().get_thread_id() ].stop(); - cputime_timers_[ kernel::manager< VPManager >().get_thread_id() ].stop(); + walltime_timers_[ kernel::manager< VPManager >.get_thread_id() ].stop(); + cputime_timers_[ kernel::manager< VPManager >.get_thread_id() ].stop(); } template < StopwatchGranularity detailed_timer > @@ -554,9 +554,9 @@ Stopwatch< detailed_timer, std::enable_if_t< use_threaded_timers and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::is_running_() const { - kernel::manager< VPManager >().assert_thread_parallel(); + kernel::manager< VPManager >.assert_thread_parallel(); - return walltime_timers_[ kernel::manager< VPManager >().get_thread_id() ].is_running_(); + return walltime_timers_[ kernel::manager< VPManager >.get_thread_id() ].is_running_(); } template < StopwatchGranularity detailed_timer > @@ -567,9 +567,9 @@ Stopwatch< detailed_timer, and ( detailed_timer == StopwatchGranularity::Normal or 
use_detailed_timers ) > >::elapsed( timers::timeunit_t timeunit ) const { - kernel::manager< VPManager >().assert_thread_parallel(); + kernel::manager< VPManager >.assert_thread_parallel(); - return walltime_timers_[ kernel::manager< VPManager >().get_thread_id() ].elapsed( timeunit ); + return walltime_timers_[ kernel::manager< VPManager >.get_thread_id() ].elapsed( timeunit ); } template < StopwatchGranularity detailed_timer > @@ -581,9 +581,9 @@ Stopwatch< detailed_timer, timers::timeunit_t timeunit, std::ostream& os ) const { - kernel::manager< VPManager >().assert_thread_parallel(); + kernel::manager< VPManager >.assert_thread_parallel(); - walltime_timers_[ kernel::manager< VPManager >().get_thread_id() ].print( msg, timeunit, os ); + walltime_timers_[ kernel::manager< VPManager >.get_thread_id() ].print( msg, timeunit, os ); } template < StopwatchGranularity detailed_timer > @@ -593,9 +593,9 @@ Stopwatch< detailed_timer, std::enable_if_t< use_threaded_timers and ( detailed_timer == StopwatchGranularity::Normal or use_detailed_timers ) > >::reset() { - kernel::manager< VPManager >().assert_single_threaded(); + kernel::manager< VPManager >.assert_single_threaded(); - const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >.get_num_threads(); walltime_timers_.resize( num_threads ); cputime_timers_.resize( num_threads ); for ( size_t i = 0; i < num_threads; ++i ) diff --git a/nestkernel/synaptic_element.cpp b/nestkernel/synaptic_element.cpp index 7b0e1a730d..6a8f1d966e 100644 --- a/nestkernel/synaptic_element.cpp +++ b/nestkernel/synaptic_element.cpp @@ -55,7 +55,7 @@ nest::SynapticElement::SynapticElement( const SynapticElement& se ) , growth_rate_( se.growth_rate_ ) , tau_vacant_( se.tau_vacant_ ) { - growth_curve_ = kernel::manager< SPManager >().new_growth_curve( se.growth_curve_->get_name() ); + growth_curve_ = kernel::manager< SPManager >.new_growth_curve( 
se.growth_curve_->get_name() ); assert( growth_curve_ ); DictionaryDatum nc_parameters = DictionaryDatum( new Dictionary ); se.get( nc_parameters ); @@ -68,7 +68,7 @@ nest::SynapticElement::operator=( const SynapticElement& other ) if ( this != &other ) { // 1: allocate new memory and copy the elements - GrowthCurve* new_nc = kernel::manager< SPManager >().new_growth_curve( other.growth_curve_->get_name() ); + GrowthCurve* new_nc = kernel::manager< SPManager >.new_growth_curve( other.growth_curve_->get_name() ); DictionaryDatum nc_parameters = DictionaryDatum( new Dictionary ); other.get( nc_parameters ); @@ -123,7 +123,7 @@ nest::SynapticElement::set( const DictionaryDatum& d ) Name growth_curve_name( getValue< std::string >( d, names::growth_curve ) ); if ( not growth_curve_->is( growth_curve_name ) ) { - growth_curve_ = kernel::manager< SPManager >().new_growth_curve( growth_curve_name ); + growth_curve_ = kernel::manager< SPManager >.new_growth_curve( growth_curve_name ); } } growth_curve_->set( d ); diff --git a/nestkernel/target_identifier.h b/nestkernel/target_identifier.h index a7edf4234c..11955f9444 100644 --- a/nestkernel/target_identifier.h +++ b/nestkernel/target_identifier.h @@ -140,7 +140,7 @@ class TargetIdentifierIndex get_target_ptr( const size_t tid ) const { assert( target_ != invalid_targetindex ); - return kernel::manager< NodeManager >().thread_lid_to_node( tid, target_ ); + return kernel::manager< NodeManager >.thread_lid_to_node( tid, target_ ); } size_t @@ -170,7 +170,7 @@ class TargetIdentifierIndex inline void TargetIdentifierIndex::set_target( Node* target ) { - kernel::manager< NodeManager >().ensure_valid_thread_local_ids(); + kernel::manager< NodeManager >.ensure_valid_thread_local_ids(); size_t target_lid = target->get_thread_lid(); if ( target_lid > max_targetindex ) diff --git a/nestkernel/target_table.cpp b/nestkernel/target_table.cpp index 4fc30fdafc..88b0c63ef0 100644 --- a/nestkernel/target_table.cpp +++ 
b/nestkernel/target_table.cpp @@ -33,13 +33,13 @@ void nest::TargetTable::initialize() { - const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >.get_num_threads(); targets_.resize( num_threads ); secondary_send_buffer_pos_.resize( num_threads ); #pragma omp parallel { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); targets_[ tid ] = std::vector< std::vector< Target > >(); secondary_send_buffer_pos_[ tid ] = std::vector< std::vector< std::vector< size_t > > >(); } // of omp parallel @@ -57,7 +57,7 @@ nest::TargetTable::prepare( const size_t tid ) { // add one to max_num_local_nodes to avoid possible overflow in case // of rounding errors - const size_t num_local_nodes = kernel::manager< NodeManager >().get_max_num_local_nodes() + 1; + const size_t num_local_nodes = kernel::manager< NodeManager >.get_max_num_local_nodes() + 1; targets_[ tid ].resize( num_local_nodes ); @@ -66,7 +66,7 @@ nest::TargetTable::prepare( const size_t tid ) for ( size_t lid = 0; lid < num_local_nodes; ++lid ) { // resize to maximal possible synapse-type index - secondary_send_buffer_pos_[ tid ][ lid ].resize( kernel::manager< ModelManager >().get_num_connection_models() ); + secondary_send_buffer_pos_[ tid ][ lid ].resize( kernel::manager< ModelManager >.get_num_connection_models() ); } } @@ -104,7 +104,7 @@ nest::TargetTable::add_target( const size_t tid, const size_t target_rank, const { const SecondaryTargetDataFields& secondary_fields = target_data.secondary_data; const size_t send_buffer_pos = secondary_fields.get_recv_buffer_pos() - + kernel::manager< MPIManager >().get_send_displacement_secondary_events_in_int( target_rank ); + + kernel::manager< MPIManager >.get_send_displacement_secondary_events_in_int( target_rank ); const synindex syn_id = secondary_fields.get_syn_id(); assert( syn_id < secondary_send_buffer_pos_[ 
tid ][ lid ].size() ); diff --git a/nestkernel/target_table_devices.cpp b/nestkernel/target_table_devices.cpp index 4aadb09a37..be6d052986 100644 --- a/nestkernel/target_table_devices.cpp +++ b/nestkernel/target_table_devices.cpp @@ -41,7 +41,7 @@ TargetTableDevices::~TargetTableDevices() void TargetTableDevices::initialize() { - const size_t num_threads = kernel::manager< VPManager >().get_num_threads(); + const size_t num_threads = kernel::manager< VPManager >.get_num_threads(); target_to_devices_.resize( num_threads ); target_from_devices_.resize( num_threads ); sending_devices_node_ids_.resize( num_threads ); @@ -82,30 +82,30 @@ TargetTableDevices::resize_to_number_of_neurons() { #pragma omp parallel { - const size_t tid = kernel::manager< VPManager >().get_thread_id(); - target_to_devices_[ tid ].resize( kernel::manager< NodeManager >().get_max_num_local_nodes() + 1 ); - target_from_devices_[ tid ].resize( kernel::manager< NodeManager >().get_num_thread_local_devices( tid ) + 1 ); - sending_devices_node_ids_[ tid ].resize( kernel::manager< NodeManager >().get_num_thread_local_devices( tid ) + 1 ); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); + target_to_devices_[ tid ].resize( kernel::manager< NodeManager >.get_max_num_local_nodes() + 1 ); + target_from_devices_[ tid ].resize( kernel::manager< NodeManager >.get_num_thread_local_devices( tid ) + 1 ); + sending_devices_node_ids_[ tid ].resize( kernel::manager< NodeManager >.get_num_thread_local_devices( tid ) + 1 ); } // end omp parallel } void TargetTableDevices::resize_to_number_of_synapse_types() { - kernel::manager< VPManager >().assert_thread_parallel(); + kernel::manager< VPManager >.assert_thread_parallel(); - const size_t tid = kernel::manager< VPManager >().get_thread_id(); + const size_t tid = kernel::manager< VPManager >.get_thread_id(); for ( size_t lid = 0; lid < target_to_devices_.at( tid ).size(); ++lid ) { // make sure this device has support for all synapse types 
target_to_devices_.at( tid ).at( lid ).resize( - kernel::manager< ModelManager >().get_num_connection_models(), nullptr ); + kernel::manager< ModelManager >.get_num_connection_models(), nullptr ); } for ( size_t ldid = 0; ldid < target_from_devices_.at( tid ).size(); ++ldid ) { // make sure this device has support for all synapse types target_from_devices_.at( tid ).at( ldid ).resize( - kernel::manager< ModelManager >().get_num_connection_models(), nullptr ); + kernel::manager< ModelManager >.get_num_connection_models(), nullptr ); } } @@ -119,8 +119,8 @@ TargetTableDevices::get_connections_to_devices_( const size_t requested_source_n { if ( requested_source_node_id != 0 ) { - const size_t lid = kernel::manager< VPManager >().node_id_to_lid( requested_source_node_id ); - if ( kernel::manager< VPManager >().lid_to_node_id( lid ) != requested_source_node_id ) + const size_t lid = kernel::manager< VPManager >.node_id_to_lid( requested_source_node_id ); + if ( kernel::manager< VPManager >.lid_to_node_id( lid ) != requested_source_node_id ) { return; } @@ -145,7 +145,7 @@ TargetTableDevices::get_connections_to_device_for_lid_( const size_t lid, { if ( target_to_devices_[ tid ][ lid ].size() > 0 ) { - const size_t source_node_id = kernel::manager< VPManager >().lid_to_node_id( lid ); + const size_t source_node_id = kernel::manager< VPManager >.lid_to_node_id( lid ); // not the valid connector if ( source_node_id > 0 and target_to_devices_[ tid ][ lid ][ syn_id ] ) { @@ -170,7 +170,7 @@ TargetTableDevices::get_connections_from_devices_( const size_t requested_source const size_t source_node_id = *it; if ( source_node_id > 0 and ( requested_source_node_id == source_node_id or requested_source_node_id == 0 ) ) { - const Node* source = kernel::manager< NodeManager >().get_node_or_proxy( source_node_id, tid ); + const Node* source = kernel::manager< NodeManager >.get_node_or_proxy( source_node_id, tid ); const size_t ldid = source->get_local_device_id(); if ( 
target_from_devices_[ tid ][ ldid ].size() > 0 ) @@ -212,13 +212,11 @@ TargetTableDevices::add_connection_to_device( Node& source, const double d, const double w ) { - const size_t lid = kernel::manager< VPManager >().node_id_to_lid( source_node_id ); + const size_t lid = kernel::manager< VPManager >.node_id_to_lid( source_node_id ); assert( lid < target_to_devices_[ tid ].size() ); assert( syn_id < target_to_devices_[ tid ][ lid ].size() ); - kernel::manager< ModelManager >() - .get_connection_model( syn_id, tid ) - .add_connection( source, target, target_to_devices_[ tid ][ lid ], syn_id, p, d, w ); + kernel::manager< ModelManager >.get_connection_model( syn_id, tid ).add_connection( source, target, target_to_devices_[ tid ][ lid ], syn_id, p, d, w ); } void @@ -235,9 +233,7 @@ TargetTableDevices::add_connection_from_device( Node& source, assert( ldid < target_from_devices_[ tid ].size() ); assert( syn_id < target_from_devices_[ tid ][ ldid ].size() ); - kernel::manager< ModelManager >() - .get_connection_model( syn_id, tid ) - .add_connection( source, target, target_from_devices_[ tid ][ ldid ], syn_id, p, d, w ); + kernel::manager< ModelManager >.get_connection_model( syn_id, tid ).add_connection( source, target, target_from_devices_[ tid ][ ldid ], syn_id, p, d, w ); // store node ID of sending device sending_devices_node_ids_[ tid ][ ldid ] = source.get_node_id(); diff --git a/nestkernel/target_table_devices.h b/nestkernel/target_table_devices.h index d8feef6367..387b39ef69 100644 --- a/nestkernel/target_table_devices.h +++ b/nestkernel/target_table_devices.h @@ -268,7 +268,7 @@ TargetTableDevices::send_to_device( const size_t tid, Event& e, const std::vector< ConnectorModel* >& cm ) { - const size_t lid = kernel::manager< VPManager >().node_id_to_lid( source_node_id ); + const size_t lid = kernel::manager< VPManager >.node_id_to_lid( source_node_id ); for ( std::vector< ConnectorBase* >::iterator it = target_to_devices_[ tid ][ lid ].begin(); it != 
target_to_devices_[ tid ][ lid ].end(); ++it ) @@ -286,7 +286,7 @@ TargetTableDevices::send_to_device( const size_t tid, SecondaryEvent& e, const std::vector< ConnectorModel* >& cm ) { - const size_t lid = kernel::manager< VPManager >().node_id_to_lid( source_node_id ); + const size_t lid = kernel::manager< VPManager >.node_id_to_lid( source_node_id ); for ( auto& synid : e.get_supported_syn_ids() ) { if ( target_to_devices_[ tid ][ lid ][ synid ] ) @@ -303,7 +303,7 @@ TargetTableDevices::get_synapse_status_to_device( const size_t tid, DictionaryDatum& dict, const size_t lcid ) const { - const size_t lid = kernel::manager< VPManager >().node_id_to_lid( source_node_id ); + const size_t lid = kernel::manager< VPManager >.node_id_to_lid( source_node_id ); if ( target_to_devices_[ tid ][ lid ][ syn_id ] ) { target_to_devices_[ tid ][ lid ][ syn_id ]->get_synapse_status( tid, lcid, dict ); @@ -318,7 +318,7 @@ TargetTableDevices::set_synapse_status_to_device( const size_t tid, const DictionaryDatum& dict, const size_t lcid ) { - const size_t lid = kernel::manager< VPManager >().node_id_to_lid( source_node_id ); + const size_t lid = kernel::manager< VPManager >.node_id_to_lid( source_node_id ); if ( target_to_devices_[ tid ][ lid ][ syn_id ] ) { target_to_devices_[ tid ][ lid ][ syn_id ]->set_synapse_status( lcid, dict, cm ); diff --git a/nestkernel/universal_data_logger.h b/nestkernel/universal_data_logger.h index 4dab40df54..3085ba3520 100644 --- a/nestkernel/universal_data_logger.h +++ b/nestkernel/universal_data_logger.h @@ -626,7 +626,7 @@ DynamicUniversalDataLogger< HostNode >::DataLogger_::init() // Next recording step is in current slice or beyond, indicates that // buffer is properly initialized. 
- if ( next_rec_step_ >= kernel::manager< SimulationManager >().get_slice_origin().get_steps() ) + if ( next_rec_step_ >= kernel::manager< SimulationManager >.get_slice_origin().get_steps() ) { return; } @@ -644,14 +644,14 @@ DynamicUniversalDataLogger< HostNode >::DataLogger_::init() // update interval to be multiples of recording interval. Need to add // +1 because the division result is rounded down. next_rec_step_ = - ( kernel::manager< SimulationManager >().get_time().get_steps() / rec_int_steps_ + 1 ) * rec_int_steps_ - 1; + ( kernel::manager< SimulationManager >.get_time().get_steps() / rec_int_steps_ + 1 ) * rec_int_steps_ - 1; // If offset is not 0, adjust next recording step to account for it by first setting next recording // step to be offset and then iterating until the variable is greater than current simulation time. if ( recording_offset_.get_steps() != 0 ) { next_rec_step_ = recording_offset_.get_steps() - 1; // shifted one to left - while ( next_rec_step_ <= kernel::manager< SimulationManager >().get_time().get_steps() ) + while ( next_rec_step_ <= kernel::manager< SimulationManager >.get_time().get_steps() ) { next_rec_step_ += rec_int_steps_; } @@ -659,7 +659,7 @@ DynamicUniversalDataLogger< HostNode >::DataLogger_::init() // number of data points per slice const long recs_per_slice = static_cast< long >( - std::ceil( kernel::manager< ConnectionManager >().get_min_delay() / static_cast< double >( rec_int_steps_ ) ) ); + std::ceil( kernel::manager< ConnectionManager >.get_min_delay() / static_cast< double >( rec_int_steps_ ) ) ); data_.resize( 2, DataLoggingReply::Container( recs_per_slice, DataLoggingReply::Item( num_vars_ ) ) ); @@ -676,7 +676,7 @@ DynamicUniversalDataLogger< HostNode >::DataLogger_::record_data( const HostNode return; } - const size_t wt = kernel::manager< EventDeliveryManager >().write_toggle(); + const size_t wt = kernel::manager< EventDeliveryManager >.write_toggle(); assert( wt < next_rec_.size() ); assert( wt < 
data_.size() ); @@ -724,13 +724,13 @@ DynamicUniversalDataLogger< HostNode >::DataLogger_::handle( HostNode& host, con assert( data_.size() == 2 ); // get read toggle and start and end of slice - const size_t rt = kernel::manager< EventDeliveryManager >().read_toggle(); + const size_t rt = kernel::manager< EventDeliveryManager >.read_toggle(); assert( not data_[ rt ].empty() ); // Check if we have valid data, i.e., data with time stamps within the // past time slice. This may not be the case if the node has been frozen. // In that case, we still reset the recording marker, to prepare for the next round. - if ( data_[ rt ][ 0 ].timestamp <= kernel::manager< SimulationManager >().get_previous_slice_origin() ) + if ( data_[ rt ][ 0 ].timestamp <= kernel::manager< SimulationManager >.get_previous_slice_origin() ) { next_rec_[ rt ] = 0; return; @@ -758,7 +758,7 @@ DynamicUniversalDataLogger< HostNode >::DataLogger_::handle( HostNode& host, con reply.set_port( request.get_port() ); // send it off - kernel::manager< EventDeliveryManager >().send_to_node( reply ); + kernel::manager< EventDeliveryManager >.send_to_node( reply ); } template < typename HostNode > @@ -828,7 +828,7 @@ UniversalDataLogger< HostNode >::DataLogger_::init() // Next recording step is in current slice or beyond, indicates that // buffer is properly initialized. - if ( next_rec_step_ >= kernel::manager< SimulationManager >().get_slice_origin().get_steps() ) + if ( next_rec_step_ >= kernel::manager< SimulationManager >.get_slice_origin().get_steps() ) { return; } @@ -846,14 +846,14 @@ UniversalDataLogger< HostNode >::DataLogger_::init() // update interval to be multiples of recording interval. Need to add // +1 because the division result is rounded down. 
next_rec_step_ = - ( kernel::manager< SimulationManager >().get_time().get_steps() / rec_int_steps_ + 1 ) * rec_int_steps_ - 1; + ( kernel::manager< SimulationManager >.get_time().get_steps() / rec_int_steps_ + 1 ) * rec_int_steps_ - 1; // If offset is not 0, adjust next recording step to account for it by first setting next recording // step to be offset and then iterating until the variable is greater than current simulation time. if ( recording_offset_.get_steps() != 0 ) { next_rec_step_ = recording_offset_.get_steps() - 1; // shifted one to left - while ( next_rec_step_ <= kernel::manager< SimulationManager >().get_time().get_steps() ) + while ( next_rec_step_ <= kernel::manager< SimulationManager >.get_time().get_steps() ) { next_rec_step_ += rec_int_steps_; } @@ -861,7 +861,7 @@ UniversalDataLogger< HostNode >::DataLogger_::init() // number of data points per slice const long recs_per_slice = static_cast< long >( - std::ceil( kernel::manager< ConnectionManager >().get_min_delay() / static_cast< double >( rec_int_steps_ ) ) ); + std::ceil( kernel::manager< ConnectionManager >.get_min_delay() / static_cast< double >( rec_int_steps_ ) ) ); data_.resize( 2, DataLoggingReply::Container( recs_per_slice, DataLoggingReply::Item( num_vars_ ) ) ); @@ -878,7 +878,7 @@ UniversalDataLogger< HostNode >::DataLogger_::record_data( const HostNode& host, return; } - const size_t wt = kernel::manager< EventDeliveryManager >().write_toggle(); + const size_t wt = kernel::manager< EventDeliveryManager >.write_toggle(); assert( wt < next_rec_.size() ); assert( wt < data_.size() ); @@ -927,13 +927,13 @@ UniversalDataLogger< HostNode >::DataLogger_::handle( HostNode& host, const Data assert( data_.size() == 2 ); // get read toggle and start and end of slice - const size_t rt = kernel::manager< EventDeliveryManager >().read_toggle(); + const size_t rt = kernel::manager< EventDeliveryManager >.read_toggle(); assert( not data_[ rt ].empty() ); // Check if we have valid data, i.e., data 
with time stamps within the // past time slice. This may not be the case if the node has been frozen. // In that case, we still reset the recording marker, to prepare for the next round. - if ( data_[ rt ][ 0 ].timestamp <= kernel::manager< SimulationManager >().get_previous_slice_origin() ) + if ( data_[ rt ][ 0 ].timestamp <= kernel::manager< SimulationManager >.get_previous_slice_origin() ) { next_rec_[ rt ] = 0; return; @@ -961,7 +961,7 @@ UniversalDataLogger< HostNode >::DataLogger_::handle( HostNode& host, const Data reply.set_port( request.get_port() ); // send it off - kernel::manager< EventDeliveryManager >().send_to_node( reply ); + kernel::manager< EventDeliveryManager >.send_to_node( reply ); } } // namespace nest diff --git a/nestkernel/vp_manager.cpp b/nestkernel/vp_manager.cpp index 69f09b1a23..0bbfc51979 100644 --- a/nestkernel/vp_manager.cpp +++ b/nestkernel/vp_manager.cpp @@ -112,11 +112,11 @@ nest::VPManager::set_status( const DictionaryDatum& d ) { if ( not n_threads_updated ) { - n_threads = n_vps / kernel::manager< MPIManager >().get_num_processes(); + n_threads = n_vps / kernel::manager< MPIManager >.get_num_processes(); } - const bool n_threads_conflict = n_vps / kernel::manager< MPIManager >().get_num_processes() != n_threads; - const bool n_procs_conflict = n_vps % kernel::manager< MPIManager >().get_num_processes() != 0; + const bool n_threads_conflict = n_vps / kernel::manager< MPIManager >.get_num_processes() != n_threads; + const bool n_procs_conflict = n_vps % kernel::manager< MPIManager >.get_num_processes() != 0; if ( n_threads_conflict or n_procs_conflict ) { throw BadProperty( @@ -133,23 +133,23 @@ nest::VPManager::set_status( const DictionaryDatum& d ) if ( n_threads_updated or n_vps_updated ) { std::vector< std::string > errors; - if ( kernel::manager< NodeManager >().size() > 0 ) + if ( kernel::manager< NodeManager >.size() > 0 ) { errors.push_back( "Nodes exist" ); } - if ( kernel::manager< ConnectionManager 
>().get_user_set_delay_extrema() ) + if ( kernel::manager< ConnectionManager >.get_user_set_delay_extrema() ) { errors.push_back( "Delay extrema have been set" ); } - if ( kernel::manager< SimulationManager >().has_been_simulated() ) + if ( kernel::manager< SimulationManager >.has_been_simulated() ) { errors.push_back( "Network has been simulated" ); } - if ( kernel::manager< ModelManager >().are_model_defaults_modified() ) + if ( kernel::manager< ModelManager >.are_model_defaults_modified() ) { errors.push_back( "Model defaults were modified" ); } - if ( kernel::manager< SPManager >().is_structural_plasticity_enabled() and n_threads > 1 ) + if ( kernel::manager< SPManager >.is_structural_plasticity_enabled() and n_threads > 1 ) { errors.push_back( "Structural plasticity enabled: multithreading cannot be enabled" ); } @@ -175,7 +175,7 @@ nest::VPManager::set_status( const DictionaryDatum& d ) LOG( M_WARNING, "VPManager::set_status()", msg ); } - kernel::manager< KernelManager >().change_number_of_threads( n_threads ); + kernel::manager< KernelManager >.change_number_of_threads( n_threads ); } } @@ -189,7 +189,7 @@ nest::VPManager::get_status( DictionaryDatum& d ) void nest::VPManager::set_num_threads( size_t n_threads ) { - assert( not( kernel::manager< SPManager >().is_structural_plasticity_enabled() and n_threads > 1 ) ); + assert( not( kernel::manager< SPManager >.is_structural_plasticity_enabled() and n_threads > 1 ) ); n_threads_ = n_threads; #ifdef _OPENMP diff --git a/nestkernel/vp_manager.h b/nestkernel/vp_manager.h index b7621eac7a..f500ff0cdb 100644 --- a/nestkernel/vp_manager.h +++ b/nestkernel/vp_manager.h @@ -29,6 +29,7 @@ // Includes from sli: #include "dictdatum.h" +#include "kernel_manager.h" #include "mpi_manager.h" #ifdef _OPENMP @@ -209,8 +210,7 @@ VPManager::assert_thread_parallel() const inline size_t VPManager::get_vp() const { - return kernel::manager< MPIManager >().get_rank() - + get_thread_id() * kernel::manager< MPIManager 
>().get_num_processes(); + return kernel::manager< MPIManager >.get_rank() + get_thread_id() * kernel::manager< MPIManager >.get_num_processes(); } inline size_t @@ -222,25 +222,25 @@ VPManager::node_id_to_vp( const size_t node_id ) const inline size_t VPManager::vp_to_thread( const size_t vp ) const { - return vp / kernel::manager< MPIManager >().get_num_processes(); + return vp / kernel::manager< MPIManager >.get_num_processes(); } inline size_t VPManager::get_num_virtual_processes() const { - return get_num_threads() * kernel::manager< MPIManager >().get_num_processes(); + return get_num_threads() * kernel::manager< MPIManager >.get_num_processes(); } inline bool VPManager::is_local_vp( const size_t vp ) const { - return kernel::manager< MPIManager >().get_process_id_of_vp( vp ) == kernel::manager< MPIManager >().get_rank(); + return kernel::manager< MPIManager >.get_process_id_of_vp( vp ) == kernel::manager< MPIManager >.get_rank(); } inline size_t VPManager::thread_to_vp( const size_t tid ) const { - return tid * kernel::manager< MPIManager >().get_num_processes() + kernel::manager< MPIManager >().get_rank(); + return tid * kernel::manager< MPIManager >.get_num_processes() + kernel::manager< MPIManager >.get_rank(); } inline bool @@ -266,7 +266,7 @@ VPManager::lid_to_node_id( const size_t lid ) const inline size_t VPManager::get_num_assigned_ranks_per_thread() const { - return std::ceil( static_cast< double >( kernel::manager< MPIManager >().get_num_processes() ) / n_threads_ ); + return std::ceil( static_cast< double >( kernel::manager< MPIManager >.get_num_processes() ) / n_threads_ ); } inline size_t @@ -283,9 +283,9 @@ VPManager::get_end_rank_per_thread( const size_t rank_start, const size_t num_as // if we have more threads than ranks, or if ranks can not be // distributed evenly on threads, we need to make sure, that all // threads care only about existing ranks - if ( rank_end > kernel::manager< MPIManager >().get_num_processes() ) + if ( rank_end > 
kernel::manager< MPIManager >.get_num_processes() ) { - rank_end = std::max( rank_start, kernel::manager< MPIManager >().get_num_processes() ); + rank_end = std::max( rank_start, kernel::manager< MPIManager >.get_num_processes() ); } return rank_end;