diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 6d3e4a1..c45c708 100755
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -4,13 +4,17 @@ Changelog
Version 1.2.0 [2021-02-22]
===========
- APP: HAT_DataManager_HMC_NRT
+ APP: HAT_DataOrganizer_HMC_NRT.py
- FIX: bugs related to xarray library;
- FIX: bugs related to progressbar library
+ APP: HAT_DataPublisher_HMC_NRT.py
+- FIX: bugs related to matplotlib=3.1.3 and cartopy=0.17 libraries
+ APP: HAT_RunAnalyzer_HMC_Main.py
+- Release for HMC version 3.x.x
Version 1.1.5 [2019-10-22]
===========
- APP: HAT_DataManager_HMC_NRT
+ APP: HAT_DataManager_HMC_NRT.py
- ADD: method to set the terrain variable in buffered datasets when it is missing
- ADD: information about the effective time steps used in computing gridded average or accumulated variable(s), skipping empty step(s)
- FIX: empty time steps condition in computing gridded average or accumulated variable(s)
@@ -19,37 +23,37 @@ Version 1.1.5 [2019-10-22]
Version 1.1.4 [2019-07-08]
===========
- APP: HAT_DataManager_HMC_NRT
+ APP: HAT_DataManager_HMC_NRT.py
- ADD: variables to datasets (soil moisture and accumulated rain time-series)
- ADD: management of tmp file(s) in a common system folder
- FIX: datasets finder according to the operative chain requests
Version 1.1.3 [2019-05-27]
===========
- APP: HAT_DataPublisher_HMC_NRT
+ APP: HAT_DataPublisher_HMC_NRT.py
- ADD: soil moisture and accumulated rain to time-series graph(s)
Version 1.1.2 [2019-05-16]
===========
- APP: HAT_DataManager_HMC_NRT
+ APP: HAT_DataManager_HMC_NRT.py
- FIX: bug about selecting gridded variables with mean equal to zero
- FIX: bug about out of range probabilistic runs
- APP: HAT_DataPublisher_HMC_NRT
+ APP: HAT_DataPublisher_HMC_NRT.py
- FIX: bugs in data seeking and data plotting (hydrapp time-series and maps product)
Version 1.1.1 [2019-03-04]
===========
- APP: HAT_DataManager_HMC_NRT
+ APP: HAT_DataManager_HMC_NRT.py
- FIX: nasty bugs that arose in operative mode
-Version 1.1.0 [2019-03-22]
+Version 1.1.0 [2019-02-22]
===========
- APP: HAT_DataPublisher_HMC_NRT
+ APP: HAT_DataPublisher_HMC_NRT.py
- Beta release
Version 1.0.0 [2019-02-13]
===========
- APP: HAT_DataManager_HMC_NRT
+ APP: HAT_DataManager_HMC_NRT.py
- Beta release
Version 0.0.1 [2019-01-16]
diff --git a/apps/DataPublisher/HAT_DataManager_HMC_NRT.py b/apps/Analyzer_Datasets/HAT_DataOrganizer_HMC_NRT.py
similarity index 99%
rename from apps/DataPublisher/HAT_DataManager_HMC_NRT.py
rename to apps/Analyzer_Datasets/HAT_DataOrganizer_HMC_NRT.py
index 1d6127a..a4a260c 100755
--- a/apps/DataPublisher/HAT_DataManager_HMC_NRT.py
+++ b/apps/Analyzer_Datasets/HAT_DataOrganizer_HMC_NRT.py
@@ -7,7 +7,7 @@
__library__ = 'hat'
General command line:
-python3 HAT_DataManager_HMC_NRT.py -settings_file configuration.json -time "YYYY-MM-DD HH:MM"
+python3 HAT_DataOrganizer_HMC_NRT.py -settings_file configuration.json -time "YYYY-MM-DD HH:MM"
Version(s) in changelog file
"""
# -------------------------------------------------------------------------------------
diff --git a/apps/DataPublisher/HAT_DataPublisher_HMC_NRT.py b/apps/Analyzer_Datasets/HAT_DataPublisher_HMC_NRT.py
similarity index 98%
rename from apps/DataPublisher/HAT_DataPublisher_HMC_NRT.py
rename to apps/Analyzer_Datasets/HAT_DataPublisher_HMC_NRT.py
index 09d8211..c914332 100755
--- a/apps/DataPublisher/HAT_DataPublisher_HMC_NRT.py
+++ b/apps/Analyzer_Datasets/HAT_DataPublisher_HMC_NRT.py
@@ -1,5 +1,5 @@
"""
-Hydrological Analysis Tool - DataPublisher for Hydrological Model Continuum - Near Real Time
+Hydrological Analysis Tool - Analyzer_Datasets for Hydrological Model Continuum - Near Real Time
__date__ = '20210218'
__version__ = '1.2.0'
@@ -7,7 +7,7 @@
__library__ = 'hat'
General command line:
-python3 HAT_DataPublisher_HMC_NRT.py -settings_file configuration.json -time YYYYMMDDHHMM
+python3 HAT_DataPublisher_HMC_NRT.py -settings_file configuration.json -time YYYYMMDDHHMM
Version(s) in changelog file
"""
@@ -45,7 +45,7 @@ def main():
# Version and algorithm information
sProgramVersion = '1.2.0'
sProjectName = 'HAT'
- sAlgType = 'DataPublisher'
+ sAlgType = 'Analyzer_Datasets'
sAlgName = 'HMC NearRealTime'
# Time algorithm information
dStartTime = time.time()
diff --git a/apps/DataPublisher/hat_datamanager_hmc_nrt_configuration_example_generic.json b/apps/Analyzer_Datasets/hat_dataorganizer_hmc_nrt_configuration_example_generic.json
similarity index 100%
rename from apps/DataPublisher/hat_datamanager_hmc_nrt_configuration_example_generic.json
rename to apps/Analyzer_Datasets/hat_dataorganizer_hmc_nrt_configuration_example_generic.json
diff --git a/apps/DataPublisher/hat_datapublisher_hmc_nrt_configuration_example_generic.json b/apps/Analyzer_Datasets/hat_datapublisher_hmc_nrt_configuration_example_generic.json
similarity index 100%
rename from apps/DataPublisher/hat_datapublisher_hmc_nrt_configuration_example_generic.json
rename to apps/Analyzer_Datasets/hat_datapublisher_hmc_nrt_configuration_example_generic.json
diff --git a/apps/Analyzer_Execution/HAT_RunAnalyzer_HMC_Main.py b/apps/Analyzer_Execution/HAT_RunAnalyzer_HMC_Main.py
new file mode 100755
index 0000000..46cf4e4
--- /dev/null
+++ b/apps/Analyzer_Execution/HAT_RunAnalyzer_HMC_Main.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python3
+"""
+Hydrological Analysis Tool - Analyzer_Execution
+
+__date__ = '20210225'
+__version__ = '1.5.0'
+__author__ =
+ 'Fabio Delogu (fabio.delogu@cimafoundation.org)',
+ 'Flavio Pignone (flavio.pignone@cimafoundation.org)',
+
+__library__ = 'HAT'
+
+General command line:
+python3 HAT_RunAnalyzer_HMC_Main.py -settings_file configuration.json -time "YYYY-MM-DD HH:MM"
+
+Version(s):
+20210225 (1.5.0) --> Beta release for HMC 3.x.x
+20200605 (1.0.0) --> Beta release for HMC 2.x.x
+"""
+
+# -------------------------------------------------------------------------------------
+# Complete library
+import logging
+import time
+import os
+
+from argparse import ArgumentParser
+
+from lib_data_io_json import read_file_settings
+from lib_utils_time import set_time
+
+from driver_data_io_static import DriverStatic
+from driver_data_io_dynamic import DriverDynamic
+# -------------------------------------------------------------------------------------
+
+# -------------------------------------------------------------------------------------
+# Algorithm information
+alg_version = '1.5.0'
+alg_release = '2021-02-25'
+alg_name = 'RUN ANALYZER'
+# Algorithm parameter(s)
+time_format = '%Y-%m-%d %H:%M'
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Script Main
+def main():
+
+ # -------------------------------------------------------------------------------------
+ # Get algorithm settings
+ alg_settings, alg_time = get_args()
+
+ # Set algorithm settings
+ data_settings = read_file_settings(alg_settings)
+
+ # Set algorithm logging
+ set_logging(logger_file=os.path.join(data_settings['log']['folder_name'],
+ data_settings['log']['file_name']))
+ # -------------------------------------------------------------------------------------
+
+ # -------------------------------------------------------------------------------------
+ # Info algorithm
+ logging.info(' ============================================================================ ')
+ logging.info(' ==> ' + alg_name + ' (Version: ' + alg_version + ' Release_Date: ' + alg_release + ')')
+ logging.info(' ==> START ... ')
+ logging.info(' ')
+
+ # Time algorithm information
+ start_time = time.time()
+ # -------------------------------------------------------------------------------------
+
+ # -------------------------------------------------------------------------------------
+ # Organize time information
+ time_now, time_exec, time_range = set_time(
+ time_run_args=alg_time, time_run_file=data_settings['time']['time_now'],
+ time_format=time_format,
+ time_period=data_settings['time']['time_period'],
+ time_frequency=data_settings['time']['time_frequency'],
+ time_rounding=data_settings['time']['time_rounding'])
+ # -------------------------------------------------------------------------------------
+
+ # -------------------------------------------------------------------------------------
+ # Driver and method of static datasets
+ driver_data_static = DriverStatic(
+ src_dict=data_settings['data']['static'],
+ alg_ancillary=data_settings['algorithm']['ancillary'],
+ alg_template_tags=data_settings['algorithm']['template']
+ )
+ static_data_collection = driver_data_static.organize_static()
+ # -------------------------------------------------------------------------------------
+
+ # -------------------------------------------------------------------------------------
+ # Iterate over time range
+ for time_step in time_range:
+
+ # -------------------------------------------------------------------------------------
+ # Driver and method of dynamic datasets
+ driver_data_dynamic = DriverDynamic(
+ time_now=time_now,
+ time_exec=time_exec,
+ time_run=time_step,
+ static_data_collection=static_data_collection,
+ src_dict=data_settings['data']['dynamic']['source'],
+ anc_dict=data_settings['data']['dynamic']['ancillary'],
+ dest_dict=data_settings['data']['dynamic']['destination'],
+ alg_ancillary=data_settings['algorithm']['ancillary'],
+ alg_template_tags=data_settings['algorithm']['template'],
+ flag_cleaning_dynamic_source=data_settings['algorithm']['flags']['cleaning_dynamic_source'],
+ flag_cleaning_dynamic_analysis=data_settings['algorithm']['flags']['cleaning_dynamic_analysis'],
+ flag_cleaning_dynamic_destination=data_settings['algorithm']['flags']['cleaning_dynamic_destination'],
+ flag_cleaning_dynamic_tmp=data_settings['algorithm']['flags']['cleaning_dynamic_tmp'])
+
+ dynamic_data_collection = driver_data_dynamic.organize_dynamic_data()
+ analyze_data_collection, analyze_warnings_collection = driver_data_dynamic.analyze_dynamic_data(
+ dynamic_data_collection)
+ driver_data_dynamic.dump_dynamic_data(analyze_data_collection, analyze_warnings_collection)
+ driver_data_dynamic.clean_dynamic_tmp()
+ # -------------------------------------------------------------------------------------
+
+ # -------------------------------------------------------------------------------------
+ # Info algorithm
+    alg_time_elapsed = round(time.time() - start_time, 1)
+
+ logging.info(' ')
+    logging.info('[' + alg_name + ' (Version ' + alg_version +
+                 ' - Release ' + alg_release + ')]')
+ logging.info(' ==> TIME ELAPSED: ' + str(alg_time_elapsed) + ' seconds')
+ logging.info(' ==> ... END')
+ logging.info(' ==> Bye, Bye')
+ logging.info(' ============================================================================ ')
+ # -------------------------------------------------------------------------------------
+
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to get script argument(s)
+def get_args():
+ parser_handle = ArgumentParser()
+ parser_handle.add_argument('-settings_file', action="store", dest="alg_settings")
+ parser_handle.add_argument('-time', action="store", dest="alg_time")
+ parser_values = parser_handle.parse_args()
+
+ if parser_values.alg_settings:
+ alg_settings = parser_values.alg_settings
+ else:
+ alg_settings = 'configuration.json'
+
+ if parser_values.alg_time:
+ alg_time = parser_values.alg_time
+ else:
+ alg_time = None
+
+ return alg_settings, alg_time
+
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to set logging information
+def set_logging(logger_file='log.txt', logger_format=None):
+ if logger_format is None:
+ logger_format = '%(asctime)s %(name)-12s %(levelname)-8s ' \
+ '%(filename)s:[%(lineno)-6s - %(funcName)20s()] %(message)s'
+
+ # Remove old logging file
+ if os.path.exists(logger_file):
+ os.remove(logger_file)
+
+ # Set level of root debugger
+ logging.root.setLevel(logging.DEBUG)
+
+ # Open logging basic configuration
+ logging.basicConfig(level=logging.DEBUG, format=logger_format, filename=logger_file, filemode='w')
+
+ # Set logger handle
+ logger_handle_1 = logging.FileHandler(logger_file, 'w')
+ logger_handle_2 = logging.StreamHandler()
+ # Set logger level
+ logger_handle_1.setLevel(logging.DEBUG)
+ logger_handle_2.setLevel(logging.DEBUG)
+ # Set logger formatter
+ logger_formatter = logging.Formatter(logger_format)
+ logger_handle_1.setFormatter(logger_formatter)
+ logger_handle_2.setFormatter(logger_formatter)
+
+ # Add handle to logging
+ logging.getLogger('').addHandler(logger_handle_1)
+ logging.getLogger('').addHandler(logger_handle_2)
+
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Call script from external library
+if __name__ == '__main__':
+ main()
+# -------------------------------------------------------------------------------------
diff --git a/apps/Analyzer_Execution/driver_data_io_dynamic.py b/apps/Analyzer_Execution/driver_data_io_dynamic.py
new file mode 100755
index 0000000..8de8eb6
--- /dev/null
+++ b/apps/Analyzer_Execution/driver_data_io_dynamic.py
@@ -0,0 +1,768 @@
+"""
+Class Features
+
+Name: driver_data_io_dynamic
+Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
+Date: '20210225'
+Version: '1.0.0'
+"""
+
+######################################################################################
+# Library
+import logging
+import os
+
+import pandas as pd
+
+from copy import deepcopy
+
+from lib_utils_io import read_obj, write_obj
+from lib_utils_system import fill_tags2string, make_folder
+
+from lib_utils_exec import read_file_execution_info
+from lib_data_io_json import read_file_hydrograph_ts, read_file_hydrograph_info
+from lib_data_io_html import write_file_summary
+from lib_data_io_ascii import write_file_warnings
+
+from lib_data_analysis import analyze_hydrograph_datasets, analyze_time_info
+
+# Debug
+# import matplotlib.pylab as plt
+######################################################################################
+
+
+# -------------------------------------------------------------------------------------
+# Class DriverDynamic
+class DriverDynamic:
+
+ # -------------------------------------------------------------------------------------
+ # Initialize class
+ def __init__(self, time_now, time_exec, time_run, src_dict, anc_dict, dest_dict, static_data_collection,
+ alg_ancillary=None, alg_template_tags=None,
+ tag_section_data='section_data', tag_execution_data='execution_data',
+ tag_src_reference_start='run_reference_start', tag_src_reference_end='run_reference_end',
+ tag_anc_source='run_source', tag_anc_analysis='run_analysis', tag_anc_destination='run_destination',
+ tag_dest_summary='run_summary', tag_dest_warnings='run_warnings',
+ tag_section_name='section_name', tag_basin_name='basin_name',
+ flag_cleaning_dynamic_source=True, flag_cleaning_dynamic_analysis=True,
+ flag_cleaning_dynamic_destination=True, flag_cleaning_dynamic_tmp=True, string_sep=':'):
+
+ self.time_now = time_now
+ self.time_exec = time_exec
+ self.time_run = time_run
+
+ self.tag_section_data = tag_section_data
+ self.tag_execution_data = tag_execution_data
+
+ self.tag_src_reference_start = tag_src_reference_start
+ self.tag_src_reference_end = tag_src_reference_end
+ self.tag_anc_source = tag_anc_source
+ self.tag_anc_analysis = tag_anc_analysis
+ self.tag_anc_destination = tag_anc_destination
+ self.tag_dest_summary = tag_dest_summary
+ self.tag_dest_warnings = tag_dest_warnings
+
+ self.tag_section_name = tag_section_name
+ self.tag_basin_name = tag_basin_name
+ self.alg_ancillary = alg_ancillary
+ self.alg_template_tags = alg_template_tags
+
+ self.string_sep = string_sep
+
+ self.section_data_collection = static_data_collection[self.tag_section_data]
+ self.execution_data_collection = static_data_collection[self.tag_execution_data]
+
+ self.domain_name = self.alg_ancillary['domain_name']
+
+ self.file_name_tag = 'file_name'
+ self.folder_name_tag = 'folder_name'
+
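+        # Collect section and basin name lists from the static section data and compose the
+        # outlet identifiers by joining basin and section names (lower case)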
+ self.section_name_list = self.collect_section_list(
+ columns_tag_in=['section_name'], columns_tag_out=[self.tag_section_name])[self.tag_section_name]
+ self.basin_name_list = self.collect_section_list(
+ columns_tag_in=['section_domain'], columns_tag_out=[self.tag_basin_name])[self.tag_basin_name]
+
+ self.outlet_name_list = [self.string_sep.join(
+ [b.lower(), s.lower()]) for b, s in zip(self.basin_name_list, self.section_name_list)]
+
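+        # Folder and file name templates of source, ancillary and destination datasets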
+ self.folder_name_src_ref_start = src_dict[self.tag_src_reference_start][self.folder_name_tag]
+ self.file_name_src_ref_start = src_dict[self.tag_src_reference_start][self.file_name_tag]
+ self.folder_name_src_ref_end = src_dict[self.tag_src_reference_end][self.folder_name_tag]
+ self.file_name_src_ref_end = src_dict[self.tag_src_reference_end][self.file_name_tag]
+
+ self.folder_name_anc_source = anc_dict[self.tag_anc_source][self.folder_name_tag]
+ self.file_name_anc_source = anc_dict[self.tag_anc_source][self.file_name_tag]
+ self.folder_name_anc_analysis = anc_dict[self.tag_anc_analysis][self.folder_name_tag]
+ self.file_name_anc_analysis = anc_dict[self.tag_anc_analysis][self.file_name_tag]
+ self.folder_name_anc_destination = anc_dict[self.tag_anc_destination][self.folder_name_tag]
+ self.file_name_anc_destination = anc_dict[self.tag_anc_destination][self.file_name_tag]
+
+ self.folder_name_dest_summary = dest_dict[self.tag_dest_summary][self.folder_name_tag]
+ self.file_name_dest_summary = dest_dict[self.tag_dest_summary][self.file_name_tag]
+ self.folder_name_dest_warnings = dest_dict[self.tag_dest_warnings][self.folder_name_tag]
+ self.file_name_dest_warnings = dest_dict[self.tag_dest_warnings][self.file_name_tag]
+
+ self.exec_name_list = list(self.execution_data_collection.index)
+ self.exec_values_list = list(self.execution_data_collection.values)
+ self.exec_type_list = list(self.execution_data_collection.columns)
+
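+        # Expand, for each execution, the source reference templates (run start / run end) into
+        # lists of candidate file paths, filling the execution, domain, basin and section tags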
+ file_collections_src_ref_start = {}
+ file_collections_src_ref_end = {}
+ for exec_name, exec_fields in zip(self.exec_name_list, self.exec_values_list):
+
+ file_extra_variables = {'domain_name': self.domain_name}
+ for exec_type_step, exec_field_step in zip(self.exec_type_list, exec_fields):
+ file_extra_variables[exec_type_step] = exec_field_step
+ file_extra_collections = {self.tag_basin_name: self.basin_name_list, self.tag_section_name: self.section_name_list}
+
+ file_list_src_ref_start = self.define_file_string(
+ None, os.path.join(self.folder_name_src_ref_start, self.file_name_src_ref_start),
+ file_extra_variables=file_extra_variables, file_extra_collections=file_extra_collections)
+
+ file_list_src_ref_end = self.define_file_string(
+ None, os.path.join(self.folder_name_src_ref_end, self.file_name_src_ref_end),
+ file_extra_variables=file_extra_variables, file_extra_collections=file_extra_collections)
+
+ file_collections_src_ref_start[exec_name] = pd.Series(file_list_src_ref_start).drop_duplicates().tolist()
+ file_collections_src_ref_end[exec_name] = pd.Series(file_list_src_ref_end).drop_duplicates().tolist()
+
+ self.file_collections_src_ref_start = file_collections_src_ref_start
+ self.file_collections_src_ref_end = file_collections_src_ref_end
+
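+        # Ancillary and destination file paths, filled with the run time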
+ self.file_path_anc_source = self.define_file_string(
+ self.time_run, os.path.join(self.folder_name_anc_source, self.file_name_anc_source),
+ file_extra_variables=None, file_extra_collections=None)
+ self.file_path_anc_analysis = self.define_file_string(
+ self.time_run, os.path.join(self.folder_name_anc_analysis, self.file_name_anc_analysis),
+ file_extra_variables=None, file_extra_collections=None)
+ self.file_path_anc_destination = self.define_file_string(
+ self.time_run, os.path.join(self.folder_name_anc_destination, self.file_name_anc_destination),
+ file_extra_variables=None, file_extra_collections=None)
+
+ self.file_path_dest_summary = self.define_file_string(
+ self.time_run, os.path.join(self.folder_name_dest_summary, self.file_name_dest_summary),
+ file_extra_variables=None, file_extra_collections=None)
+ self.file_path_dest_warnings = self.define_file_string(
+ self.time_run, os.path.join(self.folder_name_dest_warnings, self.file_name_dest_warnings),
+ file_extra_variables=None, file_extra_collections=None)
+
+ self.flag_cleaning_dynamic_source = flag_cleaning_dynamic_source
+ self.flag_cleaning_dynamic_analysis = flag_cleaning_dynamic_analysis
+ self.flag_cleaning_dynamic_destination = flag_cleaning_dynamic_destination
+ self.flag_cleaning_dynamic_tmp = flag_cleaning_dynamic_tmp
+
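+        # Tags used to read the execution time settings and to label the analysis results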
+ self.tag_time_period = 'time_period'
+ self.tag_time_frequency = 'time_frequency'
+ self.tag_time_rounding = 'time_rounding'
+ self.tag_run_description = 'run_description'
+ self.tag_run_var_sim = 'run_variable_sim'
+ self.tag_run_var_obs = 'run_variable_obs'
+
+ self.tag_run_ref_start = 'run_start'
+ self.tag_run_ref_end = 'run_end'
+ self.tag_run_ref_elapsed = 'run_elapsed'
+ self.tag_run_datasets_section = 'run_datasets_section'
+ self.tag_run_datasets_attrs = 'run_datasets_attrs'
+ self.tag_run_n = 'run_n'
+ self.tag_section_n = 'section_n'
+ self.tag_run_type = 'run_type'
+
+ self.tag_summary_alert_value = 'alert_value'
+ self.tag_summary_alert_index = 'alert_index'
+ self.tag_summary_alert_section = 'alert_section'
+ self.tag_summary_alert_thr = 'alert_thr'
+ self.tag_summary_alarm_value = 'alarm_value'
+ self.tag_summary_alarm_index = 'alarm_index'
+ self.tag_summary_alarm_section = 'alarm_section'
+ self.tag_summary_alarm_thr = 'alarm_thr'
+ self.tag_summary_run_type = 'run_type'
+
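+        # Warnings bulletin structure: for each group, "key_ref" is the series used to locate the
+        # maximum value and "key_list" lists the fields reported at that index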
+ self.warnings_structure = {
+ 'alert_datasets': {
+ 'key_ref': self.tag_summary_alert_value,
+ 'key_list': [self.tag_summary_alert_value, self.tag_summary_alert_index,
+ self.tag_summary_alert_thr, self.tag_summary_run_type]
+ },
+ 'alarm_datasets': {
+                'key_ref': self.tag_summary_alarm_value,
+ 'key_list': [self.tag_summary_alarm_value, self.tag_summary_alarm_index,
+ self.tag_summary_alarm_thr, self.tag_summary_run_type]}
+ }
+
+ self.tag_info = 'info'
+ self.tag_data = 'data'
+ self.tag_warnings = 'warnings'
+ self.tag_time_ref_start = 'time_start'
+ self.tag_time_ref_end = 'time_end'
+
+ self.tag_file_time_create = 'time_create'
+ # -------------------------------------------------------------------------------------
+
+ # -------------------------------------------------------------------------------------
+ # Method to define filename
+ def define_file_string(self, time, file_path_raw,
+ file_extra_variables=None, file_extra_collections=None):
+
+        if file_extra_variables is None:
+            file_extra_variables = {}
+
+ alg_template_tags = self.alg_template_tags
+
+ alg_template_values_raw = {
+ 'source_datetime': time,
+ 'source_sub_path_time': time,
+ 'ancillary_datetime': time,
+ 'ancillary_sub_path_time': time,
+ 'destination_datetime': time,
+ 'destination_sub_path_time': time,
+ 'run_name': None,
+ 'run_sub_path_datasets': None,
+ 'run_sub_path_execution': None,
+ 'domain_name': None,
+ 'section_name': None,
+ 'basin_name': None
+ }
+
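+        # If extra collections are provided (e.g. basin and section name lists), expand the raw path
+        # template into one file path per element; otherwise fill the template once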
+ if file_extra_collections is not None:
+ file_n = None
+ file_values_list = []
+ for id_values, file_values in enumerate(file_extra_collections.values()):
+ if file_n is None:
+ file_n = file_values.__len__()
+ file_values_list.append(file_values)
+ file_keys_list = []
+ for id_key, file_key in enumerate(file_extra_collections.keys()):
+ file_keys_list.append([file_key] * file_n)
+
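+            # Regroup tags by element index: file_collections[i] = [key_1, value_1, key_2, value_2, ...]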
+ file_collections = {}
+ for file_id, (file_keys_step, file_values_step) in enumerate(zip(file_keys_list, file_values_list)):
+ for id, (key_step, value_step) in enumerate(zip(file_keys_step, file_values_step)):
+ if id not in list(file_collections.keys()):
+ file_collections[id] = [key_step, value_step]
+ else:
+ list_tmp = file_collections[id]
+ list_tmp.extend([key_step, value_step])
+ file_collections[id] = list_tmp
+
+ file_path_obj = []
+ for file_fields in file_collections.values():
+ file_keys = file_fields[0::2]
+ file_values = file_fields[1::2]
+
+ alg_template_values_def = deepcopy(alg_template_values_raw)
+ for file_key, file_value in zip(file_keys, file_values):
+ alg_template_values_def[file_key] = file_value
+
+ alg_template_values = {**alg_template_values_def, **file_extra_variables}
+
+ file_path_def = fill_tags2string(file_path_raw, alg_template_tags, alg_template_values)
+ file_path_obj.append(file_path_def)
+ else:
+
+ file_path_obj = fill_tags2string(file_path_raw, alg_template_tags, alg_template_values_raw)
+
+ return file_path_obj
+
+ # -------------------------------------------------------------------------------------
+
+ # -------------------------------------------------------------------------------------
+ # Method to collect section information
+ def collect_section_list(self, columns_tag_in=None, columns_tag_out=None):
+
+ if columns_tag_in is None:
+ columns_tag_in = ['section_domain', 'section_name']
+ if columns_tag_out is None:
+ columns_tag_out = ['section_domain', 'section_name']
+
+ section_data_collection = self.section_data_collection
+
+ section_dict = {}
+ for columns_in_step, columns_out_step in zip(columns_tag_in, columns_tag_out):
+ section_data_step = section_data_collection[columns_in_step].values.tolist()
+ section_dict[columns_out_step] = section_data_step
+ return section_dict
+
+ # -------------------------------------------------------------------------------------
+
+ # -------------------------------------------------------------------------------------
+ # Method to clean dynamic tmp
+ def clean_dynamic_tmp(self):
+
+ flag_cleaning_tmp = self.flag_cleaning_dynamic_tmp
+
+ file_path_anc_source = self.file_path_anc_source
+ file_path_anc_analysis = self.file_path_anc_analysis
+ file_path_anc_destination = self.file_path_anc_destination
+
+ file_path_anc_list = [file_path_anc_source, file_path_anc_analysis, file_path_anc_destination]
+ if flag_cleaning_tmp:
+ for file_path_anc_step in file_path_anc_list:
+ if os.path.exists(file_path_anc_step):
+ os.remove(file_path_anc_step)
+
+ # -------------------------------------------------------------------------------------
+
+ # -------------------------------------------------------------------------------------
+ # Method to dump dynamic data
+ def dump_dynamic_data(self, analysis_datasets_collections, analysis_warnings_collections):
+
+ time_exec = self.time_exec
+ time_run = self.time_run
+
+ exec_name_list = self.exec_name_list
+ outlet_name_list = self.outlet_name_list
+ exec_collections = self.execution_data_collection
+
+ warnings_structure = self.warnings_structure
+
+ file_path_anc = self.file_path_anc_destination
+ file_path_dest_summary = self.file_path_dest_summary
+ file_path_dest_warnings = self.file_path_dest_warnings
+
+ flag_clean_anc = self.flag_cleaning_dynamic_destination
+
+ logging.info(' ---> Dump dynamic datasets [' + str(time_run) + '] ... ')
+
+ if flag_clean_anc:
+ if os.path.exists(file_path_anc):
+ os.remove(file_path_anc)
+
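+        # Build, for each execution, a per-day summary of alert/alarm maxima over all sections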
+ summary_collections = {}
+ for exec_name_step in exec_name_list:
+
+ logging.info(' ----> Execution ' + exec_name_step + ' ... ')
+ summary_collections[exec_name_step] = {}
+
+            attrs_exec = exec_collections[exec_collections.index == exec_name_step].to_dict('records')[0]
+
+ data_analysis = analysis_datasets_collections[exec_name_step][self.tag_run_datasets_section]
+
+ if data_analysis is not None:
+
+ attrs_analysis = analysis_datasets_collections[exec_name_step][self.tag_run_datasets_attrs]
+ attrs_analysis[self.tag_run_ref_start] = analysis_datasets_collections[exec_name_step][self.tag_run_ref_start]
+ attrs_analysis[self.tag_run_ref_end] = analysis_datasets_collections[exec_name_step][self.tag_run_ref_end]
+ attrs_analysis[self.tag_run_ref_elapsed] = analysis_datasets_collections[exec_name_step][self.tag_run_ref_elapsed]
+
+ summary_datasets = {}
+ for section_name, section_data in data_analysis.items():
+
+ logging.info(' -----> Section ' + section_name + ' ... ')
+
+ attrs_datasets = {**attrs_analysis, **attrs_exec}
+
+ for time_step, data_step in section_data.items():
+
+ logging.info(' ------> Time ' + str(time_step) + ' ... ')
+
+ max_alert_value = data_step[self.tag_summary_alert_value]
+ max_alert_idx = data_step[self.tag_summary_alert_index]
+ max_alarm_value = data_step[self.tag_summary_alarm_value]
+ max_alarm_idx = data_step[self.tag_summary_alarm_index]
+ thr_alert_value = data_step[self.tag_summary_alert_thr]
+ thr_alarm_value = data_step[self.tag_summary_alarm_thr]
+
+ if time_step not in list(summary_datasets.keys()):
+ summary_datasets[time_step] = {}
+
+ summary_datasets[time_step][self.tag_summary_alert_value] = [max_alert_value]
+ summary_datasets[time_step][self.tag_summary_alert_index] = [max_alert_idx]
+ summary_datasets[time_step][self.tag_summary_alert_section] = [section_name]
+ summary_datasets[time_step][self.tag_summary_alert_thr] = [thr_alert_value]
+
+ summary_datasets[time_step][self.tag_summary_alarm_value] = [max_alarm_value]
+ summary_datasets[time_step][self.tag_summary_alarm_index] = [max_alarm_idx]
+ summary_datasets[time_step][self.tag_summary_alarm_section] = [section_name]
+ summary_datasets[time_step][self.tag_summary_alarm_thr] = [thr_alarm_value]
+
+ summary_datasets[time_step][self.tag_summary_run_type] = [exec_name_step]
+
+ else:
+
+ tmp_datasets = deepcopy(summary_datasets[time_step])
+
+ tmp_alert_value = tmp_datasets[self.tag_summary_alert_value]
+ tmp_alert_idx = tmp_datasets[self.tag_summary_alert_index]
+ tmp_alert_section = tmp_datasets[self.tag_summary_alert_section]
+ tmp_alert_thr = tmp_datasets[self.tag_summary_alert_thr]
+ tmp_alert_value.append(max_alert_value)
+ tmp_alert_idx.append(max_alert_idx)
+ tmp_alert_section.append(section_name)
+ tmp_alert_thr.append(thr_alert_value)
+
+ summary_datasets[time_step][self.tag_summary_alert_value] = tmp_alert_value
+ summary_datasets[time_step][self.tag_summary_alert_index] = tmp_alert_idx
+ summary_datasets[time_step][self.tag_summary_alert_section] = tmp_alert_section
+ summary_datasets[time_step][self.tag_summary_alert_thr] = tmp_alert_thr
+
+ tmp_alarm_value = tmp_datasets[self.tag_summary_alarm_value]
+ tmp_alarm_idx = tmp_datasets[self.tag_summary_alarm_index]
+ tmp_alarm_section = tmp_datasets[self.tag_summary_alarm_section]
+ tmp_alarm_thr = tmp_datasets[self.tag_summary_alarm_thr]
+ tmp_alarm_value.append(max_alarm_value)
+ tmp_alarm_idx.append(max_alarm_idx)
+ tmp_alarm_section.append(section_name)
+ tmp_alarm_thr.append(thr_alarm_value)
+
+ summary_datasets[time_step][self.tag_summary_alarm_value] = tmp_alarm_value
+ summary_datasets[time_step][self.tag_summary_alarm_index] = tmp_alarm_idx
+ summary_datasets[time_step][self.tag_summary_alarm_section] = tmp_alarm_section
+ summary_datasets[time_step][self.tag_summary_alarm_thr] = tmp_alarm_thr
+
+ tmp_run_type = tmp_datasets[self.tag_summary_run_type]
+ tmp_run_type.append(exec_name_step)
+ summary_datasets[time_step][self.tag_summary_run_type] = tmp_run_type
+
+ logging.info(' ------> Time ' + str(time_step) + ' ... DONE')
+
+ logging.info(' -----> Section ' + section_name + ' ... DONE')
+
+ else:
+ summary_datasets = None
+ attrs_datasets = deepcopy(attrs_exec)
+
+ summary_collections[exec_name_step][self.tag_data] = {}
+ summary_collections[exec_name_step][self.tag_data] = summary_datasets
+ summary_collections[exec_name_step][self.tag_info] = {}
+ summary_collections[exec_name_step][self.tag_info] = attrs_datasets
+
+ logging.info(' ----> Execution ' + exec_name_step + ' ... DONE')
+
+ # Save summary bulletin
+ folder_name_dest_summary, file_name_dest_summary = os.path.split(file_path_dest_summary)
+ make_folder(folder_name_dest_summary)
+ logging.info(' ----> Save summary bulletin ' + file_name_dest_summary + ' ... ')
+
+ write_file_summary(time_run, time_exec, time_format='%Y-%m-%d %H:%M', html_name=file_path_dest_summary,
+ run_name=exec_name_list, run_summary=summary_collections,
+ tag_summary_data=self.tag_data, tag_summary_info=self.tag_info,
+ tag_alert_section=self.tag_summary_alert_section,
+ tag_alert_value=self.tag_summary_alert_value,
+ tag_alert_index=self.tag_summary_alert_index,
+ tag_alarm_section=self.tag_summary_alarm_section,
+ tag_alarm_value=self.tag_summary_alarm_value,
+ tag_alarm_index=self.tag_summary_alarm_index)
+
+ logging.info(' ----> Save summary bulletin ' + file_name_dest_summary + ' ... DONE')
+
+ # Save warnings bulletin
+ warnings_collections = {}
+ for warnings_key, warnings_data in analysis_warnings_collections.items():
+ warnings_collections[warnings_key] = {}
+
+ for summary_key, summary_fields in warnings_structure.items():
+
+ key_ref = summary_fields['key_ref']
+ key_list = summary_fields['key_list']
+
+ values_ref = warnings_data[key_ref]
+ index_ref = int(values_ref.index(max(values_ref)))
+
+ warnings_collections[warnings_key][summary_key] = {}
+ for key_step in key_list:
+ value_step = warnings_data[key_step][index_ref]
+ warnings_collections[warnings_key][summary_key][key_step] = value_step
+
+ folder_name_dest_warnings, file_name_dest_warnings = os.path.split(file_path_dest_warnings)
+ make_folder(folder_name_dest_warnings)
+
+ logging.info(' ----> Save warnings bulletin ' + file_name_dest_warnings + ' ... ')
+
+ write_file_warnings(file_path_dest_warnings, warnings_collections, warnings_structure)
+
+ logging.info(' ----> Save warnings bulletin ' + file_name_dest_warnings + ' ... DONE')
+
+ logging.info(' ---> Dump dynamic datasets [' + str(time_run) + '] ... DONE')
+
+ # -------------------------------------------------------------------------------------
+
+ # -------------------------------------------------------------------------------------
+ # Method to define time period search
+ @staticmethod
+ def define_time_search(time_step, time_period=1, time_frequency='H', time_rounding='H', time_reverse=False):
+
+ time_step = time_step.floor(time_rounding)
+ time_range = pd.date_range(end=time_step, periods=time_period, freq=time_frequency)
+
+ if time_reverse:
+ time_range = time_range[::-1]
+
+ return time_range
+ # -------------------------------------------------------------------------------------
+
+ # -------------------------------------------------------------------------------------
+ # Method to analyze dynamic data
+ def analyze_dynamic_data(self, file_workspace):
+
+ time = self.time_run
+ exec_name_list = self.exec_name_list
+
+ exec_collections = self.execution_data_collection
+
+ file_path_anc = self.file_path_anc_analysis
+ flag_clean_anc = self.flag_cleaning_dynamic_analysis
+
+ logging.info(' ---> Analyze dynamic datasets [' + str(time) + '] ... ')
+
+ if flag_clean_anc:
+ if os.path.exists(file_path_anc):
+ os.remove(file_path_anc)
+
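+        # Run the analysis only if the ancillary analysis file is not available; otherwise reload it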
+ if not os.path.exists(file_path_anc):
+
+ analyze_warnings_collections = {}
+ analyze_datasets_collections = {}
+ for exec_name_step in exec_name_list:
+
+ logging.info(' ----> Execution ' + exec_name_step + ' ... ')
+
+ exec_data = exec_collections[exec_collections.index == exec_name_step]
+ exec_run_var_sim = exec_data[self.tag_run_var_sim].values[0]
+ exec_run_var_obs = exec_data[self.tag_run_var_obs].values[0]
+
+ analyze_datasets_collections[exec_name_step] = {}
+ if exec_name_step in list(file_workspace.keys()):
+
+ exec_workspace = file_workspace[exec_name_step]
+
+ exec_info_collection = deepcopy(exec_workspace[self.tag_info])
+ exec_data_collection = deepcopy(exec_workspace[self.tag_data])
+
+                    if (exec_info_collection[self.tag_run_ref_start] is not None) and \
+                            (exec_info_collection[self.tag_run_ref_end] is not None):
+
+ analysis_time_creation_start, analysis_time_creation_end, \
+ analysis_time_creation_elapsed = analyze_time_info(
+ exec_info_collection[self.tag_run_ref_start],
+ exec_info_collection[self.tag_run_ref_end],
+ tag_file_create=self.tag_file_time_create)
+
+ analysis_datasets_section, analysis_warnings_section, \
+ attrs_datasets_section = analyze_hydrograph_datasets(
+ exec_name_step, exec_data_collection,
+ tag_discharge_observed=exec_run_var_obs, tag_discharge_simulated=exec_run_var_sim,
+ tag_discharge_thr_alert=self.tag_summary_alert_thr,
+ tag_discharge_thr_alarm=self.tag_summary_alarm_thr,
+ tag_discharge_max_alert_value=self.tag_summary_alert_value,
+ tag_discharge_max_alert_index=self.tag_summary_alert_index,
+ tag_discharge_max_alarm_value=self.tag_summary_alarm_value,
+ tag_discharge_max_alarm_index=self.tag_summary_alarm_index,
+ tag_run_n=self.tag_run_n, tag_section_n=self.tag_section_n)
+
+ logging.info(' ----> Execution ' + exec_name_step + ' ... DONE')
+ else:
+ analysis_time_creation_start = None
+ analysis_time_creation_end = None
+ analysis_time_creation_elapsed = None
+ analysis_datasets_section = None
+ analysis_warnings_section = None
+ attrs_datasets_section = None
+ logging.info(' ----> Execution ' + exec_name_step + ' ... SKIPPED. Execution datasets are null')
+ else:
+ analysis_time_creation_start = None
+ analysis_time_creation_end = None
+ analysis_time_creation_elapsed = None
+ analysis_datasets_section = None
+ analysis_warnings_section = None
+ attrs_datasets_section = None
+ logging.info(' ----> Execution ' + exec_name_step + ' ... SKIPPED. Execution not available')
+
+ analyze_datasets_collections[exec_name_step][self.tag_run_ref_start] = analysis_time_creation_start
+ analyze_datasets_collections[exec_name_step][self.tag_run_ref_end] = analysis_time_creation_end
+ analyze_datasets_collections[exec_name_step][self.tag_run_ref_elapsed] = analysis_time_creation_elapsed
+ analyze_datasets_collections[exec_name_step][self.tag_run_datasets_section] = analysis_datasets_section
+ analyze_datasets_collections[exec_name_step][self.tag_run_datasets_attrs] = attrs_datasets_section
+
+ if analysis_warnings_section is not None:
+ for warnings_key, warnings_data in analysis_warnings_section.items():
+
+ if warnings_key not in list(analyze_warnings_collections.keys()):
+ analyze_warnings_collections[warnings_key] = {}
+
+ for var_key, var_value in warnings_data.items():
+ if var_key not in list(analyze_warnings_collections[warnings_key].keys()):
+ analyze_warnings_collections[warnings_key][var_key] = var_value
+ else:
+ warning_value_tmp = analyze_warnings_collections[warnings_key][var_key]
+ warning_value_tmp.extend(var_value)
+ analyze_warnings_collections[warnings_key][var_key] = warning_value_tmp
+
+ folder_name_anc, file_name_anc = os.path.split(file_path_anc)
+ make_folder(folder_name_anc)
+
+ analyze_collections = {self.tag_data: analyze_datasets_collections,
+ self.tag_warnings: analyze_warnings_collections}
+
+ write_obj(file_path_anc, analyze_collections)
+
+ else:
+
+ analyze_collections = read_obj(file_path_anc)
+ analyze_datasets_collections = analyze_collections[self.tag_data]
+ analyze_warnings_collections = analyze_collections[self.tag_warnings]
+
+ logging.info(' ---> Analyze dynamic datasets [' + str(time) + '] ... DONE')
+
+ return analyze_datasets_collections, analyze_warnings_collections
+
+ # -------------------------------------------------------------------------------------
+
+ # -------------------------------------------------------------------------------------
+ # Method to organize dynamic data
+ def organize_dynamic_data(self):
+
+ time = self.time_run
+
+ outlet_name_list = self.outlet_name_list
+ exec_name_list = self.exec_name_list
+
+ exec_collections = self.execution_data_collection
+
+ file_collections_src_ref_start = self.file_collections_src_ref_start
+ file_collections_src_ref_end = self.file_collections_src_ref_end
+
+ file_path_anc = self.file_path_anc_source
+ flag_clean_anc = self.flag_cleaning_dynamic_source
+
+ logging.info(' ---> Organize dynamic datasets [' + str(time) + '] ... ')
+
+ if flag_clean_anc:
+ if os.path.exists(file_path_anc):
+ os.remove(file_path_anc)
+
+ if not os.path.exists(file_path_anc):
+
+ file_workspace = {}
+ for exec_name_step in exec_name_list:
+
+ logging.info(' ----> Execution ' + exec_name_step + ' ... ')
+
+ file_path_src_ref_start = file_collections_src_ref_start[exec_name_step]
+ file_path_src_ref_end = file_collections_src_ref_end[exec_name_step]
+
+ exec_data = exec_collections[exec_collections.index == exec_name_step]
+ exec_time_period = exec_data[self.tag_time_period].values[0]
+ exec_time_frequency = exec_data[self.tag_time_frequency].values[0]
+ exec_time_rounding = exec_data[self.tag_time_rounding].values[0]
+
+ time_search = self.define_time_search(time,
+ time_period=exec_time_period, time_frequency=exec_time_frequency,
+ time_rounding=exec_time_rounding, time_reverse=True)
+
+ file_workspace[exec_name_step] = {}
+ file_workspace[exec_name_step][self.tag_info] = {}
+ file_workspace[exec_name_step][self.tag_data] = {}
+ file_workspace[exec_name_step][self.tag_info][self.tag_run_ref_start] = None
+ file_workspace[exec_name_step][self.tag_info][self.tag_time_ref_start] = None
+ file_workspace[exec_name_step][self.tag_info][self.tag_run_ref_end] = None
+ file_workspace[exec_name_step][self.tag_info][self.tag_time_ref_end] = None
+ file_workspace[exec_name_step][self.tag_data] = None
+
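+                # Search backwards in time for the most recent run start information (executable info)
+                # and run end datasets (hydrograph files); each reference is stored only once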
+ for time_step in time_search:
+
+ logging.info(' -----> Time ' + str(time_step) + ' ... ')
+
+ logging.info(' ------> Reference run_start datasets ... ')
+                    if file_workspace[exec_name_step][self.tag_info][self.tag_run_ref_start] is None:
+ if file_path_src_ref_start.__len__() == 1:
+
+ file_path_src_start_raw = file_path_src_ref_start[0]
+ file_path_src_start_def = self.define_file_string(time_step, file_path_src_start_raw)
+
+ if file_path_src_start_def.endswith('.x'):
+ if os.path.exists(file_path_src_start_def):
+ file_info_start = read_file_execution_info(file_path_src_start_def)
+ else:
+ file_info_start = None
+ else:
+ logging.error(' ------> Reference run_start datasets ... FAILED')
+ raise NotImplementedError('Case not implemented in source reference type start')
+
+ file_check_start = all(elem is None for elem in list([file_info_start]))
+ if file_check_start:
+ file_insert_start = False
+ logging.info(' ------> Reference run_start datasets ... SKIPPED. Datasets not defined')
+ else:
+ file_insert_start = True
+ logging.info(' ------> Reference run_start datasets ... DONE')
+ else:
+ logging.error(' ------> Reference run_start datasets ... FAILED')
+ raise NotImplementedError('Case not implemented in source reference file start')
+ else:
+ logging.info(' ------> Reference run_start datasets ... SKIPPED. Datasets already selected')
+ file_insert_start = False
+
+ logging.info(' ------> Reference run_end datasets ... ')
+                    if file_workspace[exec_name_step][self.tag_info][self.tag_run_ref_end] is None:
+ if file_path_src_ref_end.__len__() == 1:
+ logging.error(' ------> Reference run_end datasets ... FAILED')
+ raise NotImplementedError('Case not implemented in source reference file end')
+ elif file_path_src_ref_end.__len__() > 1:
+
+ if file_path_src_ref_end[0].endswith('.json'):
+
+ file_info_end = {}
+ file_data_end = {}
+ for outlet_name_step, file_path_src_end_raw in zip(outlet_name_list,
+ file_path_src_ref_end):
+
+ file_path_src_end_def = self.define_file_string(time_step, file_path_src_end_raw)
+
+ if os.path.exists(file_path_src_end_def):
+ file_info_src_ref_end = read_file_hydrograph_info(file_path_src_end_def)
+ file_data_src_ref_end = read_file_hydrograph_ts(file_path_src_end_def)
+ else:
+ file_info_src_ref_end = None
+ file_data_src_ref_end = None
+
+ file_info_end[outlet_name_step] = file_info_src_ref_end
+ file_data_end[outlet_name_step] = file_data_src_ref_end
+
+ file_check_end = all(elem is None for elem in list(file_info_end.values()))
+ if file_check_end:
+ file_insert_end = False
+ logging.info(' ------> Reference run_end datasets ... SKIPPED. Datasets not defined')
+ else:
+ file_insert_end = True
+ logging.info(' ------> Reference run_end datasets ... DONE')
+ else:
+ logging.error(' ------> Reference run_end datasets ... FAILED')
+ raise NotImplementedError('Case not implemented in source reference type end')
+ else:
+ logging.error(' ------> Reference run_end datasets ... FAILED')
+ raise NotImplementedError('Case not implemented in source reference file end')
+ else:
+ logging.info(' ------> Reference run_end datasets ... SKIPPED. Datasets already selected')
+ file_insert_end = False
+
+ # Store reference source information and datasets
+ if file_insert_start:
+ file_workspace[exec_name_step][self.tag_info][self.tag_run_ref_start] = file_info_start
+ file_workspace[exec_name_step][self.tag_info][self.tag_time_ref_start] = time_step
+                    if file_insert_end:
+ file_workspace[exec_name_step][self.tag_info][self.tag_run_ref_end] = file_info_end
+ file_workspace[exec_name_step][self.tag_info][self.tag_time_ref_end] = time_step
+ file_workspace[exec_name_step][self.tag_data] = file_data_end
+
+ logging.info(' -----> Time ' + str(time_step) + ' ... DONE')
+
+ logging.info(' ----> Execution ' + exec_name_step + ' ... DONE')
+
+ folder_name_anc, file_name_anc = os.path.split(file_path_anc)
+ make_folder(folder_name_anc)
+
+ write_obj(file_path_anc, file_workspace)
+
+ else:
+
+ file_workspace = read_obj(file_path_anc)
+
+ logging.info(' ---> Organize dynamic datasets [' + str(time) + '] ... DONE')
+
+ return file_workspace
+
+ # -------------------------------------------------------------------------------------
+
+# -------------------------------------------------------------------------------------
diff --git a/apps/Analyzer_Execution/driver_data_io_static.py b/apps/Analyzer_Execution/driver_data_io_static.py
new file mode 100755
index 0000000..57b1cae
--- /dev/null
+++ b/apps/Analyzer_Execution/driver_data_io_static.py
@@ -0,0 +1,83 @@
+"""
+Class Features
+
+Name: driver_data_io_static
+Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
+Date: '20210225'
+Version: '1.0.0'
+"""
+
+######################################################################################
+# Library
+import logging
+import os
+
+from lib_data_io_shapefile import read_file_section_data
+from lib_utils_exec import read_file_execution_data
+
+# Debug
+# import matplotlib.pylab as plt
+######################################################################################
+
+
+# -------------------------------------------------------------------------------------
+# Class DriverStatic
+class DriverStatic:
+
+ # -------------------------------------------------------------------------------------
+ # Initialize class
+ def __init__(self, src_dict, dst_dict=None,
+ alg_ancillary=None, alg_template_tags=None,
+ flag_section_data='section_data', flag_execution_data='execution_data',
+ flag_domain_collections='domain_collection',
+ flag_cleaning_static=True):
+
+ self.src_dict = src_dict
+
+ self.flag_section_data = flag_section_data
+ self.flag_execution_data = flag_execution_data
+
+ self.flag_domain_collections = flag_domain_collections
+
+ self.alg_ancillary = alg_ancillary
+
+ self.alg_template_tags = alg_template_tags
+ self.file_name_tag = 'file_name'
+ self.folder_name_tag = 'folder_name'
+
+ self.folder_name_section = self.src_dict[self.flag_section_data][self.folder_name_tag]
+ self.file_name_section = self.src_dict[self.flag_section_data][self.file_name_tag]
+ self.file_path_section = os.path.join(self.folder_name_section, self.file_name_section)
+
+ self.execution_data = self.src_dict[self.flag_execution_data]
+
+ self.flag_cleaning_static = flag_cleaning_static
+
+ # -------------------------------------------------------------------------------------
+
+ # -------------------------------------------------------------------------------------
+ # Method to organize geographical data
+ def organize_static(self):
+
+ # Info start
+ logging.info(' ---> Organize static datasets ... ')
+
+ # Data collection object
+ dframe_collections = {}
+
+ # Read section dataset
+ dframe_section = read_file_section_data(self.file_path_section)
+ # Read execution dataset
+ dframe_execution = read_file_execution_data(self.execution_data)
+
+ # Collect datasets in a common object
+ dframe_collections[self.flag_section_data] = dframe_section
+ dframe_collections[self.flag_execution_data] = dframe_execution
+
+ # Info end
+ logging.info(' ---> Organize static datasets ... DONE')
+
+ return dframe_collections
+ # -------------------------------------------------------------------------------------
+
+# -------------------------------------------------------------------------------------
diff --git a/apps/Analyzer_Execution/hat_runanalyzer_hmc_configuration_example.json b/apps/Analyzer_Execution/hat_runanalyzer_hmc_configuration_example.json
new file mode 100755
index 0000000..10d301f
--- /dev/null
+++ b/apps/Analyzer_Execution/hat_runanalyzer_hmc_configuration_example.json
@@ -0,0 +1,116 @@
+{
+ "algorithm":{
+ "flags": {
+ "cleaning_dynamic_source": false,
+ "cleaning_dynamic_analysis": false,
+ "cleaning_dynamic_destination": true,
+ "cleaning_dynamic_tmp": true
+ },
+ "template": {
+ "source_datetime": "%Y%m%d%H%M",
+ "source_sub_path_time": "%Y/%m/%d/%H",
+ "run_sub_path_datasets": "string_sub_path_datasets",
+ "run_sub_path_execution": "string_sub_path_execution",
+ "ancillary_datetime": "%Y%m%d%H%M",
+ "ancillary_sub_path_time": "%Y/%m/%d/%H",
+ "destination_datetime": "%Y%m%d%H%M",
+ "destination_sub_path_time": "%Y/%m/%d/",
+ "run_name": "string_name",
+ "section_name": "string_section_name",
+ "basin_name": "string_basin_name",
+ "domain_name": "string_domain_name"
+ },
+ "ancillary": {
+ "domain_name" : "marche"
+ },
+ "general": {
+ "title": "HAT - Run analyzer to control the execution of the HMC model",
+ "web-site": "",
+ "source": "Python library developed by CIMA Research Foundation",
+ "history": "1.5.0 [20210225]",
+ "project-info": "HAT - Hydrological Analysis Tool",
+ "algorithm": "Analysis tools developed by CIMA Research Foundation"
+ }
+ },
+ "time": {
+ "time_now": null,
+ "time_period": 36,
+ "time_frequency": "H",
+ "time_rounding": "H"
+ },
+ "data": {
+ "static": {
+ "section_data": {
+ "folder_name": "/home/fabio/Documents/Work_Area/Code_Development/Workspace/PyCharm_Workspace/hat-ws/data/data_static/shapefile/",
+ "file_name": "fp_sections_marche.shp"
+ },
+ "execution_data": {
+ "id_01": {
+ "features": {"run_name": "weather_stations_realtime", "run_sub_path_execution": "deterministic", "run_description": "RUN OSSERVATO",
+ "run_sub_path_datasets": "collections/", "run_variable_sim": "discharge_simulated", "run_variable_obs": "discharge_observed" },
+ "time": {"time_period": 2, "time_frequency": "H", "time_rounding": "H"}
+ },
+ "id_02": {
+ "features": {"run_name": "nwp_ecmwf0100_realtime", "run_sub_path_execution": "deterministic", "run_description": "ECMWF DETERMINISTICO",
+ "run_sub_path_datasets": "collections/", "run_variable_sim": "discharge_simulated", "run_variable_obs": "discharge_observed" },
+ "time": {"time_period": 24, "time_frequency": "H", "time_rounding": "H"}
+ },
+ "id_03": {
+ "features": {"run_name": "rfarm_ecmwf0100_realtime", "run_sub_path_execution": "probabilistic_001", "run_description": "ECMWF PROBABILISTICO",
+ "run_sub_path_datasets": "probabilistic_ensemble/collections/", "run_variable_sim": "discharge_simulated_{:}", "run_variable_obs": "discharge_observed"},
+ "time": {"time_period": 24, "time_frequency": "H", "time_rounding": "H"}
+ },
+ "id_04": {
+ "features": {"run_name": "nwp_lami-2i_realtime", "run_sub_path_execution": "deterministic", "run_description": "LAMI DETERMINISTICO",
+ "run_sub_path_datasets": "collections/", "run_variable_sim": "discharge_simulated", "run_variable_obs": "discharge_observed"},
+ "time": {"time_period": 24, "time_frequency": "H", "time_rounding": "H"}
+ },
+ "id_05": {
+ "features": {"run_name": "rfarm_lami-2i_realtime", "run_sub_path_execution": "probabilistic_001","run_description": "LAMI PROBABILISTICO",
+ "run_sub_path_datasets": "probabilistic_ensemble/collections/", "run_variable_sim": "discharge_simulated_{:}", "run_variable_obs": "discharge_observed"},
+ "time": {"time_period": 24, "time_frequency": "H", "time_rounding": "H"}
+ }
+ }
+ },
+ "dynamic" : {
+ "source": {
+ "run_reference_start": {
+ "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/run/{run_name}/{run_sub_path_execution}/exec/",
+ "file_name": "HMC_Model_V3_{run_name}.x"
+ },
+ "run_reference_end": {
+ "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/archive/{run_name}/{source_sub_path_time}/{run_sub_path_datasets}",
+ "file_name": "hydrograph_{section_name}_{basin_name}_{source_datetime}.json"
+ }
+ },
+ "ancillary": {
+ "run_source": {
+ "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/archive/run_analyzer/ancillary/{ancillary_sub_path_time}",
+ "file_name": "run_analyzer_data_source_{ancillary_datetime}.workspace"
+ },
+ "run_analysis": {
+ "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/archive/run_analyzer/ancillary/{ancillary_sub_path_time}",
+ "file_name": "run_analyzer_data_analysis_{ancillary_datetime}.workspace"
+ },
+ "run_destination": {
+ "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/archive/run_analyzer/ancillary/{ancillary_sub_path_time}",
+ "file_name": "run_analyzer_data_destination_{ancillary_datetime}.workspace"
+ }
+ },
+ "destination": {
+ "run_summary": {
+ "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/archive/run_analyzer/summary/{destination_sub_path_time}",
+ "file_name": "summary_simulations_marche.html"
+ },
+ "run_warnings": {
+ "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/archive/run_analyzer/warnings/{destination_sub_path_time}",
+ "file_name": "warnings_simulations_marche.csv"
+ }
+ }
+ }
+ },
+ "log": {
+ "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/log/",
+ "file_name": "hat_runanalyzer_hmc_log.txt"
+ }
+}
diff --git a/apps/RunAnalyzer/hat_runanalyzer_hmc_main_realtime.sh b/apps/Analyzer_Execution/hat_runanalyzer_hmc_main_example.sh
similarity index 100%
rename from apps/RunAnalyzer/hat_runanalyzer_hmc_main_realtime.sh
rename to apps/Analyzer_Execution/hat_runanalyzer_hmc_main_example.sh
diff --git a/apps/Analyzer_Execution/lib_data_analysis.py b/apps/Analyzer_Execution/lib_data_analysis.py
new file mode 100644
index 0000000..468b193
--- /dev/null
+++ b/apps/Analyzer_Execution/lib_data_analysis.py
@@ -0,0 +1,274 @@
+"""
+Library Features:
+
+Name: lib_data_analysis
+Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
+Date: '20210225'
+Version: '1.0.0'
+"""
+#######################################################################################
+# Library
+import logging
+
+import pandas as pd
+from copy import deepcopy
+
+from lib_info_args import logger_name
+
+# Logging
+log_stream = logging.getLogger(logger_name)
+#######################################################################################
+
+
+# -------------------------------------------------------------------------------------
+# Method to analyze data hydrograph
+def analyze_hydrograph_datasets(run_name, file_datasets,
+ tag_discharge_observed='discharge_observed',
+ tag_discharge_simulated='discharge_simulated',
+ tag_discharge_thr_alert='section_discharge_thr_alert',
+ tag_discharge_thr_alarm='section_discharge_thr_alarm',
+ tag_discharge_max_alert_value='discharge_alert_max_value',
+ tag_discharge_max_alert_index='discharge_alert_max_index',
+ tag_discharge_max_alarm_value='discharge_alarm_max_value',
+ tag_discharge_max_alarm_index='discharge_alarm_max_index',
+ tag_section_n='section_n', tag_run_n='run_n',
+ tag_run_type='run_type'):
+
+ section_n = file_datasets.__len__()
+
+ analysis_warnings_section = {}
+ analysis_datasets_section = {}
+ attrs_ts_collections = None
+ for section_tag, section_datasets in file_datasets.items():
+
+ logging.info(' ----> Analyze section ' + section_tag + ' ... ')
+
+ section_dframe = section_datasets[0]
+ section_attrs = section_datasets[1]
+
+ section_ts_vars = section_attrs['run_var'].split(',')
+        # Alert/alarm discharge thresholds, normally taken from the section attributes:
+        # data_thr_alert = float(section_attrs[tag_discharge_thr_alert])
+        # data_thr_alarm = float(section_attrs[tag_discharge_thr_alarm])
+
+        # DEBUG: hardcoded threshold values used in place of the section attributes
+        data_thr_alert = 1
+        data_thr_alarm = 10
+
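+        # Group the hydrograph time index by day (the first time step is skipped)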
+ section_ts_time = section_dframe.index
+ section_ts_days = section_ts_time[1:].normalize().unique()
+
+ # Create analysis datasets section
+ if section_ts_vars.__len__() == 1:
+
+ run_n = 1
+
+ section_ts_collections = {}
+ for section_ts_step in section_ts_days:
+
+ section_ts_collections[section_ts_step] = {}
+ section_ts_collections[section_ts_step][tag_discharge_max_alert_value] = {}
+ section_ts_collections[section_ts_step][tag_discharge_max_alert_index] = {}
+ section_ts_collections[section_ts_step][tag_discharge_max_alarm_value] = {}
+ section_ts_collections[section_ts_step][tag_discharge_max_alarm_index] = {}
+
+ ts_day = section_ts_step.day
+ ts_month = section_ts_step.month
+ ts_year = section_ts_step.year
+
+ section_dframe_day = section_dframe.loc[(section_dframe.index.day == ts_day) &
+ (section_dframe.index.month == ts_month) &
+ (section_dframe.index.year == ts_year), :]
+
+ section_ts_day_sim = section_dframe_day[tag_discharge_simulated]
+ section_ts_day_obs = section_dframe_day[tag_discharge_observed]
+
+ section_ts_thr_alert = section_ts_day_sim.where(section_ts_day_sim > data_thr_alert)
+ section_ts_thr_alarm = section_ts_day_sim.where(section_ts_day_sim > data_thr_alarm)
+
+ section_ts_alert_max_value = section_ts_thr_alert.max(skipna=True)
+ section_ts_alert_max_index = section_ts_thr_alert.idxmax(skipna=True)
+
+ section_ts_alarm_max_value = section_ts_thr_alarm.max(skipna=True)
+ section_ts_alarm_max_index = section_ts_thr_alarm.idxmax(skipna=True)
+
+ section_ts_collections[section_ts_step][tag_discharge_max_alert_value] = section_ts_alert_max_value
+ section_ts_collections[section_ts_step][tag_discharge_max_alert_index] = section_ts_alert_max_index
+ section_ts_collections[section_ts_step][tag_discharge_max_alarm_value] = section_ts_alarm_max_value
+ section_ts_collections[section_ts_step][tag_discharge_max_alarm_index] = section_ts_alarm_max_index
+ section_ts_collections[section_ts_step][tag_discharge_thr_alert] = data_thr_alert
+ section_ts_collections[section_ts_step][tag_discharge_thr_alarm] = data_thr_alarm
+ section_ts_collections[section_ts_step][tag_run_type] = run_name
+
+ if attrs_ts_collections is None:
+ attrs_ts_collections = {tag_run_n: run_n, tag_section_n: section_n, tag_run_type: run_name}
+ else:
+
+ run_n = section_ts_vars.__len__()
+
+ section_ts_collections = {}
+ for section_ts_step in section_ts_days:
+
+ section_ts_collections[section_ts_step] = {}
+ section_ts_collections[section_ts_step][tag_discharge_max_alert_value] = {}
+ section_ts_collections[section_ts_step][tag_discharge_max_alert_index] = {}
+ section_ts_collections[section_ts_step][tag_discharge_max_alarm_value] = {}
+ section_ts_collections[section_ts_step][tag_discharge_max_alarm_index] = {}
+ section_ts_collections[section_ts_step][tag_discharge_thr_alert] = {}
+ section_ts_collections[section_ts_step][tag_discharge_thr_alarm] = {}
+
+ ts_day = section_ts_step.day
+ ts_month = section_ts_step.month
+ ts_year = section_ts_step.year
+
+ section_ts_alert_max_value_list = []
+ section_ts_alert_max_index_list = []
+ section_ts_alarm_max_value_list = []
+ section_ts_alarm_max_index_list = []
+ for section_ts_var in section_ts_vars:
+
+ tag_discharge_simulated_var = tag_discharge_simulated.format(section_ts_var)
+ tag_discharge_observed_var = tag_discharge_observed.format(section_ts_var)
+
+ section_dframe_day = section_dframe.loc[(section_dframe.index.day == ts_day) &
+ (section_dframe.index.month == ts_month) &
+ (section_dframe.index.year == ts_year), :]
+
+ section_ts_sim_var = section_dframe_day[tag_discharge_simulated_var]
+ section_ts_obs_var = section_dframe_day[tag_discharge_observed_var]
+
+ section_ts_thr_alert_var = section_ts_sim_var.where(section_ts_sim_var > data_thr_alert)
+ section_ts_thr_alarm_var = section_ts_sim_var.where(section_ts_sim_var > data_thr_alarm)
+
+ section_ts_alert_max_value_var = section_ts_thr_alert_var.max(skipna=True)
+ section_ts_alert_max_index_var = section_ts_thr_alert_var.idxmax(skipna=True)
+
+ section_ts_alarm_max_value_var = section_ts_thr_alarm_var.max(skipna=True)
+ section_ts_alarm_max_index_var = section_ts_thr_alarm_var.idxmax(skipna=True)
+
+ if section_ts_alert_max_index_var not in section_ts_alert_max_index_list:
+ section_ts_alert_max_value_list.append(section_ts_alert_max_value_var)
+ section_ts_alert_max_index_list.append(section_ts_alert_max_index_var)
+ if section_ts_alarm_max_index_var not in section_ts_alarm_max_index_list:
+ section_ts_alarm_max_value_list.append(section_ts_alarm_max_value_var)
+ section_ts_alarm_max_index_list.append(section_ts_alarm_max_index_var)
+
+ section_ts_thr_alert_max = pd.Series(section_ts_alert_max_value_list, index=section_ts_alert_max_index_list)
+ section_ts_thr_alarm_max = pd.Series(section_ts_alarm_max_value_list, index=section_ts_alarm_max_index_list)
+
+ if section_ts_thr_alert_max.shape[0] == 1:
+ section_ts_alert_max_value = section_ts_thr_alert_max.values[0]
+ section_ts_alert_max_index = section_ts_thr_alert_max.index[0]
+ else:
+ section_ts_alert_max_value = section_ts_thr_alert_max.max(skipna=True)
+ section_ts_alert_max_index = section_ts_thr_alert_max.idxmax(skipna=True)
+
+ if section_ts_thr_alarm_max.shape[0] == 1:
+ section_ts_alarm_max_value = section_ts_thr_alarm_max.values[0]
+ section_ts_alarm_max_index = section_ts_thr_alarm_max.index[0]
+ else:
+ section_ts_alarm_max_value = section_ts_thr_alarm_max.max(skipna=True)
+ section_ts_alarm_max_index = section_ts_thr_alarm_max.idxmax(skipna=True)
+
+ if isinstance(section_ts_alert_max_value, list):
+ if section_ts_alert_max_value.__len__() == 1:
+ section_ts_alert_max_value = section_ts_alert_max_value[0]
+ else:
+ raise NotImplementedError('Maximum alert value of the hydrograph analysis is in an unsupported format')
+
+ if isinstance(section_ts_alarm_max_value, list):
+ if section_ts_alarm_max_value.__len__() == 1:
+ section_ts_alarm_max_value = section_ts_alarm_max_value[0]
+ else:
+ raise NotImplementedError('Maximum alarm value of the hydrograph analysis is in an unsupported format')
+
+ section_ts_collections[section_ts_step][tag_discharge_max_alert_value] = section_ts_alert_max_value
+ section_ts_collections[section_ts_step][tag_discharge_max_alert_index] = section_ts_alert_max_index
+ section_ts_collections[section_ts_step][tag_discharge_max_alarm_value] = section_ts_alarm_max_value
+ section_ts_collections[section_ts_step][tag_discharge_max_alarm_index] = section_ts_alarm_max_index
+ section_ts_collections[section_ts_step][tag_discharge_thr_alert] = data_thr_alert
+ section_ts_collections[section_ts_step][tag_discharge_thr_alarm] = data_thr_alarm
+ section_ts_collections[section_ts_step][tag_run_type] = run_name
+
+ if attrs_ts_collections is None:
+ attrs_ts_collections = {tag_run_n: run_n, tag_section_n: section_n, tag_run_type: run_name}
+
+ # Create analysis warning section
+ if section_tag not in list(analysis_warnings_section.keys()):
+ analysis_warnings_section[section_tag] = {}
+ for section_time, section_data in section_ts_collections.items():
+ for section_key, section_value in section_data.items():
+ if section_key in list(analysis_warnings_section[section_tag].keys()):
+ section_value_tmp = deepcopy(analysis_warnings_section[section_tag][section_key])
+ section_value_tmp.append(section_value)
+ analysis_warnings_section[section_tag][section_key] = section_value_tmp
+ else:
+ analysis_warnings_section[section_tag][section_key] = {}
+ analysis_warnings_section[section_tag][section_key] = [section_value]
+
+ analysis_datasets_section[section_tag] = section_ts_collections
+
+ logging.info(' ----> Analyze section ' + section_tag + ' ... DONE')
+
+ return analysis_datasets_section, analysis_warnings_section, attrs_ts_collections
+
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to analyze time information
+def analyze_time_info(file_info_start, file_info_end, tag_file_create='file_create'):
+
+ time_creation_start = define_time_creation(
+ file_info_start, tag_file_create=tag_file_create)
+ time_creation_end = define_time_creation(
+ file_info_end, tag_file_create=tag_file_create)
+
+ time_creation_elapsed = time_creation_end[0] - time_creation_start[0]
+
+ if isinstance(time_creation_start, list) and (time_creation_start.__len__() == 1):
+ time_creation_start = time_creation_start[0]
+ else:
+ raise NotImplementedError('Time creation start format not supported yet')
+
+ if isinstance(time_creation_end, list) and (time_creation_end.__len__() == 1):
+ time_creation_end = time_creation_end[0]
+ else:
+ raise NotImplementedError('Time creation end format not supported yet')
+
+ if time_creation_start > time_creation_end:
+ time_creation_end = None
+ time_creation_elapsed = None
+ elif time_creation_start <= time_creation_end:
+ pass
+ else:
+ raise NotImplementedError('Analyze time information case not implemented yet')
+
+ return time_creation_start, time_creation_end, time_creation_elapsed
+
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to define file creation
+def define_time_creation(file_info, tag_file_create='file_create'):
+
+ if isinstance(file_info, dict):
+
+ if tag_file_create in list(file_info.keys()):
+ file_time_creation = [file_info[tag_file_create]]
+ else:
+ file_time_list = []
+ for file_values in file_info.values():
+ if tag_file_create in list(file_values.keys()):
+ file_time_step = file_values[tag_file_create]
+ file_time_list.append(file_time_step)
+
+ file_time_idx = pd.DatetimeIndex(file_time_list)
+ file_time_creation = [file_time_idx.max()]
+ else:
+ raise NotImplementedError('File info obj is not in a known format')
+
+ return file_time_creation
+# -------------------------------------------------------------------------------------
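A minimal, hypothetical usage sketch for analyze_hydrograph_datasets (assuming the Analyzer_Execution modules are importable): it builds the expected input, a dictionary mapping each section tag to a (DataFrame, attributes) pair, where the DataFrame is time-indexed with observed/simulated discharge columns and the attributes carry 'run_var' and the alert/alarm thresholds. Section names, run name and all values are placeholders.

import numpy as np
import pandas as pd

from lib_data_analysis import analyze_hydrograph_datasets

# Single section, single deterministic run (run_var has one entry)
time_index = pd.date_range('2021-02-25 00:00', periods=48, freq='H')
section_dframe = pd.DataFrame(
    {'discharge_observed': np.linspace(0.0, 150.0, 48),
     'discharge_simulated': np.linspace(0.0, 300.0, 48)},
    index=time_index)
section_attrs = {'run_var': 'deterministic',
                 'section_discharge_thr_alert': '50.0',
                 'section_discharge_thr_alarm': '100.0'}

file_datasets = {'example_basin:example_section': (section_dframe, section_attrs)}

# Returns daily alert/alarm maxima per section, warning summaries and run attributes
datasets_section, warnings_section, attrs_collections = analyze_hydrograph_datasets(
    'run_example', file_datasets)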
diff --git a/apps/Analyzer_Execution/lib_data_io_ascii.py b/apps/Analyzer_Execution/lib_data_io_ascii.py
new file mode 100644
index 0000000..bcb360f
--- /dev/null
+++ b/apps/Analyzer_Execution/lib_data_io_ascii.py
@@ -0,0 +1,120 @@
+"""
+Library Features:
+
+Name: lib_data_io_ascii
+Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
+Date: '20210225'
+Version: '1.0.0'
+"""
+#######################################################################################
+# Library
+import logging
+import os
+import time
+import json
+import difflib
+
+import numpy as np
+import pandas as pd
+
+from lib_info_args import logger_name
+
+# Logging
+log_stream = logging.getLogger(logger_name)
+#######################################################################################
+
+
+# -------------------------------------------------------------------------------------
+# Method to select data warnings
+def select_data_warnings(file_dframe, file_section_col, file_section_value, file_section_name, file_fields=None):
+
+ section_dframe_raw = file_dframe.loc[file_dframe[file_section_col] == file_section_name, file_fields]
+
+ section_dframe_unique = section_dframe_raw.drop_duplicates(keep='first')
+ section_dframe_cols = section_dframe_unique.columns
+
+ section_dict = {}
+ if section_dframe_unique.shape[0] == 1:
+
+ for section_dframe_col in section_dframe_cols:
+ section_dframe_val = section_dframe_unique[section_dframe_col].values
+ section_dict[section_dframe_col] = section_dframe_val
+ else:
+
+ section_dframe_unique = section_dframe_unique.reset_index()
+ section_dframe_idx = section_dframe_unique[file_section_value].idxmax()
+
+ if not np.isnan(section_dframe_idx):
+ section_dframe_select = section_dframe_unique.iloc[section_dframe_idx]
+
+ for section_dframe_col in section_dframe_cols:
+ section_dframe_val = section_dframe_select[section_dframe_col]
+ section_dict[section_dframe_col] = section_dframe_val
+ else:
+ section_dict = None
+
+ return section_dict
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to write warnings ascii file
+def write_file_warnings(file_name, warnings_collections, warnings_structure,
+ tag_section_name='section_name',
+ no_time='NA', no_data=-9999.0, no_string='NA',
+ column_delimiter=','):
+
+ field_type_list = ['float', 'time_stamp', 'float', 'string']
+
+ warnings_header = None
+ warnings_datasets = None
+ for section_name, section_data in warnings_collections.items():
+
+ field_list = []
+ field_values = []
+
+ field_list.append(tag_section_name)
+ field_values.append(section_name)
+ for key, data in section_data.items():
+
+ for (field_key, field_value), field_type in zip(data.items(), field_type_list):
+ field_list.append(field_key)
+
+ if field_type == 'float':
+ if isinstance(field_value, (int, float)):
+ if not np.isnan(field_value):
+ field_value = str(float(field_value))
+ else:
+ field_value = str(no_data)
+ else:
+ field_value = str(no_data)
+ elif field_type == 'time_stamp':
+ if isinstance(field_value, pd.Timestamp):
+ field_value = field_value.strftime('%Y-%m-%d %H:%M')
+ else:
+ field_value = no_time
+ elif field_type == 'string':
+ if isinstance(field_value, str):
+ field_value = str(field_value)
+ else:
+ field_value = no_string
+ else:
+ raise NotImplementedError('Parse warnings case not implemented yet')
+
+ field_values.append(field_value)
+
+ if warnings_header is None:
+ warnings_header = column_delimiter.join(field_list)
+ if warnings_datasets is None:
+ warnings_datasets = []
+
+ warnings_datasets.append(column_delimiter.join(field_values))
+
+ with open(file_name, 'w') as file_handle:
+ warnings_header = warnings_header + '\n'
+ file_handle.write(warnings_header)
+ for warnings_row in warnings_datasets:
+ warnings_row = warnings_row + '\n'
+ file_handle.write(warnings_row)
+
+# -------------------------------------------------------------------------------------
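A minimal sketch of the input accepted by write_file_warnings above; the section tag, record key and output file name are placeholders, and the four fields per record are assumed to follow the float / time stamp / float / string order hard-coded in field_type_list.

import pandas as pd

from lib_data_io_ascii import write_file_warnings

# One section with one daily record; item order must match field_type_list
warnings_collections = {
    'example_section': {
        'day_0': {
            'discharge_alert_max_value': 75.3,                              # float
            'discharge_alert_max_index': pd.Timestamp('2021-02-25 14:00'),  # time stamp
            'section_discharge_thr_alert': 50.0,                            # float
            'run_type': 'run_example'}}}                                    # string

write_file_warnings('warnings_simulations_example.csv', warnings_collections, warnings_structure=None)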
diff --git a/apps/Analyzer_Execution/lib_data_io_html.py b/apps/Analyzer_Execution/lib_data_io_html.py
new file mode 100644
index 0000000..4b3ae5d
--- /dev/null
+++ b/apps/Analyzer_Execution/lib_data_io_html.py
@@ -0,0 +1,311 @@
+# -------------------------------------------------------------------------------------
+# Libraries
+import numpy as np
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to split a list into a given number of interleaved slices
+def slice_list(source, step):
+ return [source[i::step] for i in range(step)]
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to write an html summary file
+def write_file_summary(time_run, time_exec, time_format='%Y-%m-%d %H:%M', time_mode='LOCAL',
+ html_name='run_analyzer.html', run_summary=None,
+ run_name=None, tag_summary_data='data', tag_summary_info='info',
+ tag_run_start='run_start', tag_run_end='run_end', tag_run_datasets='run_datasets',
+ tag_alarm_section='alarm_section',
+ tag_alarm_value='alarm_value',
+ tag_alarm_index='alarm_index',
+ tag_alert_section='alert_section',
+ tag_alert_value='alert_value',
+ tag_alert_index='alert_index'):
+
+ if not isinstance(run_name, list):
+ run_name = [run_name]
+
+ with open(html_name, "w") as html_handle:
+
+ html_handle.write('\n')
+
+ html_handle.write('\n')
+
+ html_handle.write(' Execution Time: | ' +
+ time_exec.strftime(time_format) + ' ' + time_mode + ' |\n')
+
+ html_handle.write(' Reference Time: | ' +
+ time_run.strftime(time_format) + ' ' + time_mode + ' |\n')
+
+ html_handle.write('\n')
+ html_handle.write('\n')
+ html_handle.write('Run Configuration Regione Marche | \n')
+ html_handle.write('State | \n')
+ html_handle.write('TimeStart | \n')
+ html_handle.write('TimeEnd | \n')
+ html_handle.write('TimeElapsed | \n')
+ html_handle.write('Scenarios | \n')
+ html_handle.write('Sections | \n')
+ html_handle.write(' | \n')
+ html_handle.write(' OGGI gialla | \n')
+ html_handle.write(' OGGI gialla | \n')
+ html_handle.write(' DOMANI gialla | \n')
+ html_handle.write(' DOMANI gialla | \n')
+ html_handle.write(' | \n')
+ html_handle.write(' OGGI rossa | \n')
+ html_handle.write(' OGGI rossa | \n')
+ html_handle.write(' DOMANI rossa | \n')
+ html_handle.write(' DOMANI rossa | \n')
+ html_handle.write(' | \n')
+ html_handle.write('\n')
+ html_handle.write('\n')
+
+ for run_name_step in run_name:
+
+ run_data = run_summary[run_name_step][tag_summary_data]
+ run_info = run_summary[run_name_step][tag_summary_info]
+
+ if 'run_n' in list(run_info.keys()):
+ run_n = str(run_info['run_n'])
+ else:
+ run_n = 'NA'
+ if 'run_description' in list(run_info.keys()):
+ run_description = str(run_info['run_description'])
+ else:
+ run_description = 'NA'
+ if 'section_n' in list(run_info.keys()):
+ section_n = str(run_info['section_n'])
+ else:
+ section_n = 'NA'
+
+ html_handle.write('')
+
+ if run_data is not None:
+
+ time_stamp_start = run_info['run_start']
+ if time_stamp_start is not None:
+ time_str_start = time_stamp_start.strftime(time_format)
+ else:
+ time_str_start = 'NA'
+ time_stamp_end = run_info['run_end']
+ if time_stamp_end is not None:
+ time_str_end = time_stamp_end.strftime(time_format)
+ else:
+ time_str_end = 'NA'
+ time_stamp_elapsed = run_info['run_elapsed']
+ if time_stamp_elapsed is not None:
+ time_str_elapsed = str(time_stamp_elapsed)
+ else:
+ time_str_elapsed = 'NA'
+
+
+ # CASE CONDITIONS
+ if time_str_start != 'NA' and time_str_end != 'NA':
+
+ section_alert_collections = {}
+ section_alarm_collections = {}
+ for id_step, (time_step, data_step) in enumerate(run_data.items()):
+
+ alert_value_raw = data_step[tag_alert_value]
+ alert_index_raw = data_step[tag_alert_index]
+ alert_section_raw = data_step[tag_alert_section]
+ alarm_value_raw = data_step[tag_alarm_value]
+ alarm_index_raw = data_step[tag_alarm_index]
+ alarm_section_raw = data_step[tag_alarm_section]
+
+ alert_idx = [i for i, n in enumerate(alert_value_raw) if np.isfinite(n)]
+ alarm_idx = [i for i, n in enumerate(alarm_value_raw) if np.isfinite(n)]
+
+ alert_value_def = []
+ alert_index_def = []
+ alert_section_def = []
+ for idx in alert_idx:
+ alert_value_def.append(alert_value_raw[idx])
+ alert_index_def.append(alert_index_raw[idx])
+ alert_section_def.append(alert_section_raw[idx])
+ section_alert_collections[id_step] = {}
+ section_alert_collections[id_step][tag_alert_section] = {}
+ section_alert_collections[id_step][tag_alert_section] = alert_section_def
+ section_alert_collections[id_step][tag_alert_value] = {}
+ section_alert_collections[id_step][tag_alert_value] = alert_value_def
+ section_alert_collections[id_step][tag_alert_index] = {}
+ section_alert_collections[id_step][tag_alert_index] = alert_index_def
+
+ alarm_value_def = []
+ alarm_index_def = []
+ alarm_section_def = []
+ for idx in alarm_idx:
+ alarm_value_def.append(alarm_value_raw[idx])
+ alarm_index_def.append(alarm_index_raw[idx])
+ alarm_section_def.append(alarm_section_raw[idx])
+ section_alarm_collections[id_step] = {}
+ section_alarm_collections[id_step][tag_alarm_section] = {}
+ section_alarm_collections[id_step][tag_alarm_section] = alarm_section_def
+ section_alarm_collections[id_step][tag_alarm_value] = {}
+ section_alarm_collections[id_step][tag_alarm_value] = alarm_value_def
+ section_alarm_collections[id_step][tag_alarm_index] = {}
+ section_alarm_collections[id_step][tag_alarm_index] = alarm_index_def
+
+ if (alert_section_def.__len__() == 0) and (alarm_section_def.__len__() == 0):
+
+ html_handle.write('' + run_description + ' | ')
+ html_handle.write(' COMPLETED | ')
+ html_handle.write('' + time_str_start + ' ' + time_mode + ' | ')
+ html_handle.write('' + time_str_end + ' ' + time_mode + ' | ')
+ html_handle.write('' + time_str_elapsed + ' | ')
+ html_handle.write('' + run_n + ' | ')
+ html_handle.write('' + section_n + ' | ')
+ html_handle.write(' | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write(' | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write(' | ')
+
+ else:
+
+ # CASE COMPLETED
+ html_handle.write('' + run_description + ' | ')
+ html_handle.write(' COMPLETED | ')
+ html_handle.write('' + time_str_start + ' ' + time_mode + ' | ')
+ html_handle.write('' + time_str_end + ' ' + time_mode + ' | ')
+ html_handle.write('' + time_str_elapsed + ' | ')
+ html_handle.write('' + run_n + ' | ')
+ html_handle.write('' + section_n + ' | ')
+ html_handle.write(' | ')
+
+ for id, data in section_alert_collections.items():
+ section_list = data[tag_alert_section]
+ idx_list = data[tag_alert_index]
+ value_list = data[tag_alert_value]
+ section_n_alert = str(section_list.__len__())
+
+ alert_group = ''
+ for section_step, idx_step, value_step in zip(section_list, idx_list, value_list):
+ alert_string = section_step + ' :: ' + str(idx_step.strftime('%H')) + ' :: ' + str(value_step)
+ alert_group += alert_string + '\n'
+ alert_group = alert_group.replace("\n", "<br>\n")
+
+ # section_groups = slice_list(section_list, 5)
+ # section_cell = '\n'.join(' '.join(sub) for sub in section_groups)
+
+ # alert today
+ if id == 0:
+ if section_list.__len__() > 0:
+ html_handle.write('' + section_n_alert + '/' + section_n)
+ html_handle.write(' | ' + alert_group + ' | ')
+ else:
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+
+ # alert tomorrow
+ if id == 1:
+ if section_list:
+ html_handle.write('' + section_n_alert + '/' + section_n)
+ html_handle.write(' | ' + alert_group + ' | ')
+ else:
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write(' | ')
+
+ for id, data in section_alarm_collections.items():
+ section_list = data[tag_alarm_section]
+ idx_list = data[tag_alarm_index]
+ value_list = data[tag_alarm_value]
+ section_n_alert = str(section_list.__len__())
+
+ alarm_group = ''
+ for section_step, idx_step, value_step in zip(section_list, idx_list, value_list):
+ alarm_string = section_step + ' :: ' + str(idx_step.strftime('%H')) + ' :: ' + str(value_step)
+ alarm_group += alarm_string + '\n'
+ alarm_group = alarm_group.replace("\n", "<br>\n")
+
+ # alert today
+ if id == 0:
+ if section_list:
+ html_handle.write('' + section_n_alert + '/' + section_n)
+ html_handle.write(' | ' + alarm_group + ' | ')
+ else:
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+
+ # alert tomorrow
+ if id == 1:
+ if section_list:
+ html_handle.write('' + section_n_alert + '/' + section_n)
+ html_handle.write(' | ' + alarm_group + ' | ')
+ else:
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write(' | ')
+
+ elif time_str_start != 'NA' and time_str_end == 'NA':
+
+ # CASE RUNNING
+ html_handle.write('' + run_description + ' | ')
+ html_handle.write(' RUNNING ... | ')
+ html_handle.write('' + time_str_start + ' ' + time_mode + ' | ')
+ html_handle.write(' NA | ')
+ html_handle.write(' NA | ')
+ html_handle.write('' + run_n + ' | ')
+ html_handle.write('' + section_n + ' | ')
+ html_handle.write(' | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write(' | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write(' | ')
+
+ else:
+
+ html_handle.write('' + run_description + ' | ')
+ html_handle.write(' RUN NOT AVAILABLE | ')
+ html_handle.write(' NA | ')
+ html_handle.write(' NA | ')
+ html_handle.write(' NA | ')
+ html_handle.write(' NA | ')
+ html_handle.write(' NA | ')
+ html_handle.write(' | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write(' | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write('- | ')
+ html_handle.write(' | ')
+
+ html_handle.write(' ')
+
+ html_handle.write('')
+ html_handle.write('')
+
+# -------------------------------------------------------------------------------------
diff --git a/apps/Analyzer_Execution/lib_data_io_json.py b/apps/Analyzer_Execution/lib_data_io_json.py
new file mode 100755
index 0000000..e938121
--- /dev/null
+++ b/apps/Analyzer_Execution/lib_data_io_json.py
@@ -0,0 +1,156 @@
+"""
+Library Features:
+
+Name: lib_data_io_json
+Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
+Date: '20210225'
+Version: '1.0.0'
+"""
+#######################################################################################
+# Library
+import logging
+import os
+import time
+import json
+import difflib
+
+import numpy as np
+import pandas as pd
+
+from lib_info_args import logger_name
+
+# Logging
+log_stream = logging.getLogger(logger_name)
+#######################################################################################
+
+
+# -------------------------------------------------------------------------------------
+# Method to write file hydrograph
+def write_file_hydrograph_ts(file_name, file_dict_raw, file_indent=4, file_sep=','):
+
+ file_dict_parser = {}
+ for file_key, file_value in file_dict_raw.items():
+ if isinstance(file_value, list):
+ file_value = [str(i) for i in file_value]
+ file_value = file_sep.join(file_value)
+ elif isinstance(file_value, (int, float)):
+ file_value = str(file_value)
+ elif isinstance(file_value, str):
+ pass
+ elif isinstance(file_value, np.ndarray):
+ file_value = [str(i) for i in file_value]
+ file_value = file_sep.join(file_value)
+ else:
+ log_stream.error(' ===> Error in parsing json time series')
+ raise RuntimeError('Parsing case not implemented yet')
+
+ file_dict_parser[file_key] = file_value
+
+ file_data = json.dumps(file_dict_parser, indent=file_indent, ensure_ascii=False, sort_keys=True)
+ with open(file_name, "w", encoding='utf-8') as file_handle:
+ file_handle.write(file_data)
+
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to read hydrograph file information
+def read_file_hydrograph_info(file_name,
+ tag_time_access='time_access', tag_time_modified='time_modified',
+ tag_time_create='time_create', tag_file_size='file_size'):
+
+ file_time_access = pd.Timestamp(time.ctime(os.path.getatime(file_name)))
+ file_time_modified = pd.Timestamp(time.ctime(os.path.getmtime(file_name)))
+ file_time_create = pd.Timestamp(time.ctime(os.path.getctime(file_name)))
+ file_size = os.path.getsize(file_name)
+
+ file_obj = {tag_time_access: file_time_access, tag_time_modified: file_time_modified,
+ tag_time_create: file_time_create, tag_file_size: file_size}
+
+ return file_obj
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to read file hydrograph
+def read_file_hydrograph_ts(file_name, file_sep=',',
+ tag_time_in='time_period',
+ tag_discharge_obs_in='time_series_discharge_observed',
+ tag_discharge_sim_in='time_series_discharge_simulated',
+ tag_time_out='time_period',
+ tag_discharge_obs_out='discharge_observed',
+ tag_discharge_sim_out='discharge_simulated',
+ tag_index='time_period'):
+
+ if os.path.exists(file_name):
+ with open(file_name) as file_handle:
+ file_data = json.load(file_handle)
+ else:
+ log_stream.error(' ===> Error in reading hydrograph file ' + file_name)
+ raise IOError('File not found')
+
+ keys_list = list(file_data.keys())
+ tag_discharge_sim_in_select = []
+ tag_discharge_sim_out_select = []
+ for keys_step in keys_list:
+ if tag_discharge_sim_in in keys_step:
+ tag_discharge_sim_in_select.append(keys_step)
+
+ tag_diff_tmp = [li for li in difflib.ndiff(keys_step, tag_discharge_sim_in) if li[0] != ' ']
+ tag_diff_element = ''.join([tag_step[1:].strip() for tag_step in tag_diff_tmp])
+ tag_diff_idx = keys_step.find(tag_diff_element)
+
+ if tag_discharge_sim_out.__len__() == tag_diff_idx:
+ tag_discharge_sim_out_step = tag_discharge_sim_out[:tag_diff_idx] + tag_diff_element
+ elif tag_discharge_sim_out.__len__() > tag_diff_idx:
+ tag_discharge_sim_out_step = tag_discharge_sim_out + tag_diff_element
+ elif tag_discharge_sim_out.__len__() < tag_diff_idx:
+ tag_discharge_sim_out_step = tag_discharge_sim_out + tag_diff_element
+ else:
+ log_stream.error(' ===> Error in creating tags for the outcome variable')
+ raise NotImplementedError('Case not implemented yet')
+
+ tag_discharge_sim_out_select.append(tag_discharge_sim_out_step)
+
+ variable_list_in = [tag_time_in, tag_discharge_obs_in]
+ variable_list_in.extend(tag_discharge_sim_in_select)
+ variable_list_out = [tag_time_out, tag_discharge_obs_out]
+ variable_list_out.extend(tag_discharge_sim_out_select)
+
+ file_data_attrs = {}
+ file_data_dict = {}
+ for file_key, file_value in file_data.items():
+ if file_key in variable_list_in:
+ var_idx = variable_list_in.index(file_key)
+ var_name = variable_list_out[var_idx]
+ file_data_dict[var_name] = file_value
+ else:
+ file_data_attrs[file_key] = file_value
+
+ for file_key, file_value_tmp in file_data_dict.items():
+ file_list_tmp = file_value_tmp.split(file_sep)
+ if file_key == tag_time_out:
+ file_list_converted = pd.DatetimeIndex(file_list_tmp)
+ else:
+ file_list_converted = list(map(float, file_list_tmp))
+ file_data_dict[file_key] = file_list_converted
+
+ file_data_df = pd.DataFrame(data=file_data_dict)
+ if tag_index in list(file_data_df.columns):
+ file_data_df.set_index(tag_index, inplace=True)
+
+ return file_data_df, file_data_attrs
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to read file settings
+def read_file_settings(file_name):
+ if os.path.exists(file_name):
+ with open(file_name) as file_handle:
+ file_data = json.load(file_handle)
+ else:
+ log_stream.error(' ===> Error in reading settings file ' + file_name)
+ raise IOError('File not found')
+ return file_data
+# -------------------------------------------------------------------------------------
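A minimal round-trip sketch for write_file_hydrograph_ts / read_file_hydrograph_ts above; the file name and series values are placeholders, while the key names follow the functions' default tags.

from lib_data_io_json import write_file_hydrograph_ts, read_file_hydrograph_ts

# Lists are serialized as comma-separated strings and parsed back on read
hydrograph_dict = {
    'time_period': ['2021-02-25 00:00', '2021-02-25 01:00', '2021-02-25 02:00'],
    'time_series_discharge_observed': [1.2, 1.5, 1.9],
    'time_series_discharge_simulated': [1.1, 1.6, 2.3],
    'section_name': 'example_section'}

write_file_hydrograph_ts('hydrograph_example.json', hydrograph_dict)

# Returns a time-indexed DataFrame plus the non-series keys as attributes
hydrograph_df, hydrograph_attrs = read_file_hydrograph_ts('hydrograph_example.json')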
diff --git a/apps/Analyzer_Execution/lib_data_io_shapefile.py b/apps/Analyzer_Execution/lib_data_io_shapefile.py
new file mode 100755
index 0000000..89e34cd
--- /dev/null
+++ b/apps/Analyzer_Execution/lib_data_io_shapefile.py
@@ -0,0 +1,100 @@
+"""
+Class Features
+
+Name: lib_data_io_shapefile
+Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
+Date: '20210225'
+Version: '1.0.0'
+"""
+
+#######################################################################################
+# Libraries
+import logging
+
+import pandas as pd
+import geopandas as gpd
+
+logging.getLogger('fiona').setLevel(logging.WARNING)
+logging.getLogger('geopandas').setLevel(logging.WARNING)
+
+# Debug
+# import matplotlib.pylab as plt
+#######################################################################################
+
+
+# -------------------------------------------------------------------------------------
+# Method to find data section
+def find_file_section_data(section_df, section_name=None, basin_name=None,
+ tag_column_section_in='section_name', tag_column_basin_in='section_domain',
+ tag_column_section_out='section_name', tag_column_basin_out='basin_name'):
+
+ section_name_ref = section_name.lower()
+ basin_name_ref = basin_name.lower()
+
+ section_name_list = section_df[tag_column_section_in].values
+ basin_name_list = section_df[tag_column_basin_in].values
+
+ section_dict_tmp = {tag_column_section_in: section_name_list, tag_column_basin_in: basin_name_list}
+ section_df_tmp = pd.DataFrame(data=section_dict_tmp)
+ section_df_tmp = section_df_tmp.astype(str).apply(lambda x: x.str.lower())
+
+ point_idx = section_df_tmp[(section_df_tmp[tag_column_section_in] == section_name_ref) &
+ (section_df_tmp[tag_column_basin_in] == basin_name_ref)].index
+
+ if point_idx.shape[0] == 1:
+ point_idx = point_idx[0]
+ point_dict = section_df.iloc[point_idx, :].to_dict()
+
+ point_dict[tag_column_section_out] = point_dict.pop(tag_column_section_in)
+ point_dict[tag_column_basin_out] = point_dict.pop(tag_column_basin_in)
+
+ elif point_idx.shape[0] == 0:
+ raise IOError('Section selection failed; section not found')
+ else:
+ raise NotImplementedError('Section selection failed for unknown reason.')
+
+ return point_dict
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to read shapefile section(s)
+def read_file_section_data(file_name, columns_name_expected_in=None, columns_name_expected_out=None, columns_name_type=None):
+
+ if columns_name_expected_in is None:
+ columns_name_expected_in = [
+ 'HMC_X', 'HMC_Y', 'LAT', 'LON', 'BASIN', 'SEC_NAME', 'SEC_RS', 'AREA', 'Q_THR1', 'Q_THR2']
+
+ if columns_name_expected_out is None:
+ columns_name_expected_out = [
+ 'hmc_idx_x', 'hmc_idx_y', 'latitude', 'longitude', 'section_domain', 'section_name', 'section_code',
+ 'section_drained_area', 'section_discharge_thr_alert', 'section_discharge_thr_alarm']
+
+ if columns_name_type is None:
+ columns_name_type = ['int', 'int', 'float', 'float',
+ 'str', 'str', 'str', 'float', 'float', 'float']
+
+ file_dframe_raw = gpd.read_file(file_name)
+ file_rows = file_dframe_raw.shape[0]
+
+ section_obj = {}
+ for column_name_in, column_name_out, column_type in zip(columns_name_expected_in,
+ columns_name_expected_out, columns_name_type):
+ if column_name_in in file_dframe_raw.columns:
+ column_data = file_dframe_raw[column_name_in].values.tolist()
+ else:
+ if column_type == 'int':
+ column_data = [-9999] * file_rows
+ elif column_type == 'str':
+ column_data = [''] * file_rows
+ elif column_type == 'float':
+ column_data = [-9999.0] * file_rows
+ else:
+ raise NotImplementedError('Datatype not implemented yet')
+
+ section_obj[column_name_out] = column_data
+
+ section_df = pd.DataFrame(data=section_obj)
+
+ return section_df
+# -------------------------------------------------------------------------------------
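A minimal sketch of the case-insensitive lookup performed by find_file_section_data; the column names follow the defaults above, while the basin/section values and thresholds are placeholders (reading a real shapefile with read_file_section_data additionally requires geopandas and the expected attribute table).

import pandas as pd

from lib_data_io_shapefile import find_file_section_data

section_df = pd.DataFrame({
    'section_name': ['SectionA', 'SectionB'],
    'section_domain': ['BasinX', 'BasinX'],
    'section_discharge_thr_alert': [50.0, 80.0],
    'section_discharge_thr_alarm': [100.0, 160.0]})

# Returns the matching row as a dictionary, with the section/basin keys renamed
point_info = find_file_section_data(section_df, section_name='SectionA', basin_name='BasinX')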
diff --git a/apps/Analyzer_Execution/lib_info_args.py b/apps/Analyzer_Execution/lib_info_args.py
new file mode 100755
index 0000000..aacb901
--- /dev/null
+++ b/apps/Analyzer_Execution/lib_info_args.py
@@ -0,0 +1,32 @@
+"""
+Library Features:
+
+Name: lib_info_args
+Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
+Date: '20210113'
+Version: '1.0.0'
+"""
+
+#######################################################################################
+# Library
+import pandas as pd
+#######################################################################################
+
+# -------------------------------------------------------------------------------------
+# Time information
+time_type = 'GMT' # 'GMT', 'local'
+time_units = 'days since 1858-11-17 00:00:00'
+time_calendar = 'gregorian'
+time_format_datasets = "%Y%m%d%H%M"
+time_format_algorithm = '%Y-%m-%d %H:%M'
+time_machine = pd.Timestamp.now
+
+# Logging information
+logger_name = 'hat_runanalyzer_logger'
+logger_file = 'hat_runanalyzer.txt'
+logger_handle = 'file' # 'file' or 'stream'
+logger_format = '%(asctime)s %(name)-12s %(levelname)-8s %(message)-80s %(filename)s:[%(lineno)-6s - %(funcName)-20s()] '
+
+# Definition of zip extension
+zip_extension = '.gz'
+# -------------------------------------------------------------------------------------
diff --git a/apps/Analyzer_Execution/lib_utils_exec.py b/apps/Analyzer_Execution/lib_utils_exec.py
new file mode 100755
index 0000000..9acf9b8
--- /dev/null
+++ b/apps/Analyzer_Execution/lib_utils_exec.py
@@ -0,0 +1,65 @@
+"""
+Library Features:
+
+Name: lib_utils_exec
+Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
+Date: '20210113'
+Version: '1.0.0'
+"""
+
+# -------------------------------------------------------------------------------------
+# Libraries
+import logging
+import os
+import time
+
+import pandas as pd
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to read file execution information
+def read_file_execution_info(file_name,
+ tag_time_access='time_access', tag_time_modified='time_modified',
+ tag_time_create='time_create', tag_file_size='file_size'):
+
+ file_time_access = pd.Timestamp(time.ctime(os.path.getatime(file_name)))
+ file_time_modified = pd.Timestamp(time.ctime(os.path.getmtime(file_name)))
+ file_time_create = pd.Timestamp(time.ctime(os.path.getctime(file_name)))
+ file_size = os.path.getsize(file_name)
+
+ file_obj = {tag_time_access: file_time_access, tag_time_modified: file_time_modified,
+ tag_time_create: file_time_create, tag_file_size: file_size}
+
+ return file_obj
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to read file execution data
+def read_file_execution_data(execution_data, tag_name='run_name'):
+ logging.info(' ---> Read execution data ... ')
+ collections_data = {}
+ collections_columns = None
+ for exec_id, exec_step in execution_data.items():
+ exec_key = exec_step['features'][tag_name]
+ collections_data[exec_key] = {}
+
+ data_list = []
+ columns_list = []
+ for exec_subkey in exec_step.keys():
+ for exec_type, exec_value in exec_step[exec_subkey].items():
+ columns_list.append(exec_type)
+ data_list.append(exec_value)
+
+ collections_data[exec_key] = data_list
+ if collections_columns is None:
+ collections_columns = columns_list
+
+ execution_df = pd.DataFrame.from_dict(collections_data, orient='index', columns=collections_columns)
+
+ logging.info(' ---> Read execution data ... DONE')
+
+ return execution_df
+
+# -------------------------------------------------------------------------------------
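A minimal sketch of the nested dictionary expected by read_file_execution_data; apart from the 'features' group and the run_name tag used to key the output, all names and values are placeholders.

from lib_utils_exec import read_file_execution_data

execution_data = {
    'exec_01': {
        'features': {'run_name': 'run_example', 'run_description': 'example run'},
        'times': {'time_start': '2021-02-25 00:00', 'time_end': '2021-02-25 01:00'}}}

# One row per run, with columns collected from every sub-dictionary
execution_df = read_file_execution_data(execution_data, tag_name='run_name')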
diff --git a/apps/Analyzer_Execution/lib_utils_io.py b/apps/Analyzer_Execution/lib_utils_io.py
new file mode 100755
index 0000000..f126140
--- /dev/null
+++ b/apps/Analyzer_Execution/lib_utils_io.py
@@ -0,0 +1,180 @@
+# -------------------------------------------------------------------------------------
+# Libraries
+import logging
+import tempfile
+import os
+import time
+import json
+import pickle
+
+import numpy as np
+import pandas as pd
+
+from distutils.util import strtobool
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to read discharge file
+def read_file_discharge(file_name, file_template=None, file_skiprows=4):
+
+ file_tmp = pd.read_table(file_name)
+ file_data = list(file_tmp.values)
+
+ collections_id = 0
+ obj_mod = None
+ obj_observed = None
+ obj_header = {}
+ for row_id, row_step in enumerate(file_data):
+ row_tmp = row_step[0]
+ if row_id < file_skiprows:
+ field_key, field_data = row_tmp.rstrip().split('=')
+
+ if field_key in list(file_template.keys()):
+ field_name = file_template[field_key]['name']
+ field_type = file_template[field_key]['type']
+ field_value = file_template[field_key]['value']
+ else:
+ field_name = field_key
+ field_type = None
+ field_value = None
+
+ if field_type is not None:
+ if field_type == 'time_stamp':
+ field_data = pd.Timestamp(field_data)
+ elif field_type == 'time_frequency':
+ field_data = pd.Timedelta(field_data)
+ elif field_type == 'int':
+ field_data = int(field_data)
+
+ if field_value is not None:
+ field_data = field_value
+
+ obj_header[field_name] = field_data
+ elif row_id == file_skiprows:
+ columns_values = row_tmp.rstrip().split(' ')
+ array_observed = np.array(columns_values, dtype=float)
+ array_observed[array_observed < 0.0] = np.nan
+ time_n = array_observed.shape[0]
+
+ if obj_observed is None:
+ obj_observed = np.zeros(shape=(time_n, 1))
+ obj_observed[:, :] = np.nan
+ obj_observed[:, 0] = array_observed
+
+ elif row_id >= file_skiprows:
+
+ columns_values = row_tmp.rstrip().split(' ')
+ array_values = np.array(columns_values, dtype=float)
+ array_values[array_values < 0.0] = np.nan
+ time_n = array_values.shape[0]
+
+ if obj_mod is None:
+ obj_mod = np.zeros(shape=(time_n, obj_header['scenario_n']))
+ obj_mod[:, :] = np.nan
+ obj_mod[:, collections_id] = array_values
+
+ collections_id += 1
+
+ return obj_header, obj_mod, obj_observed
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to read warning file
+def read_file_warning(file_name, file_template=None, file_skiprows=4, tag_data='section_{:}'):
+
+ file_tmp = pd.read_table(file_name)
+ file_data = list(file_tmp.values)
+
+ file_time_modified = pd.Timestamp(time.ctime(os.path.getmtime(file_name)))
+ file_time_created = pd.Timestamp(time.ctime(os.path.getctime(file_name)))
+
+ collections_id = 0
+ file_header = {}
+ file_collections = {}
+ for row_id, row_step in enumerate(file_data):
+ row_tmp = row_step[0]
+ if row_id < file_skiprows:
+ field_key, field_data = row_tmp.rstrip().split('=')
+
+ if field_key in list(file_template.keys()):
+ field_name = file_template[field_key]['name']
+ field_type = file_template[field_key]['type']
+ field_value = file_template[field_key]['value']
+ else:
+ field_name = field_key
+ field_type = None
+ field_value = None
+
+ if field_type is not None:
+ if field_type == 'time_stamp':
+ field_data = pd.Timestamp(field_data)
+ elif field_type == 'time_frequency':
+ field_data = pd.Timedelta(field_data)
+ elif field_type == 'int':
+ field_data = int(field_data)
+
+ if field_value is not None:
+ field_data = field_value
+
+ file_header[field_name] = field_data
+ elif row_id == file_skiprows:
+ columns_name = row_tmp.rstrip().split(' ')
+ columns_type = [str, str, float, float, bool, float, bool]
+ elif row_id > file_skiprows:
+ columns_values = row_tmp.rstrip().split(' ')
+ file_collections[tag_data.format(collections_id)] = {}
+
+ for column_name, column_value, column_type in zip(columns_name, columns_values, columns_type):
+ file_collections[tag_data.format(collections_id)][column_name] = {}
+
+ if column_type is float:
+ column_value = float(column_value)
+ elif column_type is bool:
+ column_value = strtobool(column_value.lower())
+ elif column_type is str:
+ column_value = column_value.strip()
+ file_collections[tag_data.format(collections_id)][column_name] = column_value
+
+ collections_id += 1
+
+ file_header['time_file_created'] = file_time_created
+ file_header['time_file_modified'] = file_time_modified
+
+ return file_header, file_collections
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to create a tmp name
+def create_filename_tmp(prefix='tmp_', suffix='.tiff', folder=None):
+
+ if folder is None:
+ folder = '/tmp'
+
+ with tempfile.NamedTemporaryFile(dir=folder, prefix=prefix, suffix=suffix, delete=False) as tmp:
+ temp_file_name = tmp.name
+ return temp_file_name
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to read data obj
+def read_obj(filename):
+ if os.path.exists(filename):
+ data = pickle.load(open(filename, "rb"))
+ else:
+ data = None
+ return data
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to write data obj
+def write_obj(filename, data):
+ if os.path.exists(filename):
+ os.remove(filename)
+ with open(filename, 'wb') as handle:
+ pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
+# -------------------------------------------------------------------------------------
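A self-contained sketch of the ASCII layout assumed by read_file_discharge; the header keys, the template names and the values are illustrative placeholders rather than the Continuum output specification. Note that pd.read_table consumes the first file line as a column header, so only the remaining 'key=value' rows are matched against the template.

from lib_utils_io import read_file_discharge

file_rows = [
    'Procedure=HMC',                  # consumed by pd.read_table as the column header
    'DateStart=2021-02-25 00:00',
    'DateRun=2021-02-25 00:00',
    'TimeFrequency=3600 s',
    'SimLength=2',                    # number of model scenarios
    '1.2 1.5 1.9 2.4',                # observed discharge
    '1.1 1.6 2.3 2.8',                # model scenario 1
    '1.3 1.4 2.0 2.6']                # model scenario 2
with open('hydrograph_example.txt', 'w') as file_handle:
    file_handle.write('\n'.join(file_rows) + '\n')

file_template = {
    'DateStart': {'name': 'time_start', 'type': 'time_stamp', 'value': None},
    'DateRun': {'name': 'time_run', 'type': 'time_stamp', 'value': None},
    'TimeFrequency': {'name': 'time_frequency', 'type': 'time_frequency', 'value': None},
    'SimLength': {'name': 'scenario_n', 'type': 'int', 'value': None}}

file_header, discharge_mod, discharge_obs = read_file_discharge(
    'hydrograph_example.txt', file_template=file_template, file_skiprows=4)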
diff --git a/apps/Analyzer_Execution/lib_utils_logging.py b/apps/Analyzer_Execution/lib_utils_logging.py
new file mode 100755
index 0000000..90bcbc3
--- /dev/null
+++ b/apps/Analyzer_Execution/lib_utils_logging.py
@@ -0,0 +1,125 @@
+"""
+Library Features:
+
+Name: lib_utils_logging
+Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
+Date: '20201202'
+Version: '1.0.0'
+"""
+
+#######################################################################################
+# Library
+import logging
+import os
+import logging.config
+import glob
+
+from lib_info_args import logger_name as logger_name_default
+from lib_info_args import logger_file as logger_file_default
+from lib_info_args import logger_handle as logger_handle_default
+from lib_info_args import logger_format as logger_formatter_default
+
+from lib_utils_system import make_folder
+
+# Debug
+# import matplotlib.pylab as plt
+#######################################################################################
+
+
+# -------------------------------------------------------------------------------------
+# Method to set logging file
+def set_logging_file(logger_file=logger_file_default, logger_name=logger_name_default,
+ logger_handle=logger_handle_default, logger_formatter=logger_formatter_default,
+ logger_history=False, logger_history_maxfiles=12,
+ logger_extra_tags=None):
+
+ # Set to flush progressbar output in logging stream handle
+ # progressbar.streams.wrap_stderr()
+
+ if logger_extra_tags is not None:
+ for extra_key, extra_value in logger_extra_tags.items():
+ logger_file = logger_file.replace(extra_key, ':')
+ string_count = logger_file.count(':')
+ extra_value = [extra_value] * string_count
+ logger_file = logger_file.format(*extra_value)
+
+ logger_folder_name, logger_file_name = os.path.split(logger_file)
+ make_folder(logger_folder_name)
+
+ # Save old logger file (to check run in the past)
+ if logger_history:
+ store_logging_file(logger_file, logger_file_max=logger_history_maxfiles)
+
+ # Remove old logging file
+ if os.path.exists(logger_file):
+ os.remove(logger_file)
+
+ # Open logger
+ logging.getLogger(logger_name)
+ logging.root.setLevel(logging.DEBUG)
+
+ # Set logger handle type
+ if logger_handle == 'file':
+
+ # Set logger handler obj
+ logger_handle_1 = logging.FileHandler(logger_file, 'w')
+ logger_handle_2 = logging.StreamHandler()
+ # Set logger level
+ logger_handle_1.setLevel(logging.DEBUG)
+ logger_handle_2.setLevel(logging.DEBUG)
+ # Set logger formatter
+ logger_handle_1.setFormatter(logging.Formatter(logger_formatter))
+ logger_handle_2.setFormatter(logging.Formatter(logger_formatter))
+ # Add handle to logger
+ logging.getLogger('').addHandler(logger_handle_1)
+ logging.getLogger('').addHandler(logger_handle_2)
+
+ elif logger_handle == 'stream':
+
+ # Set logger handler obj
+ logger_handle_obj = logging.StreamHandler()
+ # Set logger level
+ logger_handle_obj.setLevel(logging.DEBUG)
+ # Set logger formatter
+ logger_handle_obj.setFormatter(logging.Formatter(logger_formatter))
+ # Add handle to logger
+ logging.getLogger('').addHandler(logger_handle_obj)
+
+ else:
+
+ # Set logger handler obj
+ logger_handle_obj = logging.NullHandler()
+ # Add handle to logger
+ logging.getLogger('').addHandler(logger_handle_obj)
+
+# -------------------------------------------------------------------------------------
+
+
+# -------------------------------------------------------------------------------------
+# Method to store logging file (to save execution history)
+def store_logging_file(logger_file, logger_ext='.old.{}', logger_file_max=12):
+
+ # Get logger folder
+ logger_folder = os.path.split(logger_file)[0]
+
+ # Iterate to store old logging file
+ if os.path.exists(logger_file):
+
+ logger_file_loop = logger_file
+ logger_file_id = 0
+ while os.path.exists(logger_file_loop):
+ logger_file_id = logger_file_id + 1
+ logger_file_loop = logger_file + logger_ext.format(logger_file_id)
+
+ if logger_file_id > logger_file_max:
+ logger_file_obj = glob.glob(os.path.join(logger_folder, '*'))
+ for logger_file_step in logger_file_obj:
+ if logger_file_step.startswith(logger_file):
+ os.remove(logger_file_step)
+ logger_file_loop = logger_file
+ break
+
+ if logger_file_loop:
+ if logger_file != logger_file_loop:
+ os.rename(logger_file, logger_file_loop)
+# -------------------------------------------------------------------------------------
diff --git a/apps/RunAnalyzer/lib_utils_system.py b/apps/Analyzer_Execution/lib_utils_system.py
similarity index 87%
rename from apps/RunAnalyzer/lib_utils_system.py
rename to apps/Analyzer_Execution/lib_utils_system.py
index 74c2e2b..40baaf5 100755
--- a/apps/RunAnalyzer/lib_utils_system.py
+++ b/apps/Analyzer_Execution/lib_utils_system.py
@@ -36,12 +36,15 @@ def fill_tags2string(string_raw, tags_format=None, tags_filling=None):
tags_format_tmp = deepcopy(tags_format)
for tag_key, tag_value in tags_format.items():
tag_key_tmp = '{' + tag_key + '}'
- if tag_value is not None:
- if tag_key_tmp in string_raw:
- string_filled = string_raw.replace(tag_key_tmp, tag_value)
- string_raw = string_filled
- else:
- tags_format_tmp.pop(tag_key, None)
+ if tag_key in list(tags_filling.keys()):
+ tag_filled = tags_filling[tag_key]
+ if tag_value is not None:
+ if tag_key_tmp in string_raw:
+ if tag_filled is not None:
+ string_filled = string_raw.replace(tag_key_tmp, tag_value)
+ string_raw = string_filled
+ else:
+ tags_format_tmp.pop(tag_key, None)
dim_max = 1
for tags_filling_values_tmp in tags_filling.values():
diff --git a/apps/RunAnalyzer/lib_utils_time.py b/apps/Analyzer_Execution/lib_utils_time.py
similarity index 100%
rename from apps/RunAnalyzer/lib_utils_time.py
rename to apps/Analyzer_Execution/lib_utils_time.py
diff --git a/apps/RunAnalyzer/HAT_RunAnalyzer_HMC_Main.py b/apps/RunAnalyzer/HAT_RunAnalyzer_HMC_Main.py
deleted file mode 100755
index 5e43843..0000000
--- a/apps/RunAnalyzer/HAT_RunAnalyzer_HMC_Main.py
+++ /dev/null
@@ -1,796 +0,0 @@
-#!/usr/bin/python3
-"""
-Hydrological Analysis Tool - RunAnalyzer
-
-__date__ = '20200605'
-__version__ = '1.0.0'
-__author__ =
- 'Fabio Delogu (fabio.delogu@cimafoundation.org',
- 'Flavio Pignone (flavio.pignone@cimafoundation.org',
-
-__library__ = 'HAT'
-
-General command line:
-python3 HAT_RunAnalyzer_HMC_Main.py -settings_file configuration.json -time "YYYY-MM-DD HH:MM"
-
-Version(s):
-20200605 (1.0.0) --> Beta release
-"""
-
-# -------------------------------------------------------------------------------------
-# Complete library
-import logging
-import warnings
-import time
-import os
-
-import numpy as np
-import pandas as pd
-
-from copy import deepcopy
-from argparse import ArgumentParser
-
-from lib_utils_io import read_file_json, read_file_warning, read_file_discharge, write_file_status, read_obj, write_obj
-from lib_utils_system import make_folder, fill_tags2string
-from lib_utils_time import set_time
-# -------------------------------------------------------------------------------------
-
-# -------------------------------------------------------------------------------------
-# Algorithm information
-alg_version = '1.0.0'
-alg_release = '2020-06-05'
-alg_name = 'RUN ANALYZER'
-# Algorithm parameter(s)
-time_format = '%Y-%m-%d %H:%M'
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Script Main
-def main():
-
- # -------------------------------------------------------------------------------------
- # Get algorithm settings
- alg_settings, alg_time = get_args()
-
- # Set algorithm settings
- data_settings = read_file_json(alg_settings)
-
- # Set algorithm logging
- make_folder(data_settings['log']['folder_name'])
- set_logging(logger_file=os.path.join(data_settings['log']['folder_name'],
- data_settings['log']['file_name']))
- # -------------------------------------------------------------------------------------
-
- # -------------------------------------------------------------------------------------
- # Info algorithm
- logging.info(' ============================================================================ ')
- logging.info(' ==> ' + alg_name + ' (Version: ' + alg_version + ' Release_Date: ' + alg_release + ')')
- logging.info(' ==> START ... ')
- logging.info(' ')
-
- # Time algorithm information
- start_time = time.time()
- # -------------------------------------------------------------------------------------
-
- # -------------------------------------------------------------------------------------
- # Organize time information
- time_run, time_exec, time_range = set_time(
- time_run_args=alg_time, time_run_file=data_settings['time']['time_now'],
- time_format=time_format,
- time_period=data_settings['time']['time_period'],
- time_frequency=data_settings['time']['time_frequency'],
- time_rounding=data_settings['time']['time_rounding'])
- # -------------------------------------------------------------------------------------
-
- # -------------------------------------------------------------------------------------
- # Method to freeze run basin information
- info_collections = freeze_run_info(data_settings['run'])
-
- # Method to search available executions
- execution_collections = search_run_execution(
- time_run,
- algorithm_execution=data_settings['data']['source']['execution'],
- run_data=data_settings['run'],
- template_tags=data_settings['template_tags'])
-
- # Method to search available results
- results_header, results_collections = search_run_results(
- time_range,
- algorithm_results=data_settings['data']['source']['results'],
- run_data=data_settings['run'],
- template_tags=data_settings['template_tags'])
-
- # Method to get run warnings
- data_collections_warning = get_run_warning(results_collections, template_fields=data_settings['template_fields'])
-
- # Method to get run discharges
- data_header, data_collections_discharge = get_run_discharge(results_collections, data_collections_warning,
- template_fields=data_settings['template_fields'])
-
- # Method to analyze run execution
- analysis_collections_executions = analyze_run_execution(execution_collections,
- template_fields=data_settings['template_fields'])
-
- # Method to analyze run results
- analysis_header, analysis_collections_results = analyze_run_results(
- time_run, data_header, results_header, data_collections_discharge,
- flag_ancillary=data_settings['flags']['update_ancillary'],
- data_ancillary=data_settings['data']['ancillary'],
- template_tags=data_settings['template_tags']
- )
-
- # Method to dump run status
- dump_run_status(time_run, time_exec, time_format=time_format,
- algorithm_destination=data_settings['data']['destination'],
- default_header=info_collections,
- analysis_header=analysis_header,
- analysis_execution=analysis_collections_executions,
- analysis_results=analysis_collections_results,
- template_tags=data_settings['template_tags'])
- # -------------------------------------------------------------------------------------
-
- # -------------------------------------------------------------------------------------
- # Info algorithm
- time_elapsed = round(time.time() - start_time, 1)
-
- logging.info(' ')
- logging.info(' ==> ' + alg_name + ' (Version: ' + alg_version + ' Release_Date: ' + alg_release + ')')
- logging.info(' ==> TIME ELAPSED: ' + str(time_elapsed) + ' seconds')
- logging.info(' ==> ... END')
- logging.info(' ==> Bye, Bye')
- logging.info(' ============================================================================ ')
-
- # -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to dump run status
-def dump_run_status(time_run, time_exec, time_format='%Y-%m-%d %H:%M',
- algorithm_destination=None,
- default_header=None,
- analysis_header=None, analysis_execution=None, analysis_results=None,
- template_tags=None,
- folder_name_tag='folder_name', file_name_tag='file_name'):
-
- datetime_run = time_run.to_pydatetime()
- # datetime_exec = time_exec.to_pydatetime()
-
- time_ref = time_run.replace(hour=0, minute=0)
- time_range_ref = pd.date_range(start=time_ref, periods=2, freq='D')
-
- folder_name_raw = algorithm_destination[folder_name_tag]
- file_name_raw = algorithm_destination[file_name_tag]
-
- template_values = {'run_datetime': datetime_run, 'run_sub_path_time': datetime_run,
- 'analysis_datetime': datetime_run, 'analysis_sub_path_time': datetime_run}
-
- folder_name_def = fill_tags2string(folder_name_raw, template_tags, template_values)
- file_name_def = fill_tags2string(file_name_raw, template_tags, template_values)
-
- make_folder(folder_name_def)
- file_path_def = os.path.join(folder_name_def, file_name_def)
-
- logging.info(' ---> Organize html datasets ... ')
-
- collections_ancillary = {}
- collections_alarm = {}
- collections_alert = {}
- for run_key, run_fields in analysis_header.items():
-
- # DEBUG START
- # run_key = 'realtime_rf-ecmwf-0100'
- # run_fields = analysis_header[run_key]
-
- logging.info(' ----> RunType ' + run_key + ' ... ')
-
- if run_fields is not None:
-
- collections_ancillary[run_key] = {}
- collections_alarm[run_key] = {}
- collections_alert[run_key] = {}
-
- run_analysis_execution = analysis_execution[run_key]
- run_analysis_results = analysis_results[run_key]
-
- collections_ancillary[run_key]['description'] = run_fields['description']
- collections_ancillary[run_key]['scenario_n'] = run_fields['scenario_n']
- collections_ancillary[run_key]['section_n'] = run_fields['section_n']
- collections_ancillary[run_key]['time_modified_first'] = run_analysis_execution['time_created_first']
- collections_ancillary[run_key]['time_modified_last'] = run_fields['time_file_modified']
- collections_ancillary[run_key]['time_range'] = time_range_ref
-
- for point_key, point_fields in run_analysis_results.items():
-
- point_tag = point_fields['name']
- point_fields_alarm = point_fields['discharge_alarm']
- point_fields_alert = point_fields['discharge_alert']
-
- point_field_alert_select = point_fields_alert[point_fields_alert.index >= time_ref]
- point_field_alarm_select = point_fields_alarm[point_fields_alarm.index >= time_ref]
-
- logging.info(' -----> Point ' + point_tag + ' ... ')
-
- for time_id, time_range_step in enumerate(time_range_ref):
-
- time_alert_control = [time_check for time_check in point_field_alert_select.index if
- time_check.replace(hour=0, minute=0) == time_range_step]
- time_alarm_control = [time_check for time_check in point_field_alarm_select.index if
- time_check.replace(hour=0, minute=0) == time_range_step]
-
- if time_alert_control:
-
- if point_field_alert_select.values[time_id] != -9999:
- point_field_alert_idx = point_field_alert_select.index[time_id]
- point_field_alert_value = point_field_alert_select.values[time_id]
-
- if time_range_step not in list(collections_alert[run_key].keys()):
- collections_alert[run_key][time_range_step] = {}
- collections_alert[run_key][time_range_step]['name'] = [point_tag]
- collections_alert[run_key][time_range_step]['idx'] = [point_field_alert_idx]
- collections_alert[run_key][time_range_step]['value'] = [point_field_alert_value]
- else:
- name_tmp = collections_alert[run_key][time_range_step]['name']
- idx_tmp = collections_alert[run_key][time_range_step]['idx']
- value_tmp = collections_alert[run_key][time_range_step]['value']
-
- name_tmp.append(point_tag)
- idx_tmp.append(point_field_alert_idx)
- value_tmp.append(point_field_alert_value)
-
- collections_alert[run_key][time_range_step]['name'] = name_tmp
- collections_alert[run_key][time_range_step]['idx'] = idx_tmp
- collections_alert[run_key][time_range_step]['value'] = value_tmp
-
- if time_alarm_control:
-
- if point_field_alarm_select.values[time_id] != -9999:
- point_field_alarm_idx = point_field_alarm_select.index[time_id]
- point_field_alarm_value = point_field_alarm_select.values[time_id]
-
- if time_range_step not in list(collections_alarm[run_key].keys()):
- collections_alarm[run_key][time_range_step] = {}
- collections_alarm[run_key][time_range_step]['name'] = [point_tag]
- collections_alarm[run_key][time_range_step]['idx'] = [point_field_alarm_idx]
- collections_alarm[run_key][time_range_step]['value'] = [point_field_alarm_value]
- else:
- name_tmp = collections_alarm[run_key][time_range_step]['name']
- idx_tmp = collections_alarm[run_key][time_range_step]['idx']
- value_tmp = collections_alarm[run_key][time_range_step]['value']
-
- name_tmp.append(point_tag)
- idx_tmp.append(point_field_alarm_idx)
- value_tmp.append(point_field_alarm_value)
-
- collections_alarm[run_key][time_range_step]['name'] = name_tmp
- collections_alarm[run_key][time_range_step]['idx'] = idx_tmp
- collections_alarm[run_key][time_range_step]['value'] = value_tmp
-
- logging.info(' -----> Point ' + point_tag + ' ... DONE')
-
- logging.info(' ----> RunType ' + run_key + ' ... DONE')
-
- else:
- collections_ancillary[run_key] = None
- collections_alarm[run_key] = None
- collections_alert[run_key] = None
-
- logging.info(' ----> RunType ' + run_key + ' ... SKIPPED. Datasets are undefined.')
-
- logging.info(' ---> Organize html datasets ... DONE')
-
- logging.info(' ---> Write html datasets ... ')
- write_file_status(time_run, time_exec, time_format='%Y-%m-%d %H:%M', html_name=file_path_def,
- run_default=default_header,
- run_execution=analysis_execution,
- run_ancillary=collections_ancillary,
- run_alert=collections_alert, run_alarm=collections_alarm)
- logging.info(' ---> Write html datasets ... DONE')
-
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to analyze run execution(s)
-def analyze_run_execution(execution_collections, template_fields=None):
-
- logging.info(' ---> Analyze run executions ... ')
-
- html_collections = {}
- for run_key, run_file_list in execution_collections.items():
-
- logging.info(' ----> RunType ' + run_key + ' ... ')
-
- if run_file_list:
-
- time_created = []
- time_modified = []
- for file_name_raw in run_file_list:
-
- template_values = {'run_name': run_key}
-
- file_name_def = fill_tags2string(file_name_raw, template_fields, template_values)
-
- file_modified = pd.Timestamp(time.ctime(os.path.getmtime(file_name_def)))
- file_created = pd.Timestamp(time.ctime(os.path.getctime(file_name_def)))
-
- time_created.append(file_created)
- time_modified.append(file_modified)
-
- scenario_n = run_file_list.__len__()
-
- datetime_created = pd.DatetimeIndex(time_created)
- datetime_modified = pd.DatetimeIndex(time_modified)
-
- datetime_created_first = datetime_created.min()
- datetime_created_last = datetime_created.max()
-
- datetime_modified_first = datetime_modified.min()
- datetime_modified_last = datetime_modified.max()
-
- html_collections[run_key] = {}
- html_collections[run_key]['scenario_n'] = scenario_n
- html_collections[run_key]['time_created_first'] = datetime_created_first
- html_collections[run_key]['time_created_last'] = datetime_created_last
- html_collections[run_key]['time_modified_first'] = datetime_modified_first
- html_collections[run_key]['time_modified_last'] = datetime_modified_last
-
- logging.info(' ----> RunType ' + run_key + ' ... DONE')
-
- else:
-
- html_collections[run_key] = None
- logging.info(' ----> RunType ' + run_key + ' ... SKIPPED. Run is not available.')
-
- logging.info(' ---> Analyze run executions ... DONE')
-
- return html_collections
-
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to analyze run results
-def analyze_run_results(time_run, data_header, results_header, data_collections,
- tag_columns='data_max', data_ancillary=None, flag_ancillary=False,
- template_tags=None):
-
- logging.info(' ---> Analyze run results ... ')
-
- # Prepare ancillary data
- folder_name_raw = data_ancillary['folder_name']
- file_name_raw = data_ancillary['file_name']
-
- template_values = {'run_datetime': time_run, 'run_sub_path_time': time_run}
-
- folder_name_def = fill_tags2string(folder_name_raw, template_tags, template_values)
- file_name_def = fill_tags2string(file_name_raw, template_tags, template_values)
-
- file_path_def = os.path.join(folder_name_def, file_name_def)
-
- if flag_ancillary:
- if os.path.exists(file_path_def):
- os.remove(file_path_def)
-
- if not os.path.exists(file_path_def) or flag_ancillary:
- html_header = {}
- html_collections = {}
- for run_key, run_data in data_collections.items():
-
- logging.info(' ----> RunType ' + run_key + ' ... ')
-
- # DEBUG START
- # run_key = 'realtime_rf-ecmwf-0100'
- # run_data = data_collections[run_key]
-
- if run_data is not None:
-
- html_header[run_key] = {**data_header[run_key], **results_header[run_key]}
-
- html_collections[run_key] = {}
- for point_key, point_data in run_data.items():
-
- point_basin = point_data['Basin']
- point_section = point_data['Section']
- point_alarm_thr = point_data['QAlarm']
- point_alert_thr = point_data['QAlert']
- point_data_raw = point_data['discharge_modelled']
-
- point_tag = '@'.join([point_basin, point_section])
-
- logging.info(' -----> Point ' + point_tag + ' ... ')
-
- point_time_n = point_data_raw.shape[0]
- point_step_n = point_data_raw.shape[1]
-
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- point_data_max = np.nanmax(point_data_raw, axis=1)
-
- point_time_hourly_idx = pd.date_range(start=data_header[run_key]['time_start'],
- periods=point_time_n, freq=data_header[run_key]['time_frequency'])
-
- point_time_hourly_start = point_time_hourly_idx[0]
- point_time_hourly_end = point_time_hourly_idx[-1]
-
- point_df_hourly = pd.DataFrame(index=point_time_hourly_idx, data=point_data_max, columns=[tag_columns])
-
- point_df_daily = point_df_hourly.loc[point_df_hourly.groupby(pd.Grouper(freq='D')).idxmax().iloc[:, 0]]
- point_time_daily_maximum = point_df_daily.index
- point_time_daily_default = point_df_daily.index.floor('D')
-
- point_df_alarm_select = point_df_daily[point_df_daily[tag_columns] >= point_alarm_thr]
- point_df_alert_select = point_df_daily[point_df_daily[tag_columns] >= point_alert_thr]
-
- point_data_daily_default = np.zeros(shape=point_time_daily_default.size)
- point_data_daily_default[:] = -9999.0
- point_df_daily_default = pd.DataFrame(index=point_time_daily_default, data=point_data_daily_default,
- columns=[tag_columns])
-
- point_df_alarm_filled = point_df_daily_default.combine_first(point_df_alarm_select)
- point_df_alarm_unique = point_df_alarm_filled.loc[point_df_alarm_filled.groupby(
- pd.Grouper(freq='D')).idxmax().iloc[:, 0]]
-
- point_df_alert_filled = point_df_daily_default.combine_first(point_df_alert_select)
- point_df_alert_unique = point_df_alert_filled.loc[point_df_alert_filled.groupby(
- pd.Grouper(freq='D')).idxmax().iloc[:, 0]]
-
- html_collections[run_key][point_key] = {}
- html_collections[run_key][point_key]['name'] = point_tag
- html_collections[run_key][point_key]['time_start'] = point_time_hourly_start
- html_collections[run_key][point_key]['time_end'] = point_time_hourly_end
- html_collections[run_key][point_key]['discharge_alarm'] = point_df_alarm_unique
- html_collections[run_key][point_key]['discharge_alert'] = point_df_alert_unique
-
- logging.info(' -----> Point ' + point_tag + ' ... DONE')
-
- logging.info(' ----> RunType ' + run_key + ' ... DONE')
-
- else:
- html_header[run_key] = None
- html_collections[run_key] = None
-
- logging.info(' ----> RunType ' + run_key + ' ... SKIPPED. Datasets are undefined.')
-
- make_folder(folder_name_def)
- html_obj = {'header': html_header, 'collections': html_collections}
- write_obj(file_path_def, html_obj)
-
- logging.info(' ---> Analyze run results ... DONE')
-
- else:
-
- html_obj = read_obj(file_path_def)
- html_header = html_obj['header']
- html_collections = html_obj['collections']
-
- logging.info(' ---> Analyze run results ... SKIPPED. PREVIOUSLY SAVED')
-
- return html_header, html_collections
-
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to get warning data
-def get_run_warning(run_collections, tag_field='warning', template_fields=None):
-
- logging.info(' ---> Get warning table ... ')
-
- warning_collections = {}
- for run_key, run_data in run_collections.items():
-
- logging.info(' ----> RunType ' + run_key + ' ... ')
-
- warning_collections[run_key] = {}
- if run_data:
-
- run_fields = run_data[tag_field]
- file_step = run_fields['file_list'][0]
-
- if os.path.exists(file_step):
- data_header, data_collections = read_file_warning(file_step, file_template=template_fields)
- data_header['section_n'] = list(data_collections.keys()).__len__()
- else:
- data_header = None
- data_collections = None
-
- warning_collections[run_key]['header'] = data_header
- warning_collections[run_key]['data'] = data_collections
-
- logging.info(' ----> RunType ' + run_key + ' ... DONE')
-
- else:
- warning_collections[run_key]['header'] = None
- warning_collections[run_key]['data'] = None
-
- logging.info(' ----> RunType ' + run_key + ' ... SKIPPED. Warning table is not available')
-
- logging.info(' ---> Get warning table ... DONE')
-
- return warning_collections
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to get discharge data
-def get_run_discharge(run_collections, warning_collections, tag_field='discharge', template_fields=None):
-
- logging.info(' ---> Get discharge time-series ... ')
-
- data_header = {}
- data_collections = {}
- for run_key, run_data in run_collections.items():
-
- logging.info(' ----> RunType ' + run_key + ' ... ')
-
- if run_data:
-
- run_fields = run_data[tag_field]
- warning_section = warning_collections[run_key]
-
- warning_header = warning_section['header']
- warning_data = warning_section['data']
-
- data_workspace = deepcopy(warning_data)
-
- data_header[run_key] = {}
- data_collections[run_key] = {}
- for (data_key, data_field), data_ws in zip(warning_data.items(), data_workspace.values()):
-
- field_basin = data_field['Basin']
- field_section = data_field['Section']
-
- file_select = [elem for elem in run_fields['file_list'] if (field_basin in elem) and (field_section in elem)][0]
-
- if os.path.exists(file_select):
- discharge_header, discharge_data_modelled, discharge_data_observed = read_file_discharge(
- file_select, file_template=template_fields)
- else:
- discharge_header = None
- discharge_data_modelled = None
- discharge_data_observed = None
-
- data_header[run_key] = {**warning_header, **discharge_header}
- data_ws['discharge_observed'] = discharge_data_observed
- data_ws['discharge_modelled'] = discharge_data_modelled
-
- data_collections[run_key][data_key] = data_ws
-
- logging.info(' ----> RunType ' + run_key + ' ... DONE')
-
- else:
- data_header[run_key] = None
- data_collections[run_key] = None
-
- logging.info(' ----> RunType ' + run_key + ' ... SKIPPED. Discharge time-series are not available')
-
- logging.info(' ---> Get discharge time-series ... DONE')
-
- return data_header, data_collections
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to freeze run information
-def freeze_run_info(run_data):
- logging.info(' ---> Freeze run information ... ')
- info_collections = {}
- for run_id, run_step in run_data.items():
- run_key = run_step['ancillary']['name']
- info_collections[run_key] = {}
- for run_field, run_value in run_step['ancillary'].items():
- info_collections[run_key][run_field] = run_value
- logging.info(' ---> Freeze run information ... DONE')
-
- return info_collections
-
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to search available run
-def search_run_execution(time_run, algorithm_execution, run_data=None, template_tags=None,
- folder_name_tag='folder_name', file_name_tag='file_name'):
-
- logging.info(' ---> Search run executions ... ')
-
- execution_collections = {}
-
- datetime_run = time_run.to_pydatetime()
- for run_id, run_step in run_data.items():
-
- run_key = run_step['ancillary']['name']
-
- logging.info(' ----> RunType ' + run_key + ' ... ')
-
- folder_name_raw = algorithm_execution[folder_name_tag]
- file_name_raw = algorithm_execution[file_name_tag]
-
- template_values = {'run_name': run_key,
- 'run_datetime': datetime_run, 'run_sub_path_time': datetime_run}
-
- folder_name_def = fill_tags2string(folder_name_raw, template_tags, template_values)
- file_name_def = fill_tags2string(file_name_raw, template_tags, template_values)
-
- file_list = []
- for root, dirs, files in os.walk(folder_name_def, topdown=False):
- for name in files:
- if name == file_name_def:
- file_list.append(os.path.join(root, name))
-
- execution_collections[run_key] = {}
- execution_collections[run_key] = file_list
-
- logging.info(' ----> RunType ' + run_key + ' ... DONE')
-
- logging.info(' ---> Search run executions ... DONE')
-
- return execution_collections
-
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to search available run results
-def search_run_results(time_range_max, algorithm_results=None, run_data=None, template_tags=None,
- folder_name_tag='folder_name', file_name_tag='file_name'):
-
- logging.info(' ---> Search run results ... ')
-
- results_header = {}
- results_collections = {}
- for run_id, run_step in run_data.items():
-
- run_key = run_step['ancillary']['name']
-
- time_period = run_step['time']['time_period']
- time_frequency = run_step['time']['time_frequency']
- time_rounding = run_step['time']['time_rounding']
-
- time_end = time_range_max[0]
- if time_period == 1:
- time_start = time_end.floor(time_rounding)
- else:
- time_start = pd.date_range(end=time_end.floor(time_rounding),
- periods=time_period, freq=time_rounding)[0]
-
- if time_end == time_start:
- time_range_adapt = pd.date_range(end=time_end, periods=time_period, freq=time_frequency)
- else:
- time_range_adapt = pd.date_range(start=time_start, end=time_end, freq=time_frequency)
- time_range_adapt = time_range_adapt[::-1]
-
- logging.info(' ----> RunType ' + run_key + ' ... ')
-
- run_locs = deepcopy(algorithm_results)
-
- results_collections[run_key] = {}
- results_header[run_key] = {}
- path_name_time = []
- path_name_list = []
- for time_step in time_range_adapt:
-
- datetime_step = time_step.to_pydatetime()
-
- for loc_key, loc_args in run_locs.items():
-
- template_values = {'run_name': run_key,
- 'run_datetime': datetime_step, 'run_sub_path_time': datetime_step}
-
- folder_name_raw = loc_args[folder_name_tag]
- file_name_raw = loc_args[file_name_tag]
-
- folder_name_def = fill_tags2string(folder_name_raw, template_tags, template_values)
- file_name_def = fill_tags2string(file_name_raw, template_tags, template_values)
-
- if os.path.exists(folder_name_def):
-
- if loc_key not in list(results_collections[run_key].keys()):
- results_collections[run_key][loc_key] = {}
- path_name_time = []
- path_name_list = []
-
- if not path_name_list:
-
- if '*' in file_name_def:
- file_name_list = os.listdir(folder_name_def)
-
- if file_name_list.__len__() > 0:
-
- for file_name_step in file_name_list:
- path_name_step = os.path.join(folder_name_def, file_name_step)
- path_name_list.append(path_name_step)
- path_name_time.append(time_step)
-
- else:
- path_name_def = os.path.join(folder_name_def, file_name_def)
- if os.path.exists(path_name_def):
- path_name_list.append(path_name_def)
- path_name_time.append(time_step)
-
- if path_name_list.__len__() > 0:
- results_collections[run_key][loc_key]['time'] = path_name_time
- results_collections[run_key][loc_key]['file_list'] = path_name_list
-
- for field_key, field_value in run_step['ancillary'].items():
- results_header[run_key][field_key] = field_value
- for field_key, field_value in run_step['time'].items():
- results_header[run_key][field_key] = field_value
-
- logging.info(' ----> RunType ' + run_key + ' ... DONE')
-
- logging.info(' ---> Search run results ... DONE')
-
- return results_header, results_collections
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to get script argument(s)
-def get_args():
- parser_handle = ArgumentParser()
- parser_handle.add_argument('-settings_file', action="store", dest="alg_settings")
- parser_handle.add_argument('-time', action="store", dest="alg_time")
- parser_values = parser_handle.parse_args()
-
- if parser_values.alg_settings:
- alg_settings = parser_values.alg_settings
- else:
- alg_settings = 'configuration.json'
-
- if parser_values.alg_time:
- alg_time = parser_values.alg_time
- else:
- alg_time = None
-
- return alg_settings, alg_time
-
-
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to set logging information
-def set_logging(logger_file='log.txt', logger_format=None):
- if logger_format is None:
- logger_format = '%(asctime)s %(name)-12s %(levelname)-8s ' \
- '%(filename)s:[%(lineno)-6s - %(funcName)20s()] %(message)s'
-
- # Remove old logging file
- if os.path.exists(logger_file):
- os.remove(logger_file)
-
- # Set level of root debugger
- logging.root.setLevel(logging.DEBUG)
-
- # Open logging basic configuration
- logging.basicConfig(level=logging.DEBUG, format=logger_format, filename=logger_file, filemode='w')
-
- # Set logger handles (basicConfig above has already attached a file handler to the root logger; these are added in addition)
- logger_handle_1 = logging.FileHandler(logger_file, 'w')
- logger_handle_2 = logging.StreamHandler()
- # Set logger level
- logger_handle_1.setLevel(logging.DEBUG)
- logger_handle_2.setLevel(logging.DEBUG)
- # Set logger formatter
- logger_formatter = logging.Formatter(logger_format)
- logger_handle_1.setFormatter(logger_formatter)
- logger_handle_2.setFormatter(logger_formatter)
-
- # Add handle to logging
- logging.getLogger('').addHandler(logger_handle_1)
- logging.getLogger('').addHandler(logger_handle_2)
-
-
-# -------------------------------------------------------------------------------------
-
-
-# ----------------------------------------------------------------------------
-# Call script from external library
-if __name__ == '__main__':
- main()
-# ----------------------------------------------------------------------------
diff --git a/apps/RunAnalyzer/hat_runanalyzer_hmc_configuration_local.json b/apps/RunAnalyzer/hat_runanalyzer_hmc_configuration_local.json
deleted file mode 100755
index bd2fe65..0000000
--- a/apps/RunAnalyzer/hat_runanalyzer_hmc_configuration_local.json
+++ /dev/null
@@ -1,115 +0,0 @@
-{
- "flags": {
- "update_ancillary": false,
- "clean_ancillary": false
- },
- "template_tags": {
- "run_name": "string_name",
- "run_datetime": "%Y%m%d%H%M",
- "run_sub_path_time": "%Y/%m/%d/%H",
- "analysis_datetime": "%Y%m%d%H%M",
- "analysis_sub_path_time": "%Y/%m/%d/"
- },
- "template_fields": {
- "DateMeteoModel": {"name" : "time_run", "type" : "time_stamp", "value": null},
- "sTimeNow": {"name" : "time_run", "type" : "time_stamp", "value": null},
- "TimeFrom": {"name" : "time_start", "type" : "time_stamp", "value": null},
- "TimeTo": {"name" : "time_end", "type" : "time_stamp", "value": null},
- "DateStart": {"name" : "time_start", "type" : "time_stamp", "value": null},
- "Temp.Resolution": {"name" : "time_frequency", "type" : "time_frequency", "value": "H"},
- "SscenariosNumber": {"name" : "scenario_n", "type" : "int", "value": null}
- },
- "time": {
- "time_now": null,
- "time_period": 36,
- "time_frequency": "H",
- "time_rounding": "H"
- },
- "data": {
- "source" : {
- "execution": {
- "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/run/{run_name}/exec/",
- "file_name": "HMC_Model_V2_{run_name}.x"
- },
- "results": {
- "discharge": {
- "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/archive/{run_name}/{run_sub_path_time}/timeseries/section_q",
- "file_name": "hydrograph_{*}_{*}_{run_datetime}.txt"
- },
- "warning": {
- "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/archive/{run_name}/{run_sub_path_time}/warnings/",
- "file_name": "warnings_sections_{run_datetime}.txt"
- }
- }
- },
- "ancillary": {
- "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/archive/run_analyzer/ancillary/",
- "file_name": "run_analyzer_{run_datetime}.workspace"
- },
- "destination": {
- "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/archive/run_analyzer/html/{analysis_sub_path_time}",
- "file_name": "summary_simulations_marche.html"
- }
- },
- "run": {
- "id_01": {
- "ancillary": {
- "name": "realtime_ws-db",
- "description": "RUN OSSERVATO"
- },
- "time": {
- "time_period": 20,
- "time_frequency": "H",
- "time_rounding": "H"
- }
- },
- "id_02": {
- "ancillary": {
- "name": "realtime_nwp-ecmwf-0100",
- "description": "ECMWF DETERMINISTICO"
- },
- "time": {
- "time_period": 1,
- "time_frequency": "H",
- "time_rounding": "D"
- }
- },
- "id_03": {
- "ancillary": {
- "name": "realtime_rf-ecmwf-0100",
- "description": "ECMWF PROBABILISTICO"
- },
- "time": {
- "time_period": 1,
- "time_frequency": "H",
- "time_rounding": "D"
- }
- },
- "id_04": {
- "ancillary": {
- "name": "realtime_nwp-lami-2i",
- "description": "LAMI DETERMINISTICO"
- },
- "time": {
- "time_period": 1,
- "time_frequency": "H",
- "time_rounding": "D"
- }
- },
- "id_05": {
- "ancillary": {
- "name": "realtime_rf-lami-2i",
- "description": "LAMI PROBABILISTICO"
- },
- "time": {
- "time_period": 1,
- "time_frequency": "H",
- "time_rounding": "D"
- }
- }
- },
- "log": {
- "folder_name": "/home/fabio/Desktop/PyCharm_Workspace/hat-ws/log/",
- "file_name": "hat_runanalyzer_hmc_log.txt"
- }
-}
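fill_tags2string itself is not included in this diff; the sketch below is only an assumption of how the {tag} placeholders in the paths above are presumably expanded, rendering datetime values with the strftime pattern declared in template_tags and substituting other values as plain strings.

    from datetime import datetime

    def fill_tags_sketch(raw, template_tags, template_values):
        # Hypothetical stand-in for fill_tags2string (assumption, not the real helper)
        rendered = {}
        for tag, value in template_values.items():
            fmt = template_tags.get(tag)
            rendered[tag] = value.strftime(fmt) if isinstance(value, datetime) and fmt else str(value)
        return raw.format(**rendered)

    template_tags = {'run_name': 'string_name', 'run_datetime': '%Y%m%d%H%M',
                     'run_sub_path_time': '%Y/%m/%d/%H'}
    template_values = {'run_name': 'realtime_ws-db',
                       'run_datetime': datetime(2021, 2, 22, 12, 0),
                       'run_sub_path_time': datetime(2021, 2, 22, 12, 0)}

    folder_raw = '/home/fabio/Desktop/PyCharm_Workspace/hat-ws/archive/{run_name}/{run_sub_path_time}/'
    print(fill_tags_sketch(folder_raw, template_tags, template_values))
    # /home/fabio/Desktop/PyCharm_Workspace/hat-ws/archive/realtime_ws-db/2021/02/22/12/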
diff --git a/apps/RunAnalyzer/hat_runanalyzer_hmc_configuration_realtime.json b/apps/RunAnalyzer/hat_runanalyzer_hmc_configuration_realtime.json
deleted file mode 100755
index a5ccd25..0000000
--- a/apps/RunAnalyzer/hat_runanalyzer_hmc_configuration_realtime.json
+++ /dev/null
@@ -1,137 +0,0 @@
-{
- "flags": {
- "update_ancillary": false,
- "clean_ancillary": false
- },
- "template_tags": {
- "run_name": "string_name",
- "run_datetime": "%Y%m%d%H%M",
- "run_sub_path_time": "%Y/%m/%d/%H",
- "analysis_datetime": "%Y%m%d%H%M",
- "analysis_sub_path_time": "%Y/%m/%d/"
- },
- "template_fields": {
- "DateMeteoModel": {"name" : "time_run", "type" : "time_stamp", "value": null},
- "sTimeNow": {"name" : "time_run", "type" : "time_stamp", "value": null},
- "TimeFrom": {"name" : "time_start", "type" : "time_stamp", "value": null},
- "TimeTo": {"name" : "time_end", "type" : "time_stamp", "value": null},
- "DateStart": {"name" : "time_start", "type" : "time_stamp", "value": null},
- "Temp.Resolution": {"name" : "time_frequency", "type" : "time_frequency", "value": "H"},
- "SscenariosNumber": {"name" : "scenario_n", "type" : "int", "value": null}
- },
- "time": {
- "time_now": null,
- "time_period": 36,
- "time_frequency": "H",
- "time_rounding": "H"
- },
- "data": {
- "source" : {
- "execution": {
- "folder_name": "/hydro/run/{run_name}/exec/",
- "file_name": "hmc.log"
- },
- "results": {
- "discharge": {
- "folder_name": "/hydro/archive/{run_name}/{run_sub_path_time}/timeseries/section_q",
- "file_name": "hydrograph_{*}_{*}_{run_datetime}.txt"
- },
- "warning": {
- "folder_name": "/hydro/archive/{run_name}/{run_sub_path_time}/warnings/",
- "file_name": "warnings_sections_{run_datetime}.txt"
- }
- }
- },
- "ancillary": {
- "folder_name": "/hydro/tmp/hat-checker/",
- "file_name": "run_analyzer_{run_datetime}.workspace"
- },
- "destination": {
- "folder_name": "/hydro/summary/{analysis_sub_path_time}",
- "file_name": "summary_simulations_marche.html"
- }
- },
- "run": {
- "id_01": {
- "ancillary": {
- "name": "realtime_ws-db",
- "description": "OSSERVATO SENSORI A TERRA"
- },
- "time": {
- "time_period": 3,
- "time_frequency": "H",
- "time_rounding": "H"
- }
- },
- "id_02": {
- "ancillary": {
- "name": "realtime_nwp-ecmwf-0100",
- "description": "ECMWF DETERMINISTICO"
- },
- "time": {
- "time_period": 1,
- "time_frequency": "H",
- "time_rounding": "D"
- }
- },
- "id_03": {
- "ancillary": {
- "name": "realtime_rf-ecmwf-0100",
- "description": "ECMWF PROBABILISTICO"
- },
- "time": {
- "time_period": 1,
- "time_frequency": "H",
- "time_rounding": "D"
- }
- },
- "id_04": {
- "ancillary": {
- "name": "realtime_nwp-lami-2i",
- "description": "LAMI DETERMINISTICO"
- },
- "time": {
- "time_period": 1,
- "time_frequency": "H",
- "time_rounding": "D"
- }
- },
- "id_05": {
- "ancillary": {
- "name": "realtime_rf-lami-2i",
- "description": "LAMI PROBABILISTICO"
- },
- "time": {
- "time_period": 1,
- "time_frequency": "H",
- "time_rounding": "D"
- }
- },
- "id_06": {
- "ancillary": {
- "name": "realtime_ef-lami-2i",
- "description": "EXPERT FORECAST PROBABILISTICO"
- },
- "time": {
- "time_period": 1,
- "time_frequency": "H",
- "time_rounding": "H"
- }
- },
- "id_07": {
- "ancillary": {
- "name": "realtime_radar-mcm",
- "description": "OSSERVATO RADAR MCM"
- },
- "time": {
- "time_period": 1,
- "time_frequency": "H",
- "time_rounding": "H"
- }
- }
- },
- "log": {
- "folder_name": "/hydro/log/",
- "file_name": "hat_runanalyzer_hmc_log_realtime.txt"
- }
-}
diff --git a/apps/RunAnalyzer/lib_utils_io.py b/apps/RunAnalyzer/lib_utils_io.py
deleted file mode 100755
index 1da23a1..0000000
--- a/apps/RunAnalyzer/lib_utils_io.py
+++ /dev/null
@@ -1,542 +0,0 @@
-# -------------------------------------------------------------------------------------
-# Libraries
-import logging
-import tempfile
-import os
-import time
-import json
-import pickle
-import rasterio
-import numpy as np
-import xarray as xr
-import pandas as pd
-
-from distutils.util import strtobool
-
-from rasterio.transform import Affine
-from osgeo import gdal, gdalconst
-
-logging.getLogger('rasterio').setLevel(logging.WARNING)
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to read discharge file
-def read_file_discharge(file_name, file_template=None, file_skiprows=4):
-
- file_tmp = pd.read_table(file_name)
- file_data = list(file_tmp.values)
-
- collections_id = 0
- obj_mod = None
- obj_observed = None
- obj_header = {}
- for row_id, row_step in enumerate(file_data):
- row_tmp = row_step[0]
- if row_id < file_skiprows:
- field_key, field_data = row_tmp.rstrip().split('=')
-
- if field_key in list(file_template.keys()):
- field_name = file_template[field_key]['name']
- field_type = file_template[field_key]['type']
- field_value = file_template[field_key]['value']
- else:
- field_name = field_key
- field_type = None
- field_value = None
-
- if field_type is not None:
- if field_type == 'time_stamp':
- field_data = pd.Timestamp(field_data)
- elif field_type == 'time_frequency':
- field_data = pd.Timedelta(field_data)
- elif field_type == 'int':
- field_data = int(field_data)
-
- if field_value is not None:
- field_data = field_value
-
- obj_header[field_name] = field_data
- elif row_id == file_skiprows:
- columns_values = row_tmp.rstrip().split(' ')
- array_observed = np.array(columns_values, dtype=float)
- array_observed[array_observed < 0.0] = np.nan
- time_n = array_observed.shape[0]
-
- if obj_observed is None:
- obj_observed = np.zeros(shape=(time_n, 1))
- obj_observed[:, :] = np.nan
- obj_observed[:, 0] = array_observed
-
- elif row_id >= file_skiprows:
-
- columns_values = row_tmp.rstrip().split(' ')
- array_values = np.array(columns_values, dtype=float)
- array_values[array_values < 0.0] = np.nan
- time_n = array_values.shape[0]
-
- if obj_mod is None:
- obj_mod = np.zeros(shape=(time_n, obj_header['scenario_n']))
- obj_mod[:, :] = np.nan
- obj_mod[:, collections_id] = array_values
-
- collections_id += 1
-
- return obj_header, obj_mod, obj_observed
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to read warning file
-def read_file_warning(file_name, file_template=None, file_skiprows=4, tag_data='section_{:}'):
-
- file_tmp = pd.read_table(file_name)
- file_data = list(file_tmp.values)
-
- file_time_modified = pd.Timestamp(time.ctime(os.path.getmtime(file_name)))
- file_time_created = pd.Timestamp(time.ctime(os.path.getctime(file_name)))
-
- collections_id = 0
- file_header = {}
- file_collections = {}
- for row_id, row_step in enumerate(file_data):
- row_tmp = row_step[0]
- if row_id < file_skiprows:
- field_key, field_data = row_tmp.rstrip().split('=')
-
- if field_key in list(file_template.keys()):
- field_name = file_template[field_key]['name']
- field_type = file_template[field_key]['type']
- field_value = file_template[field_key]['value']
- else:
- field_name = field_key
- field_type = None
- field_value = None
-
- if field_type is not None:
- if field_type == 'time_stamp':
- field_data = pd.Timestamp(field_data)
- elif field_type == 'time_frequency':
- field_data = pd.Timedelta(field_data)
- elif field_type == 'int':
- field_data = int(field_data)
-
- if field_value is not None:
- field_data = field_value
-
- file_header[field_name] = field_data
- elif row_id == file_skiprows:
- columns_name = row_tmp.rstrip().split(' ')
- columns_type = [str, str, float, float, bool, float, bool]
- elif row_id > file_skiprows:
- columns_values = row_tmp.rstrip().split(' ')
- file_collections[tag_data.format(collections_id)] = {}
-
- for column_name, column_value, column_type in zip(columns_name, columns_values, columns_type):
- file_collections[tag_data.format(collections_id)][column_name] = {}
-
- if column_type is float:
- column_value = float(column_value)
- elif column_type is bool:
- column_value = strtobool(column_value.lower())
- elif column_type is str:
- column_value = column_value.strip()
- file_collections[tag_data.format(collections_id)][column_name] = column_value
-
- collections_id += 1
-
- file_header['time_file_created'] = file_time_created
- file_header['time_file_modified'] = file_time_modified
-
- return file_header, file_collections
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to write an html status file
-def write_file_status(time_run, time_exec, time_format='%Y-%m-%d %H:%M',
- html_name='run_analyzer.html',
- run_default=None, run_execution=None, run_ancillary=None,
- run_alert=None, run_alarm=None, time_mode='LOCAL'):
-
- with open(html_name, "w") as html_handle:
-
- html_handle.write('\n')
-
- html_handle.write('\n')
-
- html_handle.write(' Execution Time: | ' +
- time_exec.strftime(time_format) + ' ' + time_mode + ' |\n')
-
- html_handle.write(' Reference Time: | ' +
- time_run.strftime(time_format) + ' ' + time_mode + ' |\n')
-
- html_handle.write('\n')
- html_handle.write('\n')
- html_handle.write('Run Configuration Regione Marche | \n')
- html_handle.write('State | \n')
- html_handle.write('TimeStart | \n')
- html_handle.write('TimeEnd | \n')
- html_handle.write('Scenarios | \n')
- html_handle.write('Sections | \n')
- html_handle.write(' | \n')
- html_handle.write(' OGGI gialla | \n')
- html_handle.write(' OGGI gialla | \n')
- html_handle.write(' DOMANI gialla | \n')
- html_handle.write(' DOMANI gialla | \n')
- html_handle.write(' | \n')
- html_handle.write(' OGGI rossa | \n')
- html_handle.write(' OGGI rossa | \n')
- html_handle.write(' DOMANI rossa | \n')
- html_handle.write(' DOMANI rossa | \n')
- html_handle.write('\n')
- html_handle.write('\n')
-
- for (run_key, run_execution_step), run_ancillary_step, run_alert_step, run_alarm_step in zip(
- run_execution.items(), run_ancillary.values(), run_alert.values(), run_alarm.values()):
-
- html_handle.write('')
-
- run_description = run_default[run_key]['description']
-
- if run_ancillary_step is not None:
- run_time_mod_first = run_ancillary_step['time_modified_first'].strftime(time_format)
- run_time_mod_last = run_ancillary_step['time_modified_last'].strftime(time_format)
- run_section_n_found = str(run_ancillary_step['section_n'])
- run_scenario_n = str(run_ancillary_step['scenario_n'])
- # run_time_range = str(run_ancillary_step['time_range'])
-
- if run_alert_step or run_alarm_step:
- html_handle.write('' + run_description + ' | ')
- html_handle.write(' COMPLETED | ')
- html_handle.write('' + str(run_time_mod_first) + ' ' + time_mode + ' | ')
- html_handle.write('' + str(run_time_mod_last) + ' ' + time_mode + ' | ')
- html_handle.write('' + run_scenario_n + ' | ')
- html_handle.write('' + run_section_n_found + ' | ')
- html_handle.write(' | ')
-
- if run_alert_step:
-
- for id, (run_alert_key, run_alert_value) in enumerate(run_alert_step.items()):
-
- section_list_name = run_alert_value['name']
- section_list_idx = run_alert_value['idx']
- section_list_value = run_alert_value['value']
- section_n_alert = str(section_list_name.__len__())
-
- section_name = ','.join(section_list_name)
-
- # alert today
- if id == 0:
- if section_list_name:
- html_handle.write('' + section_n_alert + '/' + run_section_n_found)
- html_handle.write(' | ' + section_name + ' | ')
- else:
- html_handle.write('- | ')
- html_handle.write('- | ')
-
- # alert tomorrow
- if id == 1:
- if section_list_name:
- html_handle.write('' + section_n_alert + '/' + run_section_n_found)
- html_handle.write(' | ' + section_name + ' | ')
- else:
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write(' | ')
-
- if run_alarm_step:
-
- for id, (run_alarm_key, run_alarm_value) in enumerate(run_alarm_step.items()):
-
- section_list_name = run_alarm_value['name']
- section_list_idx = run_alarm_value['idx']
- section_list_value = run_alarm_value['value']
- section_n_alarm = str(section_list_name.__len__())
-
- section_name = ','.join(section_list_name)
-
- # alert today
- if id == 0:
- if section_list_name:
- html_handle.write('' + section_n_alarm + 'di' + run_section_n_found)
- html_handle.write(' | ' + section_name + ' | ')
- else:
- html_handle.write('- | ')
- html_handle.write('- | ')
-
- # alert tomorrow
- if id == 1:
- if section_list_name:
- html_handle.write('' + section_n_alarm + 'di' + run_section_n_found)
- html_handle.write(' | ' + section_name + ' | ')
- else:
- html_handle.write('- | ')
- html_handle.write('- | ')
-
- if (not run_alert_step) and (not run_alarm_step):
- html_handle.write('' + run_description + ' | ')
- html_handle.write(' COMPLETED | ')
- html_handle.write('' + str(run_time_mod_first) + ' ' + time_mode + ' | ')
- html_handle.write('' + str(run_time_mod_last) + ' ' + time_mode + ' | ')
- html_handle.write('' + run_scenario_n + ' | ')
- html_handle.write('' + run_section_n_found + ' | ')
- html_handle.write(' | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write(' | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
-
- else:
-
- if run_execution_step and (run_ancillary_step is None):
-
- run_time_mod_first = run_execution_step['time_modified_first'].strftime(time_format)
- run_time_mod_last = run_execution_step['time_modified_last'].strftime(time_format)
- run_scenario_n = str(run_execution_step['scenario_n'])
-
- html_handle.write('' + run_description + ' | ')
- html_handle.write(' RUNNING ... | ')
- html_handle.write('' + str(run_time_mod_first) + ' ' + time_mode + ' | ')
- html_handle.write(' NA | ')
- html_handle.write('' + run_scenario_n + ' | ')
- html_handle.write(' NA | ')
- html_handle.write(' | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write(' | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
-
- elif (run_execution_step is None) and (run_ancillary_step is None):
-
- html_handle.write('' + run_description + ' | ')
- html_handle.write(' RUN NOT AVAILABLE | ')
- html_handle.write(' NA | ')
- html_handle.write(' NA | ')
- html_handle.write(' NA | ')
- html_handle.write(' NA | ')
- html_handle.write(' | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write(' | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
- html_handle.write('- | ')
-
- html_handle.write('')
-
- ####
-
- html_handle.write('')
- html_handle.write('')
-
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to create a tmp name
-def create_filename_tmp(prefix='tmp_', suffix='.tiff', folder=None):
-
- if folder is None:
- folder = '/tmp'
-
- with tempfile.NamedTemporaryFile(dir=folder, prefix=prefix, suffix=suffix, delete=False) as tmp:
- temp_file_name = tmp.name
- return temp_file_name
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to write file tiff
-def write_file_tif(file_name, file_data, file_wide, file_high, file_geotrans, file_proj,
- file_metadata=None,
- file_format=gdalconst.GDT_Float32):
-
- if not isinstance(file_data, list):
- file_data = [file_data]
-
- if file_metadata is None:
- file_metadata = {'description_field': 'data'}
- if not isinstance(file_metadata, list):
- file_metadata = [file_metadata] * file_data.__len__()
-
- if isinstance(file_geotrans, Affine):
- file_geotrans = file_geotrans.to_gdal()
-
- file_n = file_data.__len__()
- dset_handle = gdal.GetDriverByName('GTiff').Create(file_name, file_wide, file_high, file_n, file_format,
- options=['COMPRESS=DEFLATE'])
- dset_handle.SetGeoTransform(file_geotrans)
- dset_handle.SetProjection(file_proj)
-
- for file_id, (file_data_step, file_metadata_step) in enumerate(zip(file_data, file_metadata)):
- dset_handle.GetRasterBand(file_id + 1).WriteArray(file_data_step)
- dset_handle.GetRasterBand(file_id + 1).SetMetadata(file_metadata_step)
- del dset_handle
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to read file tif
-def read_file_tif(file_name, file_bands=1):
-
-
- file_handle = rasterio.open(file_name)
- file_proj = file_handle.crs.wkt
- file_geotrans = file_handle.transform
-
- file_tags = file_handle.tags()
- file_bands = file_handle.count
- file_metadata = file_handle.profile
-
- if file_bands == 1:
- file_data = file_handle.read(1)
- elif file_bands > 1:
- file_data = []
- for band_id in range(0, file_bands):
- file_data_tmp = file_handle.read(band_id + 1)
- file_data.append(file_data_tmp)
- else:
- logging.error(' ===> File without any band is not supported')
- raise NotImplementedError('File without bands not implemented yet')
-
- return file_data, file_proj, file_geotrans
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to read file json
-def read_file_json(file_name):
- env_ws = {}
- for env_item, env_value in os.environ.items():
- env_ws[env_item] = env_value
-
- with open(file_name, "r") as file_handle:
- json_block = []
- for file_row in file_handle:
-
- for env_key, env_value in env_ws.items():
- env_tag = '$' + env_key
- if env_tag in file_row:
- env_value = env_value.strip("'\\'")
- file_row = file_row.replace(env_tag, env_value)
- file_row = file_row.replace('//', '/')
-
- # Add the line to our JSON block
- json_block.append(file_row)
-
- # Check whether we closed our JSON block
- if file_row.startswith('}'):
- # Do something with the JSON dictionary
- json_dict = json.loads(''.join(json_block))
- # Start a new block
- json_block = []
-
- return json_dict
-
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to create a data array
-def create_darray_3d(data, time, geo_x, geo_y, geo_1d=True,
- coord_name_x='west_east', coord_name_y='south_north', coord_name_time='time',
- dim_name_x='west_east', dim_name_y='south_north', dim_name_time='time',
- dims_order=None):
-
- if dims_order is None:
- dims_order = [dim_name_y, dim_name_x, dim_name_time]
-
- if geo_1d:
- if geo_x.shape.__len__() == 2:
- geo_x = geo_x[0, :]
- if geo_y.shape.__len__() == 2:
- geo_y = geo_y[:, 0]
-
- data_da = xr.DataArray(data,
- dims=dims_order,
- coords={coord_name_time: (dim_name_time, time),
- coord_name_x: (dim_name_x, geo_x),
- coord_name_y: (dim_name_y, geo_y)})
- else:
- logging.error(' ===> Longitude and Latitude must be 1d')
- raise IOError('Variable shape is not valid')
-
- return data_da
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to create a data array
-def create_darray_2d(data, geo_x, geo_y, geo_1d=True, name='geo',
- coord_name_x='west_east', coord_name_y='south_north',
- dim_name_x='west_east', dim_name_y='south_north',
- dims_order=None):
-
- if dims_order is None:
- dims_order = [dim_name_y, dim_name_x]
-
- if geo_1d:
- if geo_x.shape.__len__() == 2:
- geo_x = geo_x[0, :]
- if geo_y.shape.__len__() == 2:
- geo_y = geo_y[:, 0]
-
- data_da = xr.DataArray(data,
- dims=dims_order,
- coords={coord_name_x: (dim_name_x, geo_x),
- coord_name_y: (dim_name_y, geo_y)},
- name=name)
- else:
- logging.error(' ===> Longitude and Latitude must be 1d')
- raise IOError('Variable shape is not valid')
-
- return data_da
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to read data obj
-def read_obj(filename):
- if os.path.exists(filename):
- data = pickle.load(open(filename, "rb"))
- else:
- data = None
- return data
-# -------------------------------------------------------------------------------------
-
-
-# -------------------------------------------------------------------------------------
-# Method to write data obj
-def write_obj(filename, data):
- if os.path.exists(filename):
- os.remove(filename)
- with open(filename, 'wb') as handle:
- pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
-# -------------------------------------------------------------------------------------
diff --git a/apps/profiling.sh b/apps/profiling.sh
deleted file mode 100755
index e69de29..0000000
diff --git a/bin/local/hat_datamanager_hmc_nrt_local_profiling.sh b/bin/local/hat_datamanager_hmc_nrt_local_profiling.sh
index 5f6669f..64abae9 100755
--- a/bin/local/hat_datamanager_hmc_nrt_local_profiling.sh
+++ b/bin/local/hat_datamanager_hmc_nrt_local_profiling.sh
@@ -13,8 +13,8 @@ fp_env_libs='virtualenv_python3'
#-----------------------------------------------------------------------------------------
# Get file information
-script_file='/home/fabio/Documents/Work_Area/Code_Development/Workspace/PyCharm_Workspace/hat-master/apps/HAT_DataManager_HMC_NRT.py'
-setting_file='/home/fabio/Documents/Work_Area/Code_Development/Workspace/PyCharm_Workspace/hat-master/apps/hat_datamanager_hmc_nrt_configuration_example_generic.json'
+script_file='/home/fabio/Documents/Work_Area/Code_Development/Workspace/PyCharm_Workspace/hat-master/apps/HAT_RunOrganizer_HMC_NRT.py'
+setting_file='/home/fabio/Documents/Work_Area/Code_Development/Workspace/PyCharm_Workspace/hat-master/apps/hat_runorganizer_hmc_nrt_configuration_example_generic.json'
script_folder='/home/fabio/Documents/Work_Area/Code_Development/Workspace/PyCharm_Workspace/hat-master/'
# Get information (-u to get gmt time)
diff --git a/bin/local/hat_datapublisher_hmc_nrt_local_profiling.sh b/bin/local/hat_datapublisher_hmc_nrt_local_profiling.sh
index 6cd7028..4e95e82 100755
--- a/bin/local/hat_datapublisher_hmc_nrt_local_profiling.sh
+++ b/bin/local/hat_datapublisher_hmc_nrt_local_profiling.sh
@@ -2,7 +2,7 @@
#-----------------------------------------------------------------------------------------
# Script information
-script_name='HAT - DataPublisher HMC NRT - LOCAL'
+script_name='HAT - Analyzer_Datasets HMC NRT - LOCAL'
script_version="1.5.0"
script_date='2019/03/04'
@@ -13,8 +13,8 @@ fp_env_libs='virtualenv_python3'
#-----------------------------------------------------------------------------------------
# Get file information
-script_file='/home/fabio/Documents/Work_Area/Code_Development/Workspace/PyCharm_Workspace/hat-master/apps//HAT_DataPublisher_HMC_NRT.py'
-settings_file='/home/fabio/Documents/Work_Area/Code_Development/Workspace/PyCharm_Workspace/hat-master/apps/hat_datapublisher_hmc_nrt_configuration_example_generic.json'
+script_file='/home/fabio/Documents/Work_Area/Code_Development/Workspace/PyCharm_Workspace/hat-master/apps//HAT_RunPublisher_HMC_NRT.py'
+settings_file='/home/fabio/Documents/Work_Area/Code_Development/Workspace/PyCharm_Workspace/hat-master/apps/hat_runpublisher_hmc_nrt_configuration_example_generic.json'
script_folder='/home/fabio/Documents/Work_Area/Code_Development/Workspace/PyCharm_Workspace/hat-master/'
stats_folder='/home/fabio/Desktop/PyCharm_Workspace/hat-master/bin/local/'
diff --git a/bin/local/profile.svg b/bin/local/profile.svg
deleted file mode 100755
index 3b41f27..0000000
--- a/bin/local/profile.svg
+++ /dev/null
@@ -1,4361 +0,0 @@
diff --git a/bin/local/stats.binary b/bin/local/stats.binary
deleted file mode 100755
index dd38f60..0000000
Binary files a/bin/local/stats.binary and /dev/null differ
diff --git a/bin/server/hat_datamanager_hmc_nrt_history.sh b/bin/server/hat_datamanager_hmc_nrt_history.sh
index e2aec9f..dd5e53d 100755
--- a/bin/server/hat_datamanager_hmc_nrt_history.sh
+++ b/bin/server/hat_datamanager_hmc_nrt_history.sh
@@ -13,7 +13,7 @@ fp_env_libs='fp_env_python'
#-----------------------------------------------------------------------------------------
# Get file information
-script_file='/home/dpc-marche/library/hat-master/apps/HAT_DataManager_HMC_NRT.py'
+script_file='/home/dpc-marche/library/hat-master/apps/HAT_RunOrganizer_HMC_NRT.py'
setting_file='/home/dpc-marche/library/hat-master/apps/hat_datamanager_hmc_nrt_configuration_server_history.json'
script_folder='/home/dpc-marche/library/hat-master/'
diff --git a/bin/server/hat_datamanager_hmc_nrt_realtime.sh b/bin/server/hat_datamanager_hmc_nrt_realtime.sh
index 5f4648f..5d4790d 100755
--- a/bin/server/hat_datamanager_hmc_nrt_realtime.sh
+++ b/bin/server/hat_datamanager_hmc_nrt_realtime.sh
@@ -13,7 +13,7 @@ fp_env_libs='fp_env_python'
#-----------------------------------------------------------------------------------------
# Get file information
-script_file='/home/dpc-marche/library/hat-master/apps/HAT_DataManager_HMC_NRT.py'
+script_file='/home/dpc-marche/library/hat-master/apps/HAT_RunOrganizer_HMC_NRT.py'
setting_file='/home/dpc-marche/library/hat-master/apps/hat_datamanager_hmc_nrt_configuration_server_realtime.json'
script_folder='/home/dpc-marche/library/hat-master/'
diff --git a/bin/server/hat_datapublisher_hmc_nrt_history.sh b/bin/server/hat_datapublisher_hmc_nrt_history.sh
index b7b6129..5bf80bb 100755
--- a/bin/server/hat_datapublisher_hmc_nrt_history.sh
+++ b/bin/server/hat_datapublisher_hmc_nrt_history.sh
@@ -2,7 +2,7 @@
#-----------------------------------------------------------------------------------------
# Script information
-script_name='HAT - DataPublisher HMC NRT - HISTORY'
+script_name='HAT - Analyzer_Datasets HMC NRT - HISTORY'
script_version="1.5.0"
script_date='2019/03/04'
@@ -13,7 +13,7 @@ fp_env_libs='fp_env_python'
#-----------------------------------------------------------------------------------------
# Get file information
-script_file='/home/dpc-marche/library/hat-master/apps/HAT_DataPublisher_HMC_NRT.py'
+script_file='/home/dpc-marche/library/hat-master/apps/HAT_RunPublisher_HMC_NRT.py'
setting_file='/home/dpc-marche/library/hat-master/apps/hat_datapublisher_hmc_nrt_configuration_server_history.json'
script_folder='/home/dpc-marche/library/hat-master/'
diff --git a/bin/server/hat_datapublisher_hmc_nrt_realtime.sh b/bin/server/hat_datapublisher_hmc_nrt_realtime.sh
index a72cab0..35a01a0 100755
--- a/bin/server/hat_datapublisher_hmc_nrt_realtime.sh
+++ b/bin/server/hat_datapublisher_hmc_nrt_realtime.sh
@@ -2,7 +2,7 @@
#-----------------------------------------------------------------------------------------
# Script information
-script_name='HAT - DataPublisher HMC NRT - REALTIME'
+script_name='HAT - Analyzer_Datasets HMC NRT - REALTIME'
script_version="1.5.0"
script_date='2019/03/04'
@@ -13,7 +13,7 @@ fp_env_libs='fp_env_python'
#-----------------------------------------------------------------------------------------
# Get file information
-script_file='/home/dpc-marche/library/hat-master/apps/HAT_DataPublisher_HMC_NRT.py'
+script_file='/home/dpc-marche/library/hat-master/apps/HAT_RunPublisher_HMC_NRT.py'
setting_file='/home/dpc-marche/library/hat-master/apps/hat_datapublisher_hmc_nrt_configuration_server_realtime.json'
script_folder='/home/dpc-marche/library/hat-master/'
diff --git a/bin/server/hat_datarunner_hmc_nrt_history.sh b/bin/server/hat_datarunner_hmc_nrt_history.sh
index 8817c29..82ee89e 100755
--- a/bin/server/hat_datarunner_hmc_nrt_history.sh
+++ b/bin/server/hat_datarunner_hmc_nrt_history.sh
@@ -13,9 +13,9 @@ fp_env_libs='fp_env_python'
#-----------------------------------------------------------------------------------------
# Get file information
-script_file_manager='/home/dpc-marche/library/hat-master/apps/HAT_DataManager_HMC_NRT.py'
+script_file_manager='/home/dpc-marche/library/hat-master/apps/HAT_RunOrganizer_HMC_NRT.py'
setting_file_manager='/home/dpc-marche/library/hat-master/apps/hat_datamanager_hmc_nrt_configuration_server_history.json'
-script_file_publisher='/home/dpc-marche/library/hat-master/apps/HAT_DataPublisher_HMC_NRT.py'
+script_file_publisher='/home/dpc-marche/library/hat-master/apps/HAT_RunPublisher_HMC_NRT.py'
setting_file_publisher='/home/dpc-marche/library/hat-master/apps/hat_datapublisher_hmc_nrt_configuration_server_history.json'
script_folder='/home/dpc-marche/library/hat-master/'
diff --git a/bin/server/hat_datarunner_hmc_nrt_realtime.sh b/bin/server/hat_datarunner_hmc_nrt_realtime.sh
index c70c227..0e56c80 100755
--- a/bin/server/hat_datarunner_hmc_nrt_realtime.sh
+++ b/bin/server/hat_datarunner_hmc_nrt_realtime.sh
@@ -13,9 +13,9 @@ fp_env_libs='fp_env_python'
#-----------------------------------------------------------------------------------------
# Get file information
-script_file_manager='/home/dpc-marche/library/hat-master/apps/HAT_DataManager_HMC_NRT.py'
+script_file_manager='/home/dpc-marche/library/hat-master/apps/HAT_RunOrganizer_HMC_NRT.py'
setting_file_manager='/home/dpc-marche/library/hat-master/apps/hat_datamanager_hmc_nrt_configuration_server_realtime.json'
-script_file_publisher='/home/dpc-marche/library/hat-master/apps/HAT_DataPublisher_HMC_NRT.py'
+script_file_publisher='/home/dpc-marche/library/hat-master/apps/HAT_RunPublisher_HMC_NRT.py'
setting_file_publisher='/home/dpc-marche/library/hat-master/apps/hat_datapublisher_hmc_nrt_configuration_server_realtime.json'
script_folder='/home/dpc-marche/library/hat-master/'