From 6435f711586f9feebc57d81c9c18f7b94c2e3423 Mon Sep 17 00:00:00 2001 From: YONG WOOK KIM Date: Tue, 14 May 2024 15:22:28 -0500 Subject: [PATCH 01/17] added flag for clean columns for joining table dataset with source dataset (#302) * added flag for clean columns * removed unnecessary line * added guid index * added indeing * fixed pytest * default to Flase * set clean flag to false --- CHANGELOG.md | 4 ++++ pyincore/utils/datasetutil.py | 14 +++++++++++--- tests/pyincore/utils/test_datasetutil.py | 5 +++-- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c2f84f562..0b7d4c931 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). +## [Unreleased] + +### Changed +- join dataset has an option for only keeping the table dataset fields [#299](https://github.com/IN-CORE/pyincore/issues/299) ## [1.18.1] - 2024-04-30 diff --git a/pyincore/utils/datasetutil.py b/pyincore/utils/datasetutil.py index 2ae644f2b..33a177b8f 100644 --- a/pyincore/utils/datasetutil.py +++ b/pyincore/utils/datasetutil.py @@ -17,12 +17,13 @@ class DatasetUtil: @staticmethod - def join_datasets(geodataset, tabledataset): + def join_datasets(geodataset, tabledataset, clean_attributes=False): """Join Geopands geodataframe and non-geospatial Dataset using GUID field Args: geodataset (gpd.Dataset): pyincore Dataset object with geospatial data tabledataset (gpd.Dataset): pyincore Dataset object without geospatial data + clean_attributes (boolean): flag for deleting the fields except guid and the fields in csv table Returns: gpd.GeoDataFrame: Geopandas DataFrame object @@ -30,17 +31,24 @@ def join_datasets(geodataset, tabledataset): """ gdf = gpd.read_file(geodataset.local_file_path) df = tabledataset.get_dataframe_from_csv() + + # remove the tables except guid + if clean_attributes: + gdf = gdf[['geometry','guid']] + + # joining and indexing join_gdf = gdf.set_index("guid").join(df.set_index("guid")) return join_gdf @staticmethod - def join_table_dataset_with_source_dataset(dataset, client): + def join_table_dataset_with_source_dataset(dataset, client, clean_attributes=False): """Creates geopandas geodataframe by joining table dataset and its source dataset Args: dataset (Dataset): pyincore dataset object client (Client): pyincore service client object + clean_attributes (boolean): flag for deleting the fields except guid and the fields in csv table Returns: gpd.Dataset: Geopandas geodataframe object. 
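# NOTE (editorial): a minimal usage sketch of the new clean_attributes flag; the
# file paths and data-type strings below are placeholders, not values from this
# patch. Any geospatial/CSV Dataset pair sharing a "guid" column behaves the same.
from pyincore import Dataset
from pyincore.utils.datasetutil import DatasetUtil

bldg_dataset = Dataset.from_file("buildings.shp", "ergo:buildingInventoryVer7")  # geospatial dataset
dmg_dataset = Dataset.from_file("building_dmg.csv", "ergo:buildingDamageVer6")   # csv table dataset

# clean_attributes=True keeps only 'geometry', the CSV columns, and 'guid'
# (which becomes the index); all other shapefile attributes are dropped.
joined_gdf = DatasetUtil.join_datasets(bldg_dataset, dmg_dataset, clean_attributes=True)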
@@ -64,7 +72,7 @@ def join_table_dataset_with_source_dataset(dataset, client):
         if is_source_dataset:
             # merge dataset and source dataset
             geodataset = Dataset.from_data_service(source_dataset, DataService(client))
-            joined_gdf = DatasetUtil.join_datasets(geodataset, dataset)
+            joined_gdf = DatasetUtil.join_datasets(geodataset, dataset, clean_attributes)
         else:
             return None
 
diff --git a/tests/pyincore/utils/test_datasetutil.py b/tests/pyincore/utils/test_datasetutil.py
index a98c620f7..c9dc7374c 100644
--- a/tests/pyincore/utils/test_datasetutil.py
+++ b/tests/pyincore/utils/test_datasetutil.py
@@ -15,7 +15,8 @@ def test_join_table_dataset_with_source_dataset(client):
     joined_gdf = util.join_table_dataset_with_source_dataset(dataset, client)
 
     # assert if the fields from each dataset exist
-    assert 'year_built' in joined_gdf.keys() and 'meandamage' in joined_gdf.keys()
+    # note that guid is not listed here because it became the join index
+    assert 'geometry' in joined_gdf.keys() and 'meandamage' in joined_gdf.keys()
 
 
 def test_join_datasets(client):
@@ -26,4 +27,4 @@ def test_join_datasets(client):
     joined_gdf = util.join_datasets(bldg_dataset, bldg_dmg_dataset)
 
     # assert if the fields from each dataset exist
-    assert 'year_built' in joined_gdf.keys() and 'meandamage' in joined_gdf.keys()
+    assert 'geometry' in joined_gdf.keys() and 'meandamage' in joined_gdf.keys()

From 8b88128db90bfdb22daa6609558de9583f525d6d Mon Sep 17 00:00:00 2001
From: YONG WOOK KIM
Date: Tue, 14 May 2024 17:09:41 -0500
Subject: [PATCH 02/17] Fixed permission error in clear cache (#573)

---
 CHANGELOG.md       | 5 +++++
 pyincore/client.py | 5 ++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0b7d4c931..e8f9fc781 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,11 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
### Changed - join dataset has an option for only keeping the table dataset fields [#299](https://github.com/IN-CORE/pyincore/issues/299) +## [Unreleased] + +### Fixed +- Permission error in clearing cache process [#563](https://github.com/IN-CORE/pyincore/issues/563) + ## [1.18.1] - 2024-04-30 ### Changed diff --git a/pyincore/client.py b/pyincore/client.py index ee64aeae9..966f81c14 100644 --- a/pyincore/client.py +++ b/pyincore/client.py @@ -414,8 +414,11 @@ def clear_cache(self): if not os.path.isdir(self.hashed_svc_data_dir): logger.warning("Cached folder doesn't exist") return None + try: + shutil.rmtree(self.hashed_svc_data_dir) + except PermissionError as e: + print(f"Error clearing cache : {e}") - shutil.rmtree(self.hashed_svc_data_dir) # clear entry from service.json update_hash_entry("edit", hashed_url=self.hashed_service_url) return From 9e27a3f85e971e759e24a972b0d6578ff8c09198 Mon Sep 17 00:00:00 2001 From: YONG WOOK KIM Date: Wed, 22 May 2024 14:13:59 -0500 Subject: [PATCH 03/17] Rename Social Vulnerability to Social Vulnerability Score (#572) * Rename Social Vulnerability to Social Vulnerability Score * readd old files * update requirements * updated copyright * fixed wrong caption * renamded output name * renamded output name in the code * fixed typo in result name * revert back to previous result name * Changed copyright year * modified copyright year --- CHANGELOG.md | 5 + docs/source/modules.rst | 7 + environment.yml | 1 + .../socialvulnerability.py | 158 +---------------- .../socialvulnerabilityscore/__init__.py | 8 + .../socialvulnerabilityscore.py | 163 ++++++++++++++++++ requirements.imports | 1 + requirements.min | 1 + requirements.txt | 1 + setup.py | 1 + .../test_socialvulnerabilityscore.py | 34 ++++ 11 files changed, 230 insertions(+), 150 deletions(-) create mode 100644 pyincore/analyses/socialvulnerabilityscore/__init__.py create mode 100644 pyincore/analyses/socialvulnerabilityscore/socialvulnerabilityscore.py create mode 100644 tests/pyincore/analyses/socialvulnerabilityscore/test_socialvulnerabilityscore.py diff --git a/CHANGELOG.md b/CHANGELOG.md index e8f9fc781..79dd5c0d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,11 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ### Fixed - Permission error in clearing cache process [#563](https://github.com/IN-CORE/pyincore/issues/563) +## [Unreleased] + +### Changed +- Rename Social Vulnerability Analysis to Social Vulnerability Index Analysis [#556](https://github.com/IN-CORE/pyincore/issues/556) + ## [1.18.1] - 2024-04-30 ### Changed diff --git a/docs/source/modules.rst b/docs/source/modules.rst index 1d031c821..a6e52691c 100644 --- a/docs/source/modules.rst +++ b/docs/source/modules.rst @@ -306,6 +306,13 @@ analyses/socialvulnerability ============================ .. autoclass:: socialvulnerability.socialvulnerability.SocialVulnerability :members: +.. deprecated:: 1.19.0 + This class will be deprecated soon. Use :class:`socialvulnerabilityscore.SocialVulnerabilityScore` instead. + +analyses/socialvulnerabilityscore +============================ +.. 
autoclass:: socialvulnerabilityscore.socialvulnerabilityscore.SocialVulnerabilityScore + :members: analyses/tornadoepndamage ========================= diff --git a/environment.yml b/environment.yml index be15b6336..dcacabdc1 100644 --- a/environment.yml +++ b/environment.yml @@ -20,3 +20,4 @@ dependencies: - rtree>=1.1.0 - scipy>=1.11.3 - shapely>=2.0.2 + - deprecated>=1.2.14 diff --git a/pyincore/analyses/socialvulnerability/socialvulnerability.py b/pyincore/analyses/socialvulnerability/socialvulnerability.py index 3c890d416..86d86d05a 100644 --- a/pyincore/analyses/socialvulnerability/socialvulnerability.py +++ b/pyincore/analyses/socialvulnerability/socialvulnerability.py @@ -4,160 +4,18 @@ # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -import pandas as pd -from pyincore import BaseAnalysis +from deprecated.sphinx import deprecated +from pyincore.analyses.socialvulnerabilityscore import SocialVulnerabilityScore -class SocialVulnerability(BaseAnalysis): - """This analysis computes a social vulnerability score for per associated zone in census data. - - The computation extracts zoning and a social vulnerability score obtained by computing demographic features of - interest against national average values. - - The output of the computation is a dataset CSV format. - - Contributors - | Science: Elaina Sutley, Amin Enderami - | Implementation: Amin Enderami, Santiago Núñez-Corrales, and NCSA IN-CORE Dev Team - - Related publications - - Args: - incore_client (IncoreClient): Service authentication. - - """ +@deprecated(version='1.19.0', reason="This class will be deprecated soon. Use SocialVulnerabilityScore instead.") +class SocialVulnerability(): def __init__(self, incore_client): - super(SocialVulnerability, self).__init__(incore_client) - - def run(self): - """Execute the social vulnerability analysis using known parameters.""" - df_navs = pd.DataFrame(self.get_input_dataset('national_vulnerability_feature_averages').get_csv_reader()) - - df_dem = pd.DataFrame(self.get_input_dataset('social_vulnerability_demographic_factors').get_csv_reader()) - - # Make sure data types match - df_dem["factor_white_nonHispanic"] = df_dem["factor_white_nonHispanic"].astype(float) - df_dem["factor_owner_occupied"] = df_dem["factor_owner_occupied"].astype(float) - df_dem["factor_earning_higher_than_national_poverty_rate"] =\ - df_dem["factor_earning_higher_than_national_poverty_rate"].astype(float) - df_dem["factor_over_25_with_high_school_diploma_or_higher"] =\ - df_dem["factor_over_25_with_high_school_diploma_or_higher"].astype(float) - df_dem["factor_without_disability_age_18_to_65"] =\ - df_dem["factor_without_disability_age_18_to_65"].astype(float) - - self.social_vulnerability_model(df_navs, df_dem) - - def social_vulnerability_model(self, df_navs, df_dem): - """ - - Args: - df_navs (pd.DataFrame): dataframe containing the national average values for vulnerability factors - df_dem (pd.DataFrame): dataframe containing demographic factors required for the vulnerability score - - Returns: - - """ - - # Compute the social vulnerability index - df_sv = self.compute_svs(df_dem, df_navs) - - # Save into a CSV file - result_name = self.get_parameter("result_name") - self.set_result_csv_data("sv_result", df_sv, - name=result_name, - source="dataframe") - - @staticmethod - def compute_svs(df, df_navs): - """ Computation of the social vulnerability score and corresponding zoning + self._delegate = 
SocialVulnerabilityScore(incore_client) - Args: - df (pd.DataFrame): dataframe for the census geographic unit of interest - df_navs (pd.DataFrame): dataframe containing national average values - - Returns: - pd.DataFrame: Social vulnerability score and corresponding zoning data + def __getattr__(self, name): """ - navs = df_navs['average'].astype(float).array - - df['R1'] = df['factor_white_nonHispanic'] / navs[0] - df['R2'] = df['factor_owner_occupied'] / navs[1] - df['R3'] = df['factor_earning_higher_than_national_poverty_rate'] / navs[2] - df['R4'] = df['factor_over_25_with_high_school_diploma_or_higher'] / navs[3] - df['R5'] = df['factor_without_disability_age_18_to_65'] / navs[4] - df['SVS'] = df.apply(lambda row: (row['R1'] + row['R2'] + row['R3'] + row['R4'] + row['R5']) / 5, axis=1) - - maximum_nav = 1/navs - std = abs(1 - (sum(maximum_nav) / len(maximum_nav))) / 3 - - lb_2 = 1 - 1.5*std - lb_1 = 1 - 0.5*std - ub_1 = 1 + 0.5*std - ub_2 = 1 + 1.5*std - - zones = [] - - for svs in df['SVS'].tolist(): - if svs < lb_2: - new_zone = 'High Vulnerable (zone5)' - elif svs < lb_1: - new_zone = 'Medium to High Vulnerable (zone4)' - elif svs < ub_1: - new_zone = 'Medium Vulnerable (zone3)' - elif svs < ub_2: - new_zone = 'Medium to Low Vulnerable (zone2)' - elif svs > ub_2: - new_zone = 'Low Vulnerable (zone1)' - else: - new_zone = 'No Data' - zones.append(new_zone) - - df['zone'] = zones - df = df.sort_values(by="GEO_ID") - - return df - - def get_spec(self): - """Get specifications of the housing serial recovery model. - - Returns: - obj: A JSON object of specifications of the social vulnerability model. - + Delegate attribute access to the SocialVulnerabilityScore instance. """ - return { - 'name': 'social-vulnerability', - 'description': 'Social vulnerability score model', - 'input_parameters': [ - { - 'id': 'result_name', - 'required': True, - 'description': 'Result CSV dataset name', - 'type': str - }, - ], - 'input_datasets': [ - { - 'id': 'national_vulnerability_feature_averages', - 'required': True, - 'description': 'A csv file with national vulnerability feature averages', - 'type': ['incore:socialVulnerabilityFeatureAverages'] - }, - { - 'id': 'social_vulnerability_demographic_factors', - 'required': True, - 'description': 'A csv file with social vulnerability demographic factors for a given geographic ' - 'type', - 'type': ['incore:socialVulnerabilityDemFactors'] - } - ], - 'output_datasets': [ - { - 'id': 'sv_result', - 'parent_type': 'social_vulnerability_score', - 'description': 'A csv file with zones containing demographic factors' - 'qualified by a social vulnerability score', - 'type': 'incore:socialVulnerabilityScore' - } - ] - } + return getattr(self._delegate, name) diff --git a/pyincore/analyses/socialvulnerabilityscore/__init__.py b/pyincore/analyses/socialvulnerabilityscore/__init__.py new file mode 100644 index 000000000..c4b8e13bb --- /dev/null +++ b/pyincore/analyses/socialvulnerabilityscore/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2021 University of Illinois and others. All rights reserved. 
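# NOTE (editorial): with the deprecation wrapper above, existing user code keeps
# working unchanged while emitting a DeprecationWarning. A minimal sketch; the
# client construction and parameter value are assumptions, not from this patch.
from pyincore import IncoreClient
from pyincore.analyses.socialvulnerability import SocialVulnerability

client = IncoreClient()                    # assumes valid IN-CORE credentials
sv = SocialVulnerability(client)           # warns: deprecated since 1.19.0, use SocialVulnerabilityScore
sv.set_parameter("result_name", "sv_out")  # forwarded to the delegate via __getattr__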
+# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +from pyincore.analyses.socialvulnerabilityscore.socialvulnerabilityscore import SocialVulnerabilityScore diff --git a/pyincore/analyses/socialvulnerabilityscore/socialvulnerabilityscore.py b/pyincore/analyses/socialvulnerabilityscore/socialvulnerabilityscore.py new file mode 100644 index 000000000..f76e3f7ce --- /dev/null +++ b/pyincore/analyses/socialvulnerabilityscore/socialvulnerabilityscore.py @@ -0,0 +1,163 @@ +# Copyright (c) 2021 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +import pandas as pd +from pyincore import BaseAnalysis + + +class SocialVulnerabilityScore(BaseAnalysis): + """This analysis computes a social vulnerability score for per associated zone in census data. + + The computation extracts zoning and a social vulnerability score obtained by computing demographic features of + interest against national average values. + + The output of the computation is a dataset CSV format. + + Contributors + | Science: Elaina Sutley, Amin Enderami + | Implementation: Amin Enderami, Santiago Núñez-Corrales, and NCSA IN-CORE Dev Team + + Related publications + + Args: + incore_client (IncoreClient): Service authentication. + + """ + + def __init__(self, incore_client): + super(SocialVulnerabilityScore, self).__init__(incore_client) + + def run(self): + """Execute the social vulnerability score analysis using known parameters.""" + df_navs = pd.DataFrame(self.get_input_dataset('national_vulnerability_feature_averages').get_csv_reader()) + + df_dem = pd.DataFrame(self.get_input_dataset('social_vulnerability_demographic_factors').get_csv_reader()) + + # Make sure data types match + df_dem["factor_white_nonHispanic"] = df_dem["factor_white_nonHispanic"].astype(float) + df_dem["factor_owner_occupied"] = df_dem["factor_owner_occupied"].astype(float) + df_dem["factor_earning_higher_than_national_poverty_rate"] =\ + df_dem["factor_earning_higher_than_national_poverty_rate"].astype(float) + df_dem["factor_over_25_with_high_school_diploma_or_higher"] =\ + df_dem["factor_over_25_with_high_school_diploma_or_higher"].astype(float) + df_dem["factor_without_disability_age_18_to_65"] =\ + df_dem["factor_without_disability_age_18_to_65"].astype(float) + + self.social_vulnerability_score_model(df_navs, df_dem) + + def social_vulnerability_score_model(self, df_navs, df_dem): + """ + + Args: + df_navs (pd.DataFrame): dataframe containing the national average values for vulnerability factors + df_dem (pd.DataFrame): dataframe containing demographic factors required for the vulnerability score + + Returns: + + """ + + # Compute the social vulnerability score index + df_sv = self.compute_svs(df_dem, df_navs) + + # Save into a CSV file + result_name = self.get_parameter("result_name") + self.set_result_csv_data("sv_result", df_sv, + name=result_name, + source="dataframe") + + @staticmethod + def compute_svs(df, df_navs): + """ Computation of the social vulnerability score and corresponding zoning + + Args: + df (pd.DataFrame): dataframe for the census geographic unit of interest + df_navs (pd.DataFrame): dataframe containing national average values + + 
Returns: + pd.DataFrame: Social vulnerability score and corresponding zoning data + """ + navs = df_navs['average'].astype(float).array + + df['R1'] = df['factor_white_nonHispanic'] / navs[0] + df['R2'] = df['factor_owner_occupied'] / navs[1] + df['R3'] = df['factor_earning_higher_than_national_poverty_rate'] / navs[2] + df['R4'] = df['factor_over_25_with_high_school_diploma_or_higher'] / navs[3] + df['R5'] = df['factor_without_disability_age_18_to_65'] / navs[4] + df['SVS'] = df.apply(lambda row: (row['R1'] + row['R2'] + row['R3'] + row['R4'] + row['R5']) / 5, axis=1) + + maximum_nav = 1/navs + std = abs(1 - (sum(maximum_nav) / len(maximum_nav))) / 3 + + lb_2 = 1 - 1.5*std + lb_1 = 1 - 0.5*std + ub_1 = 1 + 0.5*std + ub_2 = 1 + 1.5*std + + zones = [] + + for svs in df['SVS'].tolist(): + if svs < lb_2: + new_zone = 'High Vulnerable (zone5)' + elif svs < lb_1: + new_zone = 'Medium to High Vulnerable (zone4)' + elif svs < ub_1: + new_zone = 'Medium Vulnerable (zone3)' + elif svs < ub_2: + new_zone = 'Medium to Low Vulnerable (zone2)' + elif svs > ub_2: + new_zone = 'Low Vulnerable (zone1)' + else: + new_zone = 'No Data' + zones.append(new_zone) + + df['zone'] = zones + df = df.sort_values(by="GEO_ID") + + return df + + def get_spec(self): + """Get specifications of the housing serial recovery model. + + Returns: + obj: A JSON object of specifications of the social vulnerability score model. + + """ + return { + 'name': 'social-vulnerability-score', + 'description': 'Social vulnerability score model', + 'input_parameters': [ + { + 'id': 'result_name', + 'required': True, + 'description': 'Result CSV dataset name', + 'type': str + }, + ], + 'input_datasets': [ + { + 'id': 'national_vulnerability_feature_averages', + 'required': True, + 'description': 'A csv file with national vulnerability feature averages', + 'type': ['incore:socialVulnerabilityFeatureAverages'] + }, + { + 'id': 'social_vulnerability_demographic_factors', + 'required': True, + 'description': 'A csv file with social vulnerability score demographic factors for a given geographic ' + 'type', + 'type': ['incore:socialVulnerabilityDemFactors'] + } + ], + 'output_datasets': [ + { + 'id': 'sv_result', + 'parent_type': 'social_vulnerability_score', + 'description': 'A csv file with zones containing demographic factors' + 'qualified by a social vulnerability score', + 'type': 'incore:socialVulnerabilityScore' + } + ] + } diff --git a/requirements.imports b/requirements.imports index 99829a026..bad57f2f9 100644 --- a/requirements.imports +++ b/requirements.imports @@ -15,3 +15,4 @@ shapely pycodestyle pytest python-jose +Deprecated diff --git a/requirements.min b/requirements.min index a5a7e0be9..e6e22d39a 100644 --- a/requirements.min +++ b/requirements.min @@ -16,3 +16,4 @@ requests>=2.31.0 rtree>=1.1.0 scipy>=1.11.3 shapely>=2.0.2 +Deprecated>=1.2.14 diff --git a/requirements.txt b/requirements.txt index 67803f1a7..986a33f58 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,3 +15,4 @@ requests>=2.31.0 rtree>=1.1.0 scipy>=1.11.3 shapely>=2.0.2 +Deprecated>=1.2.14 diff --git a/setup.py b/setup.py index 197f307cd..3ea2d73df 100644 --- a/setup.py +++ b/setup.py @@ -67,6 +67,7 @@ 'rtree>=1.1.0', 'scipy>=1.11.3', 'shapely>=2.0.2', + 'Deprecated>=1.2.14' ], extras_require={ diff --git a/tests/pyincore/analyses/socialvulnerabilityscore/test_socialvulnerabilityscore.py b/tests/pyincore/analyses/socialvulnerabilityscore/test_socialvulnerabilityscore.py new file mode 100644 index 000000000..77a731c8d --- /dev/null +++ 
b/tests/pyincore/analyses/socialvulnerabilityscore/test_socialvulnerabilityscore.py @@ -0,0 +1,34 @@ +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +from pyincore import IncoreClient +from pyincore.analyses.socialvulnerabilityscore import SocialVulnerabilityScore +import pyincore.globals as pyglobals + + +def run_with_base_class(): + census_geo_level = "tract" + + # For a full example, datasets should be obtained using the Census API in pyincore_data + national_vulnerability_feature_averages = "6241d9c653302c512d67ef26" + # social_vulnerability_demographic_factors = "6241e58653302c512d67fb38" + social_vulnerability_demographic_factors = "62b4d2f7be53de4a4737e52d" # calculated from censusutil + + client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + social_vulnerability_score = SocialVulnerabilityScore(client) + + social_vulnerability_score.set_parameter("result_name", "social_vulnerabilty_score") + + social_vulnerability_score.load_remote_input_dataset("national_vulnerability_feature_averages", + national_vulnerability_feature_averages) + social_vulnerability_score.load_remote_input_dataset("social_vulnerability_demographic_factors", + social_vulnerability_demographic_factors) + + # Run pipeline damage analysis + result = social_vulnerability_score.run_analysis() + + +if __name__ == "__main__": + run_with_base_class() From 4e66ba2eda30c514120313dd0d928703e371ac25 Mon Sep 17 00:00:00 2001 From: Chris Navarro Date: Wed, 22 May 2024 15:49:06 -0500 Subject: [PATCH 04/17] 568 - Create natural gas facility damage analysis (#574) * Fixes #568 - added gas facility damage analysis * Reformatted * Added test for gas facility damage analysis * Removed print statement * Update input dataset type --- CHANGELOG.md | 7 +- docs/source/modules.rst | 7 + .../analyses/gasfacilitydamage/__init__.py | 9 + .../gasfacilitydamage/gasfacilitydamage.py | 466 ++++++++++++++++++ pyincore/analyses/gasfacilitydamage/gfutil.py | 12 + pyincore/models/fragilitycurveset.py | 3 + .../test_gasfacilitydamage.py | 40 ++ 7 files changed, 541 insertions(+), 3 deletions(-) create mode 100644 pyincore/analyses/gasfacilitydamage/__init__.py create mode 100644 pyincore/analyses/gasfacilitydamage/gasfacilitydamage.py create mode 100644 pyincore/analyses/gasfacilitydamage/gfutil.py create mode 100644 tests/pyincore/analyses/gasfacilitydamage/test_gasfacilitydamage.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 79dd5c0d9..3e7b35bd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,13 +5,14 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). -## [Unreleased] +## [Unreleased] + +### Added +- Gas Facility Damage Analysis [#568](https://github.com/IN-CORE/pyincore/issues/568) ### Changed - join dataset has an option for only keeping the table dataset fields [#299](https://github.com/IN-CORE/pyincore/issues/299) -## [Unreleased] - ### Fixed - Permission error in clearing cache process [#563](https://github.com/IN-CORE/pyincore/issues/563) diff --git a/docs/source/modules.rst b/docs/source/modules.rst index a6e52691c..2811f7851 100644 --- a/docs/source/modules.rst +++ b/docs/source/modules.rst @@ -118,6 +118,13 @@ analyses/galvestoncge .. 
autofunction:: galvestoncge.galvestonoutput.gams_to_dataframes :members: +analyses/gasfacilitydamage +============================ +.. autoclass:: gasfacilitydamage.gasfacilitydamage.GasFacilityDamage + :members: +.. autoclass:: gasfacilitydamage.gfutil.GfUtil + :members: + analyses/housingrecovery ======================== .. autoclass:: housingrecovery.housingrecovery.HousingRecovery diff --git a/pyincore/analyses/gasfacilitydamage/__init__.py b/pyincore/analyses/gasfacilitydamage/__init__.py new file mode 100644 index 000000000..e4bf0b684 --- /dev/null +++ b/pyincore/analyses/gasfacilitydamage/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +from pyincore.analyses.gasfacilitydamage.gasfacilitydamage import GasFacilityDamage +from pyincore.analyses.gasfacilitydamage.gfutil import GfUtil diff --git a/pyincore/analyses/gasfacilitydamage/gasfacilitydamage.py b/pyincore/analyses/gasfacilitydamage/gasfacilitydamage.py new file mode 100644 index 000000000..b1e729112 --- /dev/null +++ b/pyincore/analyses/gasfacilitydamage/gasfacilitydamage.py @@ -0,0 +1,466 @@ +# Copyright (c) 2024 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +import concurrent.futures +from itertools import repeat +from pyincore import ( + BaseAnalysis, + HazardService, + FragilityService, + GeoUtil, + AnalysisUtil, +) +from pyincore.analyses.gasfacilitydamage.gfutil import GfUtil +from pyincore.models.dfr3curve import DFR3Curve + + +class GasFacilityDamage(BaseAnalysis): + """Computes gas facility structural damage for earthquake. + + Args: + incore_client (IncoreClient): Service authentication. 
+ + """ + + def __init__(self, incore_client): + self.hazardsvc = HazardService(incore_client) + self.fragilitysvc = FragilityService(incore_client) + super(GasFacilityDamage, self).__init__(incore_client) + + def run(self): + """Executes Gas Facility damage analysis""" + + # Facility dataset + inventory_set = self.get_input_dataset("gas_facilities").get_inventory_reader() + + # get input hazard + hazard, hazard_type, hazard_dataset_id = ( + self.create_hazard_object_from_input_params() + ) + + user_defined_cpu = 1 + + if ( + not self.get_parameter("num_cpu") is None + and self.get_parameter("num_cpu") > 0 + ): + user_defined_cpu = self.get_parameter("num_cpu") + + num_workers = AnalysisUtil.determine_parallelism_locally( + self, len(inventory_set), user_defined_cpu + ) + + avg_bulk_input_size = int(len(inventory_set) / num_workers) + inventory_args = [] + count = 0 + inventory_list = list(inventory_set) + while count < len(inventory_list): + inventory_args.append(inventory_list[count : count + avg_bulk_input_size]) + count += avg_bulk_input_size + + (ds_results, metadata_results) = self.gasfacility_damage_concurrent_futures( + self.gasfacility_damage_analysis_bulk_input, + num_workers, + inventory_args, + repeat(hazard), + repeat(hazard_type), + repeat(hazard_dataset_id), + ) + + result_name = self.get_parameter("result_name") + self.set_result_csv_data("result", ds_results, name=result_name + "_damage") + self.set_result_json_data( + "metadata", metadata_results, name=result_name + "_additional_info" + ) + + return True + + def gasfacility_damage_concurrent_futures(self, function_name, parallelism, *args): + """Utilizes concurrent.future module. + + Args: + function_name (function): The function to be parallelized. + parallelism (int): Number of workers in parallelization. + *args: All the arguments in order to pass into parameter function_name. + + Returns: + list: A list of ordered dictionaries with building damage values and other data/metadata. + + """ + output_ds = [] + output_dmg = [] + with concurrent.futures.ProcessPoolExecutor( + max_workers=parallelism + ) as executor: + for ret1, ret2 in executor.map(function_name, *args): + output_ds.extend(ret1) + output_dmg.extend(ret2) + + return output_ds, output_dmg + + def gasfacility_damage_analysis_bulk_input( + self, facilities, hazard, hazard_type, hazard_dataset_id + ): + """Gets applicable fragilities and calculates damage + + Args: + facilities (list): Multiple gas facilities from input inventory set. + hazard (object): A hazard object. + hazard_type (str): A hazard type of the hazard exposure (earthquake). + hazard_dataset_id (str): An id of the hazard exposure. + + Returns: + list: A list of ordered dictionaries with gas facility damage values + list: A list of ordered dictionaries with other gas facility data/metadata + """ + + # Liquefaction related variables + use_liquefaction = False + liquefaction_available = False + fragility_sets_liq = None + liquefaction_resp = None + geology_dataset_id = None + liq_hazard_vals = None + liq_demand_types = None + liq_demand_units = None + liquefaction_prob = None + loc = None + + # Obtain the fragility key + fragility_key = self.get_parameter("fragility_key") + + if fragility_key is None: + if hazard_type == "earthquake": + fragility_key = GfUtil.DEFAULT_FRAGILITY_KEY + else: + raise ValueError( + "Hazard type other than Earthquake and Tsunami are not currently supported." 
+ ) + + self.set_parameter("fragility_key", fragility_key) + + # Obtain the fragility set + fragility_sets = self.fragilitysvc.match_inventory( + self.get_input_dataset("dfr3_mapping_set"), facilities, fragility_key + ) + + # Obtain the liquefaction fragility Key + liquefaction_fragility_key = self.get_parameter("liquefaction_fragility_key") + + if hazard_type == "earthquake": + if self.get_parameter("use_liquefaction") is True: + if liquefaction_fragility_key is None: + liquefaction_fragility_key = GfUtil.DEFAULT_LIQ_FRAGILITY_KEY + + use_liquefaction = self.get_parameter("use_liquefaction") + + # Obtain the geology dataset + geology_dataset_id = self.get_parameter( + "liquefaction_geology_dataset_id" + ) + + if geology_dataset_id is not None: + fragility_sets_liq = self.fragilitysvc.match_inventory( + self.get_input_dataset("dfr3_mapping_set"), + facilities, + liquefaction_fragility_key, + ) + + if fragility_sets_liq is not None: + liquefaction_available = True + + # Determine whether to use hazard uncertainty + uncertainty = self.get_parameter("use_hazard_uncertainty") + + # Setup fragility translation structures + values_payload = [] + values_payload_liq = [] + unmapped_gasfacilities = [] + mapped_gasfacilities = [] + + for facility in facilities: + if facility["id"] in fragility_sets.keys(): + # Fill in generic details + fragility_set = fragility_sets[facility["id"]] + location = GeoUtil.get_location(facility) + loc = str(location.y) + "," + str(location.x) + demands = fragility_set.demand_types + units = fragility_set.demand_units + value = {"demands": demands, "units": units, "loc": loc} + values_payload.append(value) + mapped_gasfacilities.append(facility) + + # Fill in liquefaction parameters + if liquefaction_available and facility["id"] in fragility_sets_liq: + fragility_set_liq = fragility_sets_liq[facility["id"]] + demands_liq = fragility_set_liq.demand_types + units_liq = fragility_set_liq.demand_units + value_liq = {"demands": demands_liq, "units": units_liq, "loc": loc} + values_payload_liq.append(value_liq) + else: + unmapped_gasfacilities.append(facility) + + del facilities + + if hazard_type == "earthquake": + hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) + else: + raise ValueError( + "The provided hazard type is not supported yet by this analysis" + ) + + # Check if liquefaction is applicable + if liquefaction_available: + liquefaction_resp = self.hazardsvc.post_liquefaction_values( + hazard_dataset_id, geology_dataset_id, values_payload_liq + ) + + # Calculate LS and DS + facility_results = [] + damage_results = [] + + for i, facility in enumerate(mapped_gasfacilities): + fragility_set = fragility_sets[facility["id"]] + limit_states = dict() + dmg_intervals = dict() + + # Setup conditions for the analysis + hazard_std_dev = 0 + + if uncertainty: + raise ValueError("Hazard uncertainty is not yet implemented.") + + if isinstance(fragility_set.fragility_curves[0], DFR3Curve): + hazard_vals = AnalysisUtil.update_precision_of_lists( + hazard_resp[i]["hazardValues"] + ) + demand_types = hazard_resp[i]["demands"] + demand_units = hazard_resp[i]["units"] + + hval_dict = dict() + + for j, d in enumerate(fragility_set.demand_types): + hval_dict[d] = hazard_vals[j] + + if not AnalysisUtil.do_hazard_values_have_errors( + hazard_resp[i]["hazardValues"] + ): + facility_args = ( + fragility_set.construct_expression_args_from_inventory(facility) + ) + limit_states = fragility_set.calculate_limit_state( + hval_dict, + std_dev=hazard_std_dev, + 
inventory_type="gas_facility", + **facility_args + ) + # Evaluate liquefaction: if it is not none, then liquefaction is available + if liquefaction_resp is not None: + fragility_set_liq = fragility_sets_liq[facility["id"]] + + if isinstance(fragility_set_liq.fragility_curves[0], DFR3Curve): + liq_hazard_vals = AnalysisUtil.update_precision_of_lists( + liquefaction_resp[i]["pgdValues"] + ) + liq_demand_types = liquefaction_resp[i]["demands"] + liq_demand_units = liquefaction_resp[i]["units"] + liquefaction_prob = liquefaction_resp[i]["liqProbability"] + + hval_dict_liq = dict() + + for j, d in enumerate(fragility_set_liq.demand_types): + hval_dict_liq[d] = liq_hazard_vals[j] + + facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory( + facility + ) + pgd_limit_states = fragility_set_liq.calculate_limit_state( + hval_dict_liq, + std_dev=hazard_std_dev, + inventory_type="gas_facility", + **facility_liq_args + ) + else: + raise ValueError( + "One of the fragilities is in deprecated format. " + "This should not happen If you are seeing this please report the issue." + ) + + limit_states = AnalysisUtil.adjust_limit_states_for_pgd( + limit_states, pgd_limit_states + ) + + dmg_intervals = fragility_set.calculate_damage_interval( + limit_states, + hazard_type=hazard_type, + inventory_type="gas_facility", + ) + else: + raise ValueError( + "One of the fragilities is in deprecated format. This should not happen. If you are " + "seeing this please report the issue." + ) + + facility_result = { + "guid": facility["properties"]["guid"], + **limit_states, + **dmg_intervals, + } + facility_result["haz_expose"] = ( + AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type) + ) + damage_result = dict() + damage_result["guid"] = facility["properties"]["guid"] + damage_result["fragility_id"] = fragility_set.id + damage_result["demandtypes"] = demand_types + damage_result["demandunits"] = demand_units + damage_result["hazardtype"] = hazard_type + damage_result["hazardvals"] = hazard_vals + + if use_liquefaction and fragility_sets_liq and geology_dataset_id: + damage_result["liq_fragility_id"] = fragility_sets_liq[ + facility["id"] + ].id + damage_result["liqdemandtypes"] = liq_demand_types + damage_result["liqdemandunits"] = liq_demand_units + damage_result["liqhazval"] = liq_hazard_vals + damage_result["liqprobability"] = liquefaction_prob + else: + damage_result["liq_fragility_id"] = None + damage_result["liqdemandtypes"] = None + damage_result["liqdemandunits"] = None + damage_result["liqhazval"] = None + damage_result["liqprobability"] = None + + facility_results.append(facility_result) + damage_results.append(damage_result) + + for facility in unmapped_gasfacilities: + facility_result = dict() + damage_result = dict() + facility_result["guid"] = facility["properties"]["guid"] + damage_result["guid"] = facility["properties"]["guid"] + damage_result["fragility_id"] = None + damage_result["demandtypes"] = None + damage_result["demandunits"] = None + damage_result["hazardtype"] = None + damage_result["hazardvals"] = None + damage_result["liq_fragility_id"] = None + damage_result["liqdemandtypes"] = None + damage_result["liqdemandunits"] = None + damage_result["liqhazval"] = None + damage_result["liqprobability"] = None + + facility_results.append(facility_result) + damage_results.append(damage_result) + + return facility_results, damage_results + + def get_spec(self): + """Get specifications of the bridge damage analysis. 
+ + Returns: + obj: A JSON object of specifications of the bridge damage analysis. + + """ + return { + "name": "gas-facility-damage", + "description": "Gas facility damage analysis", + "input_parameters": [ + { + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str, + }, + { + "id": "fragility_key", + "required": False, + "description": "Fragility key to use in mapping dataset", + "type": str, + }, + { + "id": "liquefaction_fragility_key", + "required": False, + "description": "Fragility key to use in liquefaction mapping dataset", + "type": str, + }, + { + "id": "use_liquefaction", + "required": False, + "description": "Use liquefaction", + "type": bool, + }, + { + "id": "liquefaction_geology_dataset_id", + "required": False, + "description": "Geology dataset id", + "type": str, + }, + { + "id": "use_hazard_uncertainty", + "required": False, + "description": "Use hazard uncertainty", + "type": bool, + }, + { + "id": "num_cpu", + "required": False, + "description": "If using parallel execution, the number of cpus to request", + "type": int, + }, + { + "id": "hazard_id", + "required": False, + "description": "Hazard object id", + "type": str, + }, + { + "id": "hazard_type", + "required": False, + "description": "Hazards type", + "type": str, + }, + ], + "input_hazards": [ + { + "id": "hazard", + "required": False, + "description": "Hazard object", + "type": ["earthquake"], + }, + ], + "input_datasets": [ + { + "id": "gas_facilities", + "required": True, + "description": "Gas facility Inventory", + "type": ["ergo:gasFacilityInventory"], + }, + { + "id": "dfr3_mapping_set", + "required": True, + "description": "DFR3 Mapping Set Object", + "type": ["incore:dfr3MappingSet"], + }, + ], + "output_datasets": [ + { + "id": "result", + "parent_type": "gas_facilities", + "description": "CSV file of gas facility structural damage", + "type": "ergo:gasFacilityInventoryDamage", + }, + { + "id": "metadata", + "parent_type": "gas_facilities", + "description": "additional metadata in json file about applied hazard value and " + "fragility", + "type": "incore:gasFacilityDamageSupplement", + }, + ], + } diff --git a/pyincore/analyses/gasfacilitydamage/gfutil.py b/pyincore/analyses/gasfacilitydamage/gfutil.py new file mode 100644 index 000000000..43f90fe84 --- /dev/null +++ b/pyincore/analyses/gasfacilitydamage/gfutil.py @@ -0,0 +1,12 @@ +# Copyright (c) 2024 University of Illinois and others. All rights reserved. 
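# NOTE (editorial): a minimal sketch of enabling the optional liquefaction path
# described in the spec above; the dataset, hazard, and mapping ids are placeholders.
from pyincore import IncoreClient, FragilityService, MappingSet, HazardService, Earthquake
from pyincore.analyses.gasfacilitydamage import GasFacilityDamage

client = IncoreClient()  # assumes valid IN-CORE credentials
eq = Earthquake.from_hazard_service("<earthquake-id>", HazardService(client))

gas_dmg = GasFacilityDamage(client)
gas_dmg.load_remote_input_dataset("gas_facilities", "<gas-facility-dataset-id>")
gas_dmg.set_input_dataset("dfr3_mapping_set", MappingSet(FragilityService(client).get_mapping("<mapping-id>")))
gas_dmg.set_input_hazard("hazard", eq)
gas_dmg.set_parameter("result_name", "gas_facility_dmg")

# both settings are needed together for liquefaction-adjusted (PGD) limit states
gas_dmg.set_parameter("use_liquefaction", True)
gas_dmg.set_parameter("liquefaction_geology_dataset_id", "<geology-dataset-id>")

gas_dmg.run_analysis()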
+# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +class GfUtil: + """Utility methods for the gas facility damage analysis.""" + + DEFAULT_FRAGILITY_KEY = "Non-Retrofit Fragility ID Code" + DEFAULT_LIQ_FRAGILITY_KEY = "pgd" diff --git a/pyincore/models/fragilitycurveset.py b/pyincore/models/fragilitycurveset.py index ac85d9c95..511b03660 100644 --- a/pyincore/models/fragilitycurveset.py +++ b/pyincore/models/fragilitycurveset.py @@ -140,6 +140,7 @@ def calculate_damage_interval(self, damage, hazard_type="earthquake", inventory_ ("earthquake", "road", 4): FragilityCurveSet._4ls_to_5ds, ("earthquake", "water_facility", 4): FragilityCurveSet._4ls_to_5ds, ("earthquake", "electric_facility", 4): FragilityCurveSet._4ls_to_5ds, + ("earthquake", "gas_facility", 4): FragilityCurveSet._4ls_to_5ds, ("tornado", "bridge", 4): FragilityCurveSet._4ls_to_5ds, ("tornado", "electric_facility", 4): FragilityCurveSet._4ls_to_5ds, ("flood", "bridge", 4): FragilityCurveSet._4ls_to_5ds, @@ -379,6 +380,8 @@ def _initialize_limit_states(inventory_type): output = {"LS_0": 0.0, "LS_1": 0.0, "LS_2": 0.0, "LS_3": 0.0} elif inventory_type == "water_facility": output = {"LS_0": 0.0, "LS_1": 0.0, "LS_2": 0.0, "LS_3": 0.0} + elif inventory_type == "gas_facility": + output = {"LS_0": 0.0, "LS_1": 0.0, "LS_2": 0.0, "LS_3": 0.0} elif inventory_type == "Transmission Towers": output = {"LS_0": 0.0} elif inventory_type == "Distribution Poles": diff --git a/tests/pyincore/analyses/gasfacilitydamage/test_gasfacilitydamage.py b/tests/pyincore/analyses/gasfacilitydamage/test_gasfacilitydamage.py new file mode 100644 index 000000000..32d0d9c87 --- /dev/null +++ b/tests/pyincore/analyses/gasfacilitydamage/test_gasfacilitydamage.py @@ -0,0 +1,40 @@ +from pyincore import IncoreClient, FragilityService, MappingSet, HazardService, Earthquake +from pyincore.analyses.gasfacilitydamage import GasFacilityDamage +import pyincore.globals as pyglobals + + +def run_with_base_class(): + client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + + # New madrid earthquake using Atkinson Boore 1995 + hazard_svc = HazardService(client) + eq = Earthquake.from_hazard_service("5b902cb273c3371e1236b36b", hazard_svc) + + # Gas Facility inventory + gas_facility_id = "5a284f26c7d30d13bc081bb8" + + gas_facility_dmg = GasFacilityDamage(client) + gas_facility_dmg.load_remote_input_dataset("gas_facilities", gas_facility_id) + gas_facility_dmg.set_input_hazard("hazard", eq) + gas_facility_dmg.set_parameter("num_cpu", 4) + + # Uncomment to include liquefaction + # liq_geology_dataset_id = "5a284f53c7d30d13bc08249c" + # gas_facility_dmg.set_parameter("use_liquefaction", True) + # gas_facility_dmg.set_parameter("liquefaction_geology_dataset_id", liq_geology_dataset_id) + + # Gas facility mapping for earthquake with liquefaction fragilities + mapping_id = "5b47c292337d4a38568f8386" + fragility_svc = FragilityService(client) + mapping_set = MappingSet(fragility_svc.get_mapping(mapping_id)) + + gas_facility_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) + + result_name = "shelby gas facility" + gas_facility_dmg.set_parameter("result_name", result_name) + + gas_facility_dmg.run_analysis() + + +if __name__ == "__main__": + run_with_base_class() From e7bceaf5bc267626110d39831c742a723f4b1fba Mon Sep 17 00:00:00 2001 From: YONG WOOK KIM Date: Thu, 23 May 2024 10:58:02 -0500 Subject: [PATCH 05/17] 
Rename transporation recovery to traffic flow recovery (#570) * Rename transporation recovery to traffic flow recovery * removed test file from the analysis folder * modified text based files * Updated documentation * updated requirements * modfied existing class * updated copyright * fixed wrong caption * modified copyrights to 2018 --- CHANGELOG.md | 5 + docs/source/modules.rst | 23 ++ pyincore/analyses/trafficflowrecovery/WIPW.py | 202 ++++++++++++ .../analyses/trafficflowrecovery/__init__.py | 13 + .../analyses/trafficflowrecovery/nsga2.py | 291 ++++++++++++++++ .../post_disaster_long_term_solution.py | 257 +++++++++++++++ .../trafficflowrecovery.py | 310 +++++++++++++++++ .../trafficflowrecoveryutil.py | 164 +++++++++ .../test_transportation_recovery.py | 39 --- .../transportationrecovery.py | 311 +----------------- .../test_trafficflowrecovery.py | 39 +++ 11 files changed, 1318 insertions(+), 336 deletions(-) create mode 100644 pyincore/analyses/trafficflowrecovery/WIPW.py create mode 100644 pyincore/analyses/trafficflowrecovery/__init__.py create mode 100644 pyincore/analyses/trafficflowrecovery/nsga2.py create mode 100644 pyincore/analyses/trafficflowrecovery/post_disaster_long_term_solution.py create mode 100644 pyincore/analyses/trafficflowrecovery/trafficflowrecovery.py create mode 100644 pyincore/analyses/trafficflowrecovery/trafficflowrecoveryutil.py delete mode 100644 pyincore/analyses/transportationrecovery/test_transportation_recovery.py create mode 100644 tests/pyincore/analyses/trafficflowrecovery/test_trafficflowrecovery.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 3e7b35bd7..de2bd2b92 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,11 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ### Changed - Rename Social Vulnerability Analysis to Social Vulnerability Index Analysis [#556](https://github.com/IN-CORE/pyincore/issues/556) +## [Unreleased] + +### Changed +- Rename transportation recovery analysis to traffic flow recovery analysis [#558](https://github.com/IN-CORE/pyincore/issues/558) + ## [1.18.1] - 2024-04-30 ### Changed diff --git a/docs/source/modules.rst b/docs/source/modules.rst index 2811f7851..440e90374 100644 --- a/docs/source/modules.rst +++ b/docs/source/modules.rst @@ -346,6 +346,29 @@ analyses/transportationrecovery :members: .. autofunction:: transportationrecovery.WIPW.path_adt_from_edges :members: +.. deprecated:: 1.19.0 + This class will be deprecated soon. Use :class:`trafficflowrecovery.TrafficFlowRecovery` instead. + +analyses/trafficflowrecovery +=============================== +.. autoclass:: trafficflowrecovery.trafficflowrecovery.TrafficFlowRecovery + :members: +.. autoclass:: trafficflowrecovery.trafficflowrecoveryutil.TrafficFlowRecoveryUtil + :members: +.. autoclass:: trafficflowrecovery.post_disaster_long_term_solution.PostDisasterLongTermSolution + :members: +.. autoclass:: trafficflowrecovery.nsga2.Solution + :members: +.. autoclass:: trafficflowrecovery.nsga2.NSGAII + :members: +.. autofunction:: trafficflowrecovery.WIPW.ipw_search + :members: +.. autofunction:: trafficflowrecovery.WIPW.tipw_index + :members: +.. autofunction:: trafficflowrecovery.WIPW.path_service_level_edges + :members: +.. 
autofunction:: trafficflowrecovery.WIPW.path_adt_from_edges + :members: analyses/waterfacilitydamage ============================ diff --git a/pyincore/analyses/trafficflowrecovery/WIPW.py b/pyincore/analyses/trafficflowrecovery/WIPW.py new file mode 100644 index 000000000..e89d4f1f9 --- /dev/null +++ b/pyincore/analyses/trafficflowrecovery/WIPW.py @@ -0,0 +1,202 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. + +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +import copy +import networkx as nx + + +def ipw_search(v, e): + """ + Indpendent pathway search + :param v: vertex + :param e: edge + :return: path and path length + """ + + g_local = nx.Graph() + + nodelist = copy.deepcopy(v) + edgeslist = copy.deepcopy(list(e)) + + g_local.add_nodes_from(nodelist) + for headnode, tailnode in edgeslist: + g_local.add_edge(headnode, tailnode) + + # calculate length of each link which is 1 for all edges + length = {} + + for (i, j) in edgeslist: + length[i, j] = 1 + length[j, i] = 1 + + # input the intact links information + nodespair = [] + for i in nodelist: + for j in nodelist: + if i != j and (j, i) not in nodespair: + nodespair.append((i, j)) + + degree = {} + degree_view = g_local.degree(nodelist) + for pair in degree_view: + i = pair[0] + j = pair[1] + degree[i] = j + + # calculate upper bound + ub = {} + + # k-th independent path between nodes pair + ipath = {} + + # the length of kth independent path between node pair + path_length = {} + + for (w, q) in nodespair: + + # creat a temp list to search path + temp_edgelist = copy.deepcopy(edgeslist) + + # up bound of possible number of independent paths + ub[w, q] = min(degree[w], degree[q]) + ipath[w, q] = {} + path_length[w, q] = {} + k = 1 + + # label whether stop the calculation + flag = 1 + while flag == 1: + g_local.clear() + g_local.add_nodes_from(nodelist) + + for headnode, tailnode in temp_edgelist: + g_local.add_edge(headnode, tailnode, length=length[headnode, + tailnode]) + + try: + temp = copy.deepcopy(nx.shortest_path(g_local, + source=w, + target=q, + weight='length')) + except nx.NetworkXNoPath as e: + # print(w,q) + # print("NetworkXNoPath") + temp = [] + + # if there is a path connecting the source and target, + # start to calculate IPW + if temp: + + # find the shortest path + ipath[w, q][k] = copy.deepcopy(temp) + path_length[w, q][k] = 0 + ipathtuple = [] + + if len(ipath[w, q][k]) == 2: + # for the path just has two nodes + # (origin and destination) + ipathtuple.append((ipath[w, q][k][0], + ipath[w, q][k][1])) + path_length[w, q][k] = length[ipath[w, q][k][0], + ipath[w, q][k][1]] + + else: + # for the path has more than two nodes + for p in range(0, len(ipath[w, q][k]) - 1): + ipathtuple.append((ipath[w, q][k][p], + ipath[w, q][k][p + 1])) + + path_length[w, q][k] += length[ipath[w, q][k][p], + ipath[w, q][k][p + 1]] + + # delete edges that used in previous shortest paths + for (s, t) in ipathtuple: + if (s, t) in temp_edgelist: + temp_edgelist.remove((s, t)) + # temp_edgelist.remove((t, s)) + + k += 1 + + else: + flag = 0 + + return ipath, path_length + + +def tipw_index(g, l, path_adt): + """ + caculate the TIPW index of the network + :param g: graph + :param l: Indpendent pathway + :param path_adt: Adt of the path + :return: TIPW index of the network + """ + + gnodes = g.nodes() + + # compute the normalized ADT + 
normal_path_adt = {} + for key in path_adt.keys(): + normal_path_adt[key] = {} + for i, j in path_adt[key].items(): + normal_path_adt[key][i] = len(path_adt[key].values()) * j \ + / sum(path_adt[key].values()) + + # compute the TIPW of node + node_tipw = {} + for node in gnodes: + node_tipw[node] = 0 + for pairnode in gnodes: + if pairnode != node: + if (node, pairnode) in l.keys(): + for key, value in l[node, pairnode].items(): + node_tipw[node] \ + += normal_path_adt[node, pairnode][key] \ + * path_service_level_edges(g, value) + elif (pairnode, node) in l.keys(): + for key, value in l[pairnode, node].items(): + node_tipw[node] \ + += normal_path_adt[pairnode, node][key] \ + * path_service_level_edges(g, value) + + # caculate the TIPW index + tipw_index_val = 0 + for node in gnodes: + # network IPW + tipw_index_val \ + += (1 / float(len(gnodes)) * node_tipw[node]) / (len(gnodes) - 1) + + return tipw_index_val + + +def path_service_level_edges(g, path): + """ + compute the service level of a path + :param g: graph + :param path: path + :return: service_level + """ + + service_level = 1 + for i in range(len(path) - 1): + service_level \ + *= (1 - g.edges[path[i], path[i + 1]]['Damage_Status'] / 4.0) + return service_level + + +def path_adt_from_edges(g, path): + """ + compute the reliabity of path from a set of edges + :param g: graph + :param path: path + :return: reliability + """ + + adt = max(nx.get_edge_attributes(g, 'adt').values()) + for i in range(len(path) - 1): + adt = min(adt, g.edges[path[i], path[i + 1]]['adt']) + + return adt diff --git a/pyincore/analyses/trafficflowrecovery/__init__.py b/pyincore/analyses/trafficflowrecovery/__init__.py new file mode 100644 index 000000000..0387c8dbe --- /dev/null +++ b/pyincore/analyses/trafficflowrecovery/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. + +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +from pyincore.analyses.trafficflowrecovery.nsga2 import Solution +from pyincore.analyses.trafficflowrecovery.nsga2 import NSGAII +from pyincore.analyses.trafficflowrecovery.post_disaster_long_term_solution import PostDisasterLongTermSolution +from pyincore.analyses.trafficflowrecovery import WIPW +from pyincore.analyses.trafficflowrecovery.trafficflowrecovery import TrafficFlowRecovery +from pyincore.analyses.trafficflowrecovery.trafficflowrecoveryutil import TrafficFlowRecoveryUtil diff --git a/pyincore/analyses/trafficflowrecovery/nsga2.py b/pyincore/analyses/trafficflowrecovery/nsga2.py new file mode 100644 index 000000000..04e87e162 --- /dev/null +++ b/pyincore/analyses/trafficflowrecovery/nsga2.py @@ -0,0 +1,291 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. + +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ +import sys, random + + +class Solution: + """Abstract solution. To be implemented.""" + + def __init__(self, num_objectives): + """Constructor. 
Parameters: number of objectives.""" + self.num_objectives = num_objectives + self.objectives = [] + for _ in range(num_objectives): + self.objectives.append(None) + self.attributes = [] + self.rank = sys.maxsize + self.distance = 0.0 + + self.chromos_fitness = {} + self.sch = {} + + def __rshift__(self, other): + """True if this solution dominates the other (">>" operator).""" + dominates = False + + for i in range(len(self.objectives)): + if self.objectives[i] > other.objectives[i]: + return False + + elif self.objectives[i] < other.objectives[i]: + dominates = True + + return dominates + + def __lshift__(self, other): + """True if this solution is dominated by the other ("<<" operator).""" + return other >> self + + +def crowded_comparison(s1, s2): + """Compare the two solutions based on crowded comparison. + + Args: + s1 (obj): One chromosome. + s2 (obj): Another chromosome. + + Returns: + float: A comparison value. + + """ + if s1.rank < s2.rank: + return 1 + + elif s1.rank > s2.rank: + return -1 + + elif s1.distance > s2.distance: + return 1 + + elif s1.distance < s2.distance: + return -1 + + else: + return 0 + + +class NSGAII: + """Implementation of NSGA-II algorithm.""" + current_evaluated_objective = 0 + + def __init__(self, num_objectives, mutation_rate=0.1, crossover_rate=1.0): + """Constructor. + + Args: + num_objectives (obj): Number of objectives. + mutation_rate (float): Mutation rate (default value 10%). + crossover_rate (float): Crossover rate (default value 100%).. + + """ + self.num_objectives = num_objectives + self.mutation_rate = mutation_rate + self.crossover_rate = crossover_rate + + random.seed(100) + + def run(self, p, population_size, num_generations): + """Run NSGA-II. + + Args: + p (obj): A set of chromosomes (population). + population_size (obj): A population size. + num_generations (obj): A number of generations. + + Returns: + list: First front of Pareto front. + + """ + for s in p: + s.evaluate_solution(0) + + first_front = [] + for i in range(num_generations): + + r = [] + r.extend(p) + + fronts = self.fast_nondominated_sort(r) + + del p[:] + + for front in fronts.values(): + if len(front) == 0: + break + + self.crowding_distance_assignment(front) + p.extend(front) + + if len(p) >= population_size: + break + + self.sort_crowding(p) + + if len(p) > population_size: + del p[population_size:] + + first_front = list(fronts.values())[0] + + return first_front + + @staticmethod + def sort_ranking(p): + """Run sort the sort of chromosomes according to their ranks. + + Args: + p (obj): A set of chromosomes (population). + + """ + for i in range(len(p) - 1, -1, -1): + for j in range(1, i + 1): + s1 = p[j - 1] + s2 = p[j] + + if s1.rank > s2.rank: + p[j - 1] = s2 + p[j] = s1 + + @staticmethod + def sort_objective(p, obj_idx): + """Run sort the chromosome based on their objective value. + + Args: + p (obj): A set of chromosomes (population). + obj_idx (int): The index of objective function. + + """ + for i in range(len(p) - 1, -1, -1): + for j in range(1, i + 1): + s1 = p[j - 1] + s2 = p[j] + + if s1.objectives[obj_idx] > s2.objectives[obj_idx]: + p[j - 1] = s2 + p[j] = s1 + + @staticmethod + def sort_crowding(p): + """Run calculate the crowding distance of adjacent two chromosome in a front level. + + Args: + p (obj): A set of chromosomes (population). 
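# NOTE (editorial): a small sketch of the dominance operator defined above; the
# objective values are made up. All objectives are minimized, so s1 >> s2 is True
# only when s1 is no worse in every objective and strictly better in at least one.
s1 = Solution(2); s1.objectives = [1.0, 2.0]
s2 = Solution(2); s2.objectives = [2.0, 2.0]
assert s1 >> s2          # s1 dominates s2
assert s2 << s1          # same relation seen from s2's side
assert not (s2 >> s1)    # s2 does not dominate s1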
+ + """ + for i in range(len(p) - 1, -1, -1): + for j in range(1, i + 1): + s1 = p[j - 1] + s2 = p[j] + + if crowded_comparison(s1, s2) < 0: + p[j - 1] = s2 + p[j] = s1 + + def make_new_pop(self, p): + """Make new population Q, offspring of P. + + Args: + p (obj): A set of chromosomes (population). + + Returns: + list: Offspring. + + """ + q = [] + + while len(q) != len(p): + selected_solutions = [None, None] + + while selected_solutions[0] == selected_solutions[1]: + for i in range(2): + s1 = random.choice(p) + s2 = s1 + while s1 == s2: + s2 = random.choice(p) + + if crowded_comparison(s1, s2) > 0: + selected_solutions[i] = s1 + + else: + selected_solutions[i] = s2 + + if random.random() < self.crossover_rate: + child_solution = selected_solutions[0].crossover(selected_solutions[1]) + + if random.random() < self.mutation_rate: + child_solution.mutate() + + child_solution.evaluate_solution(0) + + q.append(child_solution) + + return q + + @staticmethod + def fast_nondominated_sort(p): + """Discover Pareto fronts in P, based on non-domination criterion. + + Args: + p (obj): A set of chromosomes (population). + + Returns: + dict: Fronts. + + """ + fronts = {} + + s = {} + n = {} + for i in p: + s[i] = [] + n[i] = 0 + + fronts[1] = [] + + for pk in p: + for qk in p: + if pk == qk: + continue + + if pk >> qk: + s[pk].append(qk) + + elif pk << qk: + n[pk] += 1 + + if n[pk] == 0: + fronts[1].append(pk) + + i = 1 + while len(fronts[i]) != 0: + next_front = [] + for r in fronts[i]: + for j in s[r]: + n[j] -= 1 + if n[j] == 0: + next_front.append(j) + + i += 1 + fronts[i] = next_front + + return fronts + + def crowding_distance_assignment(self, front): + """Assign a crowding distance for each solution in the front. + + Args: + front (dict): A set of chromosomes in the front level. + + """ + for p in front: + p.distance = 0 + + for obj_index in range(self.num_objectives): + self.sort_objective(front, obj_index) + + front[0].distance = float('inf') + front[len(front) - 1].distance = float('inf') + + for i in range(1, len(front) - 1): + front[i].distance += (front[i + 1].distance - front[i - 1].distance) diff --git a/pyincore/analyses/trafficflowrecovery/post_disaster_long_term_solution.py b/pyincore/analyses/trafficflowrecovery/post_disaster_long_term_solution.py new file mode 100644 index 000000000..7eef30092 --- /dev/null +++ b/pyincore/analyses/trafficflowrecovery/post_disaster_long_term_solution.py @@ -0,0 +1,257 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. + +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +import copy +import networkx as nx +import random +from pyincore.analyses.trafficflowrecovery.nsga2 import Solution +from pyincore.analyses.trafficflowrecovery import WIPW as WIPW +from pyincore.analyses.trafficflowrecovery.trafficflowrecoveryutil import TrafficFlowRecoveryUtil + + +class PostDisasterLongTermSolution(Solution): + """ + Solution for the post disaster long term recovery function. 
+ """ + + # for repair time of bridge assignment + # slight damage state + slightRepair = 0.6 + # moderate damage state + modRepair = 2.5 + # extensive damage state + extRepair = 75 + # complete damage state + compRepair = 230 + + def __init__(self, candidates, node_df, arc_df, bridge_df, bridge_damage_value, + network, pm, all_ipw, path_adt): + """ + initialize the chromosomes + """ + Solution.__init__(self, 2) + self.candidates = candidates + self.node_df = node_df + self.arc_df = arc_df + self.bridge_df = bridge_df + self.attributes = [i for i in range(len(self.candidates))] + self.bridge_damage_value = bridge_damage_value + self.network = network + self.pm = pm + self.all_ipw = all_ipw + self.path_adt = path_adt + + # random sort the sequence + random.shuffle(self.attributes) + + def evaluate_solution(self, final): + """ + Implementation of evaluation for all solutions + """ + temp_bridge_damage_value = copy.deepcopy(self.bridge_damage_value) + + # input the total number of crew groups repairing the bridge at the + # same time + simax = 8 + + # determine the bridge schedule sequence for each candidate + # (Chromosome) + candidate_schedule = {} + for i in range(len(self.candidates)): + candidate_schedule[self.attributes[i]] = self.candidates[i] + + trt = 0 + srt = 0 + + start = {} + end = {} + schedule_time = [] + + l = copy.deepcopy(self.attributes) + + for i in range(len(l)): + if i <= simax - 1: + + # repair start from time 0 + start[candidate_schedule[i]] = 0.0 + + # if damage state of bridge is slight damage, repair time + # is slightRepair + if temp_bridge_damage_value[candidate_schedule[i]] == 1: + end[candidate_schedule[i]] \ + = start[candidate_schedule[i]] \ + + PostDisasterLongTermSolution.slightRepair + + # if damage state of bridge is moderate damage, repair time + # is modRepair + elif temp_bridge_damage_value[candidate_schedule[i]] == 2: + end[candidate_schedule[i]] \ + = start[candidate_schedule[i]] \ + + PostDisasterLongTermSolution.modRepair + + # if damage state of bridge is extensive damage, repair time + # is extRepair + elif temp_bridge_damage_value[candidate_schedule[i]] == 3: + end[candidate_schedule[i]] \ + = start[candidate_schedule[i]] \ + + PostDisasterLongTermSolution.extRepair + + # if damage state of bridge is complete damage, repair time + # is compRepair + else: + end[candidate_schedule[i]] \ + = start[candidate_schedule[i]] \ + + PostDisasterLongTermSolution.compRepair + + # store the ending time + schedule_time.append(end[candidate_schedule[i]]) + + # sort the complete time for scheduled bridges to hold maximum + # simultaneously intervention + schedule_time.sort() + + else: + # the repair time of next bridge will be start from the minimum + # end time of pre-scheduled bridges + start[candidate_schedule[i]] = schedule_time.pop(0) + + if temp_bridge_damage_value[candidate_schedule[i]] == 1: + end[candidate_schedule[i]] \ + = start[candidate_schedule[i]] \ + + PostDisasterLongTermSolution.slightRepair + + elif temp_bridge_damage_value[candidate_schedule[i]] == 2: + end[candidate_schedule[i]] \ + = start[candidate_schedule[i]] \ + + PostDisasterLongTermSolution.modRepair + + elif temp_bridge_damage_value[candidate_schedule[i]] == 3: + end[candidate_schedule[i]] \ + = start[candidate_schedule[i]] \ + + PostDisasterLongTermSolution.extRepair + + else: + end[candidate_schedule[i]] \ + = start[candidate_schedule[i]] \ + + PostDisasterLongTermSolution.compRepair + + schedule_time.append(end[candidate_schedule[i]]) + schedule_time.sort() + + # calculate the 
total recovery time + trt += max(end.values()) + + # compute the srt under current realization + schedule_time = [] + pp = list(end.values()) + for i in range(len(pp)): + if pp[i] not in schedule_time: + schedule_time.append(pp[i]) + + schedule_time = sorted(schedule_time) + denominator = 0 + numerator = 0 + + schedule = {} + self_candidates = copy.deepcopy(self.candidates) + + # label whether a bridge has been repaired + fg = {} + for bridge in self_candidates: + fg[bridge] = 0 + + # to reduce the computation, just the performance at every 8-time + # steps, you can adjust it + inte = 1 + pl = len(schedule_time) + + for ii in range(pl % inte - 1 + inte, pl, inte): + + if ii > 0: + + # update the damage status of bridges + for bridge in self_candidates: + if fg[bridge] == 0: + if end[bridge] <= schedule_time[ii]: + temp_bridge_damage_value[bridge] = 0 + schedule[bridge] = [start[bridge], end[bridge]] + fg[bridge] = 1 + + for i in range(len(self.arc_df)): + nod1 = self.node_df.loc[self.node_df['ID'] == self.arc_df['fromnode'][i], 'guid'].values[0] + nod2 = self.node_df.loc[self.node_df['ID'] == self.arc_df['tonode'][i], 'guid'].values[0] + self.network.edges[nod1, nod2]['Damage_Status'] = 0 + + for key, val in temp_bridge_damage_value.items(): + linknwid = self.bridge_df.loc[self.bridge_df['guid'] == key, 'linkID'].values[0] + + nod_id1 = self.arc_df[self.arc_df['id'] == linknwid]['fromnode'].values[0] + nod1 = self.node_df.loc[self.node_df['ID'] == nod_id1, 'guid'].values[0] + + nod_id2 = self.arc_df[self.arc_df['id'] == linknwid]['tonode'].values[0] + nod2 = self.node_df.loc[self.node_df['ID'] == nod_id2, 'guid'].values[0] + + self.network.edges[nod1, nod2]['Damage_Status'] = val + + nx.get_edge_attributes(self.network, 'Damage_Status') + + # calculate the travel efficiency based on different + # performance metrics based on travel time + te = None + if self.pm == 1: + te = TrafficFlowRecoveryUtil.traveltime_freeflow(self.network) + + # based on WIPW + elif self.pm == 0: + te = WIPW.tipw_index(self.network, + self.all_ipw, self.path_adt) + + numerator += te * schedule_time[ii] * (schedule_time[ii] + - schedule_time[ + ii - inte]) + aa = te + denominator += te * (schedule_time[ii] + - schedule_time[ii - inte]) + + # calculate the skewness of the recovery trajectory + try: + srt += numerator / denominator + except ZeroDivisionError as e: + print(e) + + # the first objective assignment + self.objectives[0] = trt + + # the second objective assignment + self.objectives[1] = srt + + # repair schedule of bridge assignment + self.sch[self.objectives[0], self.objectives[1]] = schedule + + if final == 0: + return self.objectives[0], self.objectives[1] + else: + return self.objectives[0], self.objectives[1], \ + self.sch[self.objectives[0], self.objectives[1]] + + def mutate(self): + """ + Mutation operator + """ + + # label whether continuous the mutation operator, for each chromosome, + # it just has one time mutation + rec = True + + # implementation of mutation + while rec: + index1 = random.randint(0, len(self.candidates) - 1) + index2 = random.randint(0, len(self.candidates) - 1) + if index1 != index2: + re = self.attributes[index1] + self.attributes[index1] = self.attributes[index2] + self.attributes[index2] = re + rec = False diff --git a/pyincore/analyses/trafficflowrecovery/trafficflowrecovery.py b/pyincore/analyses/trafficflowrecovery/trafficflowrecovery.py new file mode 100644 index 000000000..065de4584 --- /dev/null +++ b/pyincore/analyses/trafficflowrecovery/trafficflowrecovery.py @@ 
-0,0 +1,310 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. + +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +from __future__ import division +import pandas as pd +import copy +import random + +from pyincore.analyses.trafficflowrecovery.trafficflowrecoveryutil import TrafficFlowRecoveryUtil +from pyincore.analyses.trafficflowrecovery.nsga2 import NSGAII +from pyincore.analyses.trafficflowrecovery import WIPW as WIPW +from pyincore.analyses.trafficflowrecovery.post_disaster_long_term_solution import PostDisasterLongTermSolution +from pyincore import BaseAnalysis + + +class TrafficFlowRecovery(BaseAnalysis): + + def run(self): + """ Executes traffic flow recovery analysis""" + + # read the nodes in traffic flow + node_set = self.get_input_dataset("nodes").get_inventory_reader() + nodes = list(node_set) + node_df = pd.DataFrame(columns=nodes[0]['properties'].keys()) + for node in nodes: + node_properties_df = pd.DataFrame.from_dict(node['properties'], orient='index').T + node_df = pd.concat([node_df, node_properties_df], ignore_index=True) + + # read the link in traffic flow + link_set = self.get_input_dataset("links").get_inventory_reader() + links = list(link_set) + arc_df = pd.DataFrame(columns=links[0]['properties'].keys()) + for link in links: + link_properties_df = pd.DataFrame.from_dict(link['properties'], orient='index').T + arc_df = pd.concat([arc_df, link_properties_df], ignore_index=True) + + # read bridge information + bridge_set = self.get_input_dataset("bridges").get_inventory_reader() + bridges = list(bridge_set) + bridge_df = pd.DataFrame(columns=bridges[0]['properties'].keys()) + for bridge in bridges: + bridge_properties_df = pd.DataFrame.from_dict(bridge['properties'], orient='index').T + bridge_df = pd.concat([bridge_df, bridge_properties_df], ignore_index=True) + + # read bridge damage information + bridge_damage_value = self.get_input_dataset("bridge_damage_value").get_json_reader() + unrepaired_bridge = self.get_input_dataset("unrepaired_bridge").get_json_reader() + + seed = 333 + random.seed(seed) + + # record the average daily traffic (ADT) data of bridge, a road has the + # same ADT value as its crossing bridge + adt_data = self.get_input_dataset("ADT").get_json_reader() + ADT = copy.deepcopy(adt_data) + + # if there is no bridge across a road, the ADT of the road equals to + # the maximum value of ADTs + for i in range(len(arc_df)): + if arc_df["guid"][i] not in list(adt_data.keys()): + adt_data[arc_df["guid"][i]] = max(ADT.values()) + + pm = self.get_parameter("pm") + ini_num_population = self.get_parameter("ini_num_population") + population_size = self.get_parameter("population_size") + num_generation = self.get_parameter("num_generation") + mutation_rate = self.get_parameter("mutation_rate") + crossover_rate = self.get_parameter("crossover_rate") + + # create network + network = TrafficFlowRecoveryUtil.nw_reconstruct(node_df, arc_df, adt_data) + + if pm == 0: + # calculate the WIPW (Weighted independent pathways) index + all_ipw, all_ipw_length = WIPW.ipw_search(network.nodes(), network.edges()) + # Calculate the ADT for each path + path_adt = {} + for node in range(len(network.nodes()) - 1): + for pairnode in range(node + 1, len(network.nodes())): + if (node, pairnode) in all_ipw.keys(): + path_adt[node, pairnode] = {} + path_adt[pairnode, node] = {} + for 
key, value in all_ipw[node, pairnode].items(): + path_adt[node, pairnode][key] \ + = WIPW.path_adt_from_edges(network, value) + path_adt[pairnode, node][key] \ + = path_adt[node, pairnode][key] + else: + all_ipw = None + all_ipw_length = None + path_adt = None + + num_objectives = 2 + + # implement NSGA for traffic flow network post-disaster recovery + nsga2 = NSGAII(num_objectives, mutation_rate, crossover_rate) + + p = [] + for i in range(ini_num_population): + p.append(PostDisasterLongTermSolution(unrepaired_bridge, node_df, + arc_df, bridge_df, + bridge_damage_value, network, + pm, all_ipw, path_adt)) + + first_front = nsga2.run(p, population_size, num_generation) + + # output the NSGA result + order = len(first_front) - 1 + obj0 = first_front[order].objectives[0] + obj1 = first_front[order].objectives[1] + repair_schedule = first_front[order].sch[obj0, obj1] + + # extract the time just finishing repairing a bridge + ending_time = [] + bridge_set = [] + for key, value in repair_schedule.items(): + bridge_set.append(key) + ending_time.append(value[1]) + + # output the optimal solution of bridge repair schedule based on NSGA + bridge_recovery = pd.DataFrame( + {"Bridge ID": bridge_set, "Ending Time": ending_time}) + self.set_result_csv_data( + "optimal_solution_of_bridge_repair_schedule", bridge_recovery, + name="optimal_solution_of_bridge_repair_schedule", source="dataframe") + + network = TrafficFlowRecoveryUtil.nw_reconstruct(node_df, arc_df, adt_data) + + temp_bridge_damage_value = copy.deepcopy(bridge_damage_value) + + # record the non-recurrence ending time of bridges + schedule_time = [] + end = {} + for key, value in repair_schedule.items(): + if value[1] not in schedule_time: + schedule_time.append(value[1]) + end[key] = value[1] + schedule_time.sort() + + # label whether the bridge has been repaired + bridge_repair = list(repair_schedule.keys()) + fg = {} + for bridge in bridge_repair: + fg[bridge] = 0 + + # calculate the traffic flow network efficiency + efficiency = [] + for ii in range(len(schedule_time)): + + # update the damage status of bridge + for bridge in bridge_repair: + if fg[bridge] == 0: + if end[bridge] <= schedule_time[ii]: + temp_bridge_damage_value[bridge] = 0 + fg[bridge] = 1 + + for i in range(len(arc_df)): + nod1 = node_df.loc[node_df['ID'] == arc_df['fromnode'][i], 'guid'].values[0] + nod2 = node_df.loc[node_df['ID'] == arc_df['tonode'][i], 'guid'].values[0] + network.edges[nod1, nod2]['Damage_Status'] = 0 + + for key, val in temp_bridge_damage_value.items(): + linknwid = bridge_df.loc[bridge_df['guid'] == key, 'linkID'].values[0] + + nod_id1 = arc_df[arc_df['id'] == linknwid]['fromnode'].values[0] + nod1 = node_df.loc[node_df['ID'] == nod_id1, 'guid'].values[0] + + nod_id2 = arc_df[arc_df['id'] == linknwid]['tonode'].values[0] + nod2 = node_df.loc[node_df['ID'] == nod_id2, 'guid'].values[0] + + network.edges[nod1, nod2]['Damage_Status'] = val + + # calculate different travel efficiency based on different + # performance metrics + current_te = None + if pm == 1: + current_te = TrafficFlowRecoveryUtil.traveltime_freeflow(network) + + elif pm == 0: + current_te = WIPW.tipw_index(network, all_ipw, path_adt) + + efficiency.append(current_te) + + # normalize the efficiency at each time of repairing a bridge + try: + maxe = max(efficiency) + for i in range(len(efficiency)): + efficiency[i] = efficiency[i] / maxe + except ValueError as e: + print(e) + + # output the recovery trajectory + recovery_trajectory = pd.DataFrame( + {"Ending Time": schedule_time, "Travel 
Efficiency": efficiency}) + self.set_result_csv_data("overall_traffic_flow_recovery_trajectory", recovery_trajectory, + name="overall_traffic_flow_recovery_trajectory", source="dataframe") + + return None + + def get_spec(self): + """Get specifications of the traffic flow recovery model. + + Returns: + obj: A JSON object of specifications of the traffic flow recovery model. + + """ + return { + 'name': 'traffic-flow-recovery', + 'description': 'traffic flow recovery model', + 'input_parameters': [ + { + 'id': 'num_cpu', + 'required': False, + 'description': 'If using parallel execution, the number of cpus to request', + 'type': int + }, + { + 'id': 'pm', + 'required': True, + 'description': 'traffic flow performance metrics 0: WIPW, 1:Free flow travel time', + 'type': int + }, + { + 'id': 'ini_num_population', + 'required': True, + 'description': 'ini_num_population: 5 or 50', + 'type': int + }, + { + 'id': 'population_size', + 'required': True, + 'description': 'population_size: 3 or 30', + 'type': int + }, + { + 'id': 'num_generation', + 'required': True, + 'description': 'num_generation: 2 or 250', + 'type': int + }, + { + 'id': 'mutation_rate', + 'required': True, + 'description': '0.1', + 'type': float + }, + { + 'id': 'crossover_rate', + 'required': True, + 'description': '1.0', + 'type': float + } + ], + 'input_datasets': [ + { + 'id': 'nodes', + 'required': True, + 'description': 'road nodes', + 'type': ['ergo:roadNetwork'], + }, + { + 'id': 'links', + 'required': True, + 'description': 'road links', + 'type': ['ergo:roadNetwork'], + }, + { + 'id': 'bridges', + 'required': True, + 'description': 'bridges', + 'type': ['ergo:bridges', 'ergo:bridgesVer2', 'ergo:bridgesVer3'], + }, + { + 'id': 'bridge_damage_value', + 'required': True, + 'description': '', + 'type': ['incore:bridgeDamageValue'] + }, + { + 'id': 'unrepaired_bridge', + 'required': True, + 'description': '', + 'type': ['incore:unrepairedBridge'] + }, + { + 'id': 'ADT', + 'required': True, + 'description': '', + 'type': ['incore:ADT'] + } + + ], + 'output_datasets': [ + { + 'id': 'optimal_solution_of_bridge_repair_schedule', + 'description': 'List the Bridge id and its ending repair time.', + 'type': 'incore:transportationRepairSchedule' + }, + { + 'id': 'overall_traffic_flow_recovery_trajectory', + 'description': 'shows the overall recovery trajectory of the ' + + 'traffic flow system. List the ending time and ' + + 'travel efficiency for the whole network.', + 'type': 'incore:trafficFlowRecovery' + } + ] + } diff --git a/pyincore/analyses/trafficflowrecovery/trafficflowrecoveryutil.py b/pyincore/analyses/trafficflowrecovery/trafficflowrecoveryutil.py new file mode 100644 index 000000000..453dead6b --- /dev/null +++ b/pyincore/analyses/trafficflowrecovery/trafficflowrecoveryutil.py @@ -0,0 +1,164 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. + +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +from __future__ import division +import csv +import pandas as pd +import networkx as nx +import copy +import math +from pyincore import GeoUtil, InventoryDataset + + +class TrafficFlowRecoveryUtil: + + @staticmethod + def NBI_coordinate_mapping(NBI_file): + """Coordinate in NBI is in format of xx(degree)xx(minutes)xx.xx(seconds) + map it to traditional xx.xxxx in order to create shapefile. 
+ + Args: + NBI_file (str): Filename of a NBI file. + + Returns: + dict: NBI. + + """ + NBI = pd.read_csv(NBI_file) + NBI['LONG_017'] = NBI['LONG_017'].apply(lambda x: -1 * (GeoUtil.degree_to_decimal(x))) + NBI['LAT_016'] = NBI['LAT_016'].apply(lambda x: GeoUtil.degree_to_decimal(x)) + + return NBI + + @staticmethod + def get_average_daily_traffic(bridges, NBI_shapefile): + NBI = InventoryDataset(NBI_shapefile) + NBI_features = list(NBI.inventory_set) + + ADT = {} + for bridge in bridges: + # convert lon and lat to the right format + bridge_coord = GeoUtil.get_location(bridge) + nearest_feature, distance = GeoUtil.find_nearest_feature(NBI_features, bridge_coord) + + ADT[bridge['properties']['guid']] = nearest_feature['properties']['ADT_029'] + + return ADT + + @staticmethod + def convert_dmg_prob2state(dmg_results_filename): + """Upstream bridge damage analysis will generate a dmg result file with the probability + of each damage state; here determine what state using the maximum probability. + + Args: + dmg_results_filename (str): Filename of a damage results file. + + Returns: + dict: Bridge damage values. + list: Unrepaired bridge. + + """ + bridge_damage_value = {} + unrepaired_bridge = [] + + with open(dmg_results_filename, 'r') as f: + reader = csv.reader(f) + next(reader) + for row in reader: + state_id = row[0] + mean_damage = float(row[10]) + + if mean_damage > 0 and mean_damage < 0.25: + bridge_damage_value[state_id] = 1 + elif mean_damage >= 0.25 and mean_damage < 0.5: + bridge_damage_value[state_id] = 2 + elif mean_damage >= 0.5 and mean_damage < 0.75: + bridge_damage_value[state_id] = 3 + elif mean_damage >= 0.75 and mean_damage <= 1: + bridge_damage_value[state_id] = 4 + else: + raise ValueError('mean damage should not larger than 1!') + + unrepaired_bridge = list(bridge_damage_value.keys()) + + return bridge_damage_value, unrepaired_bridge + + @staticmethod + def nw_reconstruct(node_df, arc_df, adt_data): + """ + + Args: + node_df (pd.DataFrame): A node in _node_.csv. + arc_df (pd.DataFrame): A node in edge in _edge_.csv. + adt_data (pd.DataFrame): Average daily traffic flow. + + Returns: + obj: Network + + """ + # create network + network = nx.Graph() + + # add nodes to the network + network.add_nodes_from(node_df['guid']) + + # add arcs to the network + for i in range(len(arc_df)): + fromnode = \ + node_df.loc[node_df['ID'] == arc_df['fromnode'][i], 'guid'].values[ + 0] + tonode = \ + node_df.loc[node_df['ID'] == arc_df['tonode'][i], 'guid'].values[0] + dis = arc_df['len_mile'][i] / arc_df['freeflowsp'][i] + network.add_edge(fromnode, tonode, distance=dis, adt=adt_data[arc_df['guid'][i]]) + + return network + + @staticmethod + def traveltime_freeflow(temp_network): + """A travel time calculation. + + Args: + temp_network (obj): The investigated network. + + Returns: + float: Travel efficiency. 
+ + """ + network = copy.deepcopy(temp_network) + + for Ed in temp_network.edges(): + if network.edges[Ed[0], Ed[1]]['Damage_Status'] > 2: + network.remove_edge(Ed[0], Ed[1]) + elif network.edges[Ed[0], Ed[1]]['Damage_Status'] == 2: + network.edges[Ed[0], Ed[1]]['distance'] \ + = network.edges[Ed[0], Ed[1]]['distance'] / 0.5 + elif network.edges[Ed[0], Ed[1]]['Damage_Status'] == 1: + network.edges[Ed[0], Ed[1]]['distance'] \ + = network.edges[Ed[0], Ed[1]]['distance'] / 0.75 + + num_node = len(network.nodes()) + distance = [[0 for x in range(num_node)] for y in range(num_node)] + + tdistance = dict(nx.all_pairs_dijkstra_path_length(network, + weight='distance')) + i = 0 + for key1, value1 in tdistance.items(): + j = 0 + for key2 in list(value1.keys()): + distance[i][j] = tdistance[key1][key2] + j += 1 + i += 1 + + travel_efficiency = 0 + for i in range(num_node): + for j in range(num_node): + if i != j: + if distance[i][j] == 0: + distance[i][j] = math.inf + travel_efficiency += 1 / distance[i][j] + + return travel_efficiency diff --git a/pyincore/analyses/transportationrecovery/test_transportation_recovery.py b/pyincore/analyses/transportationrecovery/test_transportation_recovery.py deleted file mode 100644 index b287aaccc..000000000 --- a/pyincore/analyses/transportationrecovery/test_transportation_recovery.py +++ /dev/null @@ -1,39 +0,0 @@ -from pyincore import IncoreClient -from pyincore.analyses.transportationrecovery import TransportationRecovery - - -def run_with_base_class(): - client = IncoreClient() - transportation_recovery = TransportationRecovery(client) - - nodes = "5c5de1dec5c0e488fc0355f7" - transportation_recovery.load_remote_input_dataset("nodes", nodes) - - links = "5c5de25ec5c0e488fc035613" - transportation_recovery.load_remote_input_dataset("links", links) - - bridges = "5a284f2dc7d30d13bc082040" - transportation_recovery.load_remote_input_dataset('bridges', bridges) - - bridge_damage = "5c5ddff0c5c0e488fc0355df" - transportation_recovery.load_remote_input_dataset('bridge_damage_value', bridge_damage) - - unrepaired = "5c5de0c5c5c0e488fc0355eb" - transportation_recovery.load_remote_input_dataset('unrepaired_bridge', unrepaired) - - ADT_data = "5c5dde00c5c0e488fc032d7f" - transportation_recovery.load_remote_input_dataset('ADT', ADT_data) - - transportation_recovery.set_parameter("num_cpu", 4) - transportation_recovery.set_parameter("pm", 1) - transportation_recovery.set_parameter('ini_num_population', 5) - transportation_recovery.set_parameter("population_size", 3) - transportation_recovery.set_parameter("num_generation", 2) - transportation_recovery.set_parameter("mutation_rate", 0.1) - transportation_recovery.set_parameter("crossover_rate", 1.0) - - transportation_recovery.run_analysis() - - -if __name__ == '__main__': - run_with_base_class() diff --git a/pyincore/analyses/transportationrecovery/transportationrecovery.py b/pyincore/analyses/transportationrecovery/transportationrecovery.py index 6f146cca8..ec63b5202 100644 --- a/pyincore/analyses/transportationrecovery/transportationrecovery.py +++ b/pyincore/analyses/transportationrecovery/transportationrecovery.py @@ -1,304 +1,21 @@ -from __future__ import division -import pandas as pd -import copy -import random +# Copyright (c) 2018 University of Illinois and others. All rights reserved. 
-from pyincore.analyses.transportationrecovery.transportationrecoveryutil import TransportationRecoveryUtil -from pyincore.analyses.transportationrecovery.nsga2 import NSGAII -from pyincore.analyses.transportationrecovery import WIPW as WIPW -from pyincore.analyses.transportationrecovery.post_disaster_long_term_solution import PostDisasterLongTermSolution -from pyincore import BaseAnalysis +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ +from deprecated.sphinx import deprecated -class TransportationRecovery(BaseAnalysis): +from pyincore.analyses.trafficflowrecovery import TrafficFlowRecovery - def run(self): - """ Executes transportation recovery analysis""" - # read the nodes in transportation - node_set = self.get_input_dataset("nodes").get_inventory_reader() - nodes = list(node_set) - node_df = pd.DataFrame(columns=nodes[0]['properties'].keys()) - for node in nodes: - node_properties_df = pd.DataFrame.from_dict(node['properties'], orient='index').T - node_df = pd.concat([node_df, node_properties_df], ignore_index=True) - - # read the link in transportation - link_set = self.get_input_dataset("links").get_inventory_reader() - links = list(link_set) - arc_df = pd.DataFrame(columns=links[0]['properties'].keys()) - for link in links: - link_properties_df = pd.DataFrame.from_dict(link['properties'], orient='index').T - arc_df = pd.concat([arc_df, link_properties_df], ignore_index=True) - - # read bridge information - bridge_set = self.get_input_dataset("bridges").get_inventory_reader() - bridges = list(bridge_set) - bridge_df = pd.DataFrame(columns=bridges[0]['properties'].keys()) - for bridge in bridges: - bridge_properties_df = pd.DataFrame.from_dict(bridge['properties'], orient='index').T - bridge_df = pd.concat([bridge_df, bridge_properties_df], ignore_index=True) - - # read bridge damage information - bridge_damage_value = self.get_input_dataset("bridge_damage_value").get_json_reader() - unrepaired_bridge = self.get_input_dataset("unrepaired_bridge").get_json_reader() - - seed = 333 - random.seed(seed) - - # record the average daily traffic (ADT) data of bridge, a road has the - # same ADT value as its crossing bridge - adt_data = self.get_input_dataset("ADT").get_json_reader() - ADT = copy.deepcopy(adt_data) - - # if there is no bridge across a road, the ADT of the road equals to - # the maximum value of ADTs - for i in range(len(arc_df)): - if arc_df["guid"][i] not in list(adt_data.keys()): - adt_data[arc_df["guid"][i]] = max(ADT.values()) - - pm = self.get_parameter("pm") - ini_num_population = self.get_parameter("ini_num_population") - population_size = self.get_parameter("population_size") - num_generation = self.get_parameter("num_generation") - mutation_rate = self.get_parameter("mutation_rate") - crossover_rate = self.get_parameter("crossover_rate") - - # create network - network = TransportationRecoveryUtil.nw_reconstruct(node_df, arc_df, adt_data) - - if pm == 0: - # calculate the WIPW (Weighted independent pathways) index - all_ipw, all_ipw_length = WIPW.ipw_search(network.nodes(), network.edges()) - # Calculate the ADT for each path - path_adt = {} - for node in range(len(network.nodes()) - 1): - for pairnode in range(node + 1, len(network.nodes())): - if (node, pairnode) in all_ipw.keys(): - path_adt[node, pairnode] = {} - path_adt[pairnode, node] = {} - for key, value in all_ipw[node, pairnode].items(): - 
path_adt[node, pairnode][key] \ - = WIPW.path_adt_from_edges(network, value) - path_adt[pairnode, node][key] \ - = path_adt[node, pairnode][key] - else: - all_ipw = None - all_ipw_length = None - path_adt = None - - num_objectives = 2 - - # implement NSGA for transportation network post-disaster recovery - nsga2 = NSGAII(num_objectives, mutation_rate, crossover_rate) - - p = [] - for i in range(ini_num_population): - p.append(PostDisasterLongTermSolution(unrepaired_bridge, node_df, - arc_df, bridge_df, - bridge_damage_value, network, - pm, all_ipw, path_adt)) - - first_front = nsga2.run(p, population_size, num_generation) - - # output the NSGA result - order = len(first_front) - 1 - obj0 = first_front[order].objectives[0] - obj1 = first_front[order].objectives[1] - repair_schedule = first_front[order].sch[obj0, obj1] - - # extract the time just finishing repairing a bridge - ending_time = [] - bridge_set = [] - for key, value in repair_schedule.items(): - bridge_set.append(key) - ending_time.append(value[1]) - - # output the optimal solution of bridge repair schedule based on NSGA - bridge_recovery = pd.DataFrame( - {"Bridge ID": bridge_set, "Ending Time": ending_time}) - self.set_result_csv_data( - "optimal_solution_of_bridge_repair_schedule", bridge_recovery, - name="optimal_solution_of_bridge_repair_schedule", source="dataframe") - - network = TransportationRecoveryUtil.nw_reconstruct(node_df, arc_df, adt_data) - - temp_bridge_damage_value = copy.deepcopy(bridge_damage_value) - - # record the non-recurrence ending time of bridges - schedule_time = [] - end = {} - for key, value in repair_schedule.items(): - if value[1] not in schedule_time: - schedule_time.append(value[1]) - end[key] = value[1] - schedule_time.sort() - - # label whether the bridge has been repaired - bridge_repair = list(repair_schedule.keys()) - fg = {} - for bridge in bridge_repair: - fg[bridge] = 0 - - # calculate the transportation network efficiency - efficiency = [] - for ii in range(len(schedule_time)): - - # update the damage status of bridge - for bridge in bridge_repair: - if fg[bridge] == 0: - if end[bridge] <= schedule_time[ii]: - temp_bridge_damage_value[bridge] = 0 - fg[bridge] = 1 - - for i in range(len(arc_df)): - nod1 = node_df.loc[node_df['ID'] == arc_df['fromnode'][i], 'guid'].values[0] - nod2 = node_df.loc[node_df['ID'] == arc_df['tonode'][i], 'guid'].values[0] - network.edges[nod1, nod2]['Damage_Status'] = 0 - - for key, val in temp_bridge_damage_value.items(): - linknwid = bridge_df.loc[bridge_df['guid'] == key, 'linkID'].values[0] - - nod_id1 = arc_df[arc_df['id'] == linknwid]['fromnode'].values[0] - nod1 = node_df.loc[node_df['ID'] == nod_id1, 'guid'].values[0] - - nod_id2 = arc_df[arc_df['id'] == linknwid]['tonode'].values[0] - nod2 = node_df.loc[node_df['ID'] == nod_id2, 'guid'].values[0] - - network.edges[nod1, nod2]['Damage_Status'] = val - - # calculate different travel efficiency based on different - # performance metrics - current_te = None - if pm == 1: - current_te = TransportationRecoveryUtil.traveltime_freeflow(network) - - elif pm == 0: - current_te = WIPW.tipw_index(network, all_ipw, path_adt) - - efficiency.append(current_te) - - # normalize the efficiency at each time of repairing a bridge - try: - maxe = max(efficiency) - for i in range(len(efficiency)): - efficiency[i] = efficiency[i] / maxe - except ValueError as e: - print(e) - - # output the recovery trajectory - recovery_trajectory = pd.DataFrame( - {"Ending Time": schedule_time, "Travel Efficiency": efficiency}) - 
self.set_result_csv_data("overall_transportation_recovery_trajectory", recovery_trajectory, - name="overall_transportation_recovery_trajectory", source="dataframe") - - return None - - def get_spec(self): - """Get specifications of the transportation recovery model. - - Returns: - obj: A JSON object of specifications of the transportation recovery model. +@deprecated(version='1.19.0', reason="This class will be deprecated soon. Use TrafficFlowRecovery instead.") +class TransportationRecovery(): + def __init__(self, incore_client): + self._delegate = TrafficFlowRecovery(incore_client) + def __getattr__(self, name): """ - return { - 'name': 'transportation-recovery', - 'description': 'transportation recovery model', - 'input_parameters': [ - { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int - }, - { - 'id': 'pm', - 'required': True, - 'description': 'transportation performance metrics 0: WIPW, 1:Free flow travel time', - 'type': int - }, - { - 'id': 'ini_num_population', - 'required': True, - 'description': 'ini_num_population: 5 or 50', - 'type': int - }, - { - 'id': 'population_size', - 'required': True, - 'description': 'population_size: 3 or 30', - 'type': int - }, - { - 'id': 'num_generation', - 'required': True, - 'description': 'num_generation: 2 or 250', - 'type': int - }, - { - 'id': 'mutation_rate', - 'required': True, - 'description': '0.1', - 'type': float - }, - { - 'id': 'crossover_rate', - 'required': True, - 'description': '1.0', - 'type': float - } - ], - 'input_datasets': [ - { - 'id': 'nodes', - 'required': True, - 'description': 'road nodes', - 'type': ['ergo:roadNetwork'], - }, - { - 'id': 'links', - 'required': True, - 'description': 'road links', - 'type': ['ergo:roadNetwork'], - }, - { - 'id': 'bridges', - 'required': True, - 'description': 'bridges', - 'type': ['ergo:bridges', 'ergo:bridgesVer2', 'ergo:bridgesVer3'], - }, - { - 'id': 'bridge_damage_value', - 'required': True, - 'description': '', - 'type': ['incore:bridgeDamageValue'] - }, - { - 'id': 'unrepaired_bridge', - 'required': True, - 'description': '', - 'type': ['incore:unrepairedBridge'] - }, - { - 'id': 'ADT', - 'required': True, - 'description': '', - 'type': ['incore:ADT'] - } - - ], - 'output_datasets': [ - { - 'id': 'optimal_solution_of_bridge_repair_schedule', - 'description': 'List the Bridge id and its ending repair time.', - 'type': 'incore:transportationRepairSchedule' - }, - { - 'id': 'overall_transportation_recovery_trajectory', - 'description': 'shows the overall recovery trajectory of the ' + - 'transportation system. List the ending time and ' + - 'travel efficiency for the whole network.', - 'type': 'incore:transportationRecovery' - } - ] - } + Delegate attribute access to the TrafficFlowRecovery instance. 
+ """ + return getattr(self._delegate, name) diff --git a/tests/pyincore/analyses/trafficflowrecovery/test_trafficflowrecovery.py b/tests/pyincore/analyses/trafficflowrecovery/test_trafficflowrecovery.py new file mode 100644 index 000000000..357fa81bd --- /dev/null +++ b/tests/pyincore/analyses/trafficflowrecovery/test_trafficflowrecovery.py @@ -0,0 +1,39 @@ +from pyincore import IncoreClient +from pyincore.analyses.trafficflowrecovery import TrafficFlowRecovery + + +def run_with_base_class(): + client = IncoreClient() + traffic_flow_recovery = TrafficFlowRecovery(client) + + nodes = "603d37ec34f29a7fa4211fc4" + traffic_flow_recovery.load_remote_input_dataset("nodes", nodes) + + links = "5c5de25ec5c0e488fc035613" + traffic_flow_recovery.load_remote_input_dataset("links", links) + + bridges = "5a284f2dc7d30d13bc082040" + traffic_flow_recovery.load_remote_input_dataset('bridges', bridges) + + bridge_damage = "5c5ddff0c5c0e488fc0355df" + traffic_flow_recovery.load_remote_input_dataset('bridge_damage_value', bridge_damage) + + unrepaired = "5c5de0c5c5c0e488fc0355eb" + traffic_flow_recovery.load_remote_input_dataset('unrepaired_bridge', unrepaired) + + ADT_data = "5c5dde00c5c0e488fc032d7f" + traffic_flow_recovery.load_remote_input_dataset('ADT', ADT_data) + + traffic_flow_recovery.set_parameter("num_cpu", 4) + traffic_flow_recovery.set_parameter("pm", 1) + traffic_flow_recovery.set_parameter('ini_num_population', 5) + traffic_flow_recovery.set_parameter("population_size", 3) + traffic_flow_recovery.set_parameter("num_generation", 2) + traffic_flow_recovery.set_parameter("mutation_rate", 0.1) + traffic_flow_recovery.set_parameter("crossover_rate", 1.0) + + traffic_flow_recovery.run_analysis() + + +if __name__ == '__main__': + run_with_base_class() From 203201924a67e29558f949c7d64e281306bb17a3 Mon Sep 17 00:00:00 2001 From: YONG WOOK KIM Date: Thu, 23 May 2024 12:11:48 -0500 Subject: [PATCH 06/17] Added copyrights to transporation recovery (#580) * Added copyrights to transporation recovery * Updated changelog --- CHANGELOG.md | 3 ++- pyincore/analyses/transportationrecovery/WIPW.py | 2 ++ pyincore/analyses/transportationrecovery/__init__.py | 3 ++- pyincore/analyses/transportationrecovery/nsga2.py | 2 ++ .../post_disaster_long_term_solution.py | 2 ++ .../transportationrecovery/transportationrecoveryutil.py | 6 ++++++ 6 files changed, 16 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index de2bd2b92..3a7407190 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,9 +9,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ### Added - Gas Facility Damage Analysis [#568](https://github.com/IN-CORE/pyincore/issues/568) +- Copyrights to transportation recovery analysis [#579](https://github.com/IN-CORE/pyincore/issues/579) ### Changed -- join dataset has an option for only keeping the table dataset fields [#299](https://github.com/IN-CORE/pyincore/issues/299) +- Join dataset has an option for only keeping the table dataset fields [#299](https://github.com/IN-CORE/pyincore/issues/299) ### Fixed - Permission error in clearing cache process [#563](https://github.com/IN-CORE/pyincore/issues/563) diff --git a/pyincore/analyses/transportationrecovery/WIPW.py b/pyincore/analyses/transportationrecovery/WIPW.py index bb86f8c7f..e89d4f1f9 100644 --- a/pyincore/analyses/transportationrecovery/WIPW.py +++ b/pyincore/analyses/transportationrecovery/WIPW.py @@ -1,3 +1,5 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. 
+ # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ diff --git a/pyincore/analyses/transportationrecovery/__init__.py b/pyincore/analyses/transportationrecovery/__init__.py index 1ddce3b9f..944a1706c 100644 --- a/pyincore/analyses/transportationrecovery/__init__.py +++ b/pyincore/analyses/transportationrecovery/__init__.py @@ -1,8 +1,9 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. + # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ - from pyincore.analyses.transportationrecovery.nsga2 import Solution from pyincore.analyses.transportationrecovery.nsga2 import NSGAII from pyincore.analyses.transportationrecovery.post_disaster_long_term_solution import PostDisasterLongTermSolution diff --git a/pyincore/analyses/transportationrecovery/nsga2.py b/pyincore/analyses/transportationrecovery/nsga2.py index 65d8d4c73..c041b0427 100644 --- a/pyincore/analyses/transportationrecovery/nsga2.py +++ b/pyincore/analyses/transportationrecovery/nsga2.py @@ -1,3 +1,5 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. + # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ diff --git a/pyincore/analyses/transportationrecovery/post_disaster_long_term_solution.py b/pyincore/analyses/transportationrecovery/post_disaster_long_term_solution.py index 9a14d5e16..5681b5505 100644 --- a/pyincore/analyses/transportationrecovery/post_disaster_long_term_solution.py +++ b/pyincore/analyses/transportationrecovery/post_disaster_long_term_solution.py @@ -1,3 +1,5 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. + # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ diff --git a/pyincore/analyses/transportationrecovery/transportationrecoveryutil.py b/pyincore/analyses/transportationrecovery/transportationrecoveryutil.py index 52e5baa9a..477dfa134 100644 --- a/pyincore/analyses/transportationrecovery/transportationrecoveryutil.py +++ b/pyincore/analyses/transportationrecovery/transportationrecoveryutil.py @@ -1,3 +1,9 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. 
+ +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + from __future__ import division import csv import pandas as pd From 10404b9c77dc1b16e869bfa5e0f7f9fdcedbd1e8 Mon Sep 17 00:00:00 2001 From: Chen Wang Date: Fri, 24 May 2024 10:18:29 -0500 Subject: [PATCH 07/17] 560 renaming housing recovery to housing valuation recovery (#565) * housing valuation recovery new classes * the old method call the new one * change log and sphinx docs * add deprecation tag to sphinx * add deprecated library * update more requirements * add copyright year --- CHANGELOG.md | 6 + docs/source/modules.rst | 13 +- .../housingrecovery/housingrecovery.py | 381 +---------------- .../housingrecovery/housingrecoveryutil.py | 2 + .../housingvaluationrecovery/__init__.py | 8 + .../housingvaluationrecovery.py | 384 ++++++++++++++++++ .../housingvaluationrecoveryutil.py | 130 ++++++ pyincore/baseanalysis.py | 1 - .../test_housingvaluationrecovery.py | 183 +++++++++ 9 files changed, 732 insertions(+), 376 deletions(-) create mode 100644 pyincore/analyses/housingvaluationrecovery/__init__.py create mode 100755 pyincore/analyses/housingvaluationrecovery/housingvaluationrecovery.py create mode 100644 pyincore/analyses/housingvaluationrecovery/housingvaluationrecoveryutil.py create mode 100644 tests/pyincore/analyses/housingvaluationrecovery/test_housingvaluationrecovery.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a7407190..0121a20ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,12 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ### Changed - Rename transportation recovery analysis to traffic flow recovery analysis [#558](https://github.com/IN-CORE/pyincore/issues/558) +## [Unreleased] + +### Changed +- Rename Housing Recovery to Housing Valuation Recovery Analysis [#560](https://github.com/IN-CORE/pyincore/issues/560) + + ## [1.18.1] - 2024-04-30 ### Changed diff --git a/docs/source/modules.rst b/docs/source/modules.rst index 440e90374..afe10f545 100644 --- a/docs/source/modules.rst +++ b/docs/source/modules.rst @@ -127,10 +127,8 @@ analyses/gasfacilitydamage analyses/housingrecovery ======================== -.. autoclass:: housingrecovery.housingrecovery.HousingRecovery - :members: -.. autoclass:: housingrecovery.housingrecoveryutil.HousingRecoveryUtil - :members: +.. deprecated:: 1.19.0 + This class will be deprecated soon. Use :class:`housingvaluationrecovery.HousingValuationRecovery` instead. analyses/housingrecoverysequential ================================== @@ -142,6 +140,13 @@ analyses/housingunitallocation .. autoclass:: housingunitallocation.housingunitallocation.HousingUnitAllocation :members: +analyses/housingvaluationrecovery +======================== +.. autoclass:: housingvaluationrecovery.housingvaluationrecovery.HousingValuationRecovery + :members: +.. autoclass:: housingvaluationrecovery.housingvaluationrecoveryutil.HousingValuationRecoveryUtil + :members: + analyses/indp ============================== .. autoclass:: indp.indp.INDP diff --git a/pyincore/analyses/housingrecovery/housingrecovery.py b/pyincore/analyses/housingrecovery/housingrecovery.py index ecc5263d0..2395b9bc1 100755 --- a/pyincore/analyses/housingrecovery/housingrecovery.py +++ b/pyincore/analyses/housingrecovery/housingrecovery.py @@ -1,382 +1,21 @@ +# Copyright (c) 2019 University of Illinois and others. All rights reserved. 
+# # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -import numpy as np -import pandas as pd +from deprecated.sphinx import deprecated -from pyincore import BaseAnalysis -from pyincore.analyses.housingrecovery.housingrecoveryutil import HousingRecoveryUtil +from pyincore.analyses.housingvaluationrecovery import HousingValuationRecovery -class HousingRecovery(BaseAnalysis): - """ The analysis predicts building values and value changes over time following a disaster event. - The model is calibrated with respect to demographics, parcel data, and building value trajectories - following Hurricane Ike (2008) in Galveston, Texas. The model predicts building value at the parcel - level for 8 years of observation. The models rely on Census (Decennial or American Community Survey, ACS) - and parcel data immediately prior to the disaster event (year -1) as inputs for prediction. - - The Galveston, TX example makes use of 2010 Decennial Census and Galveston County Appraisal District (GCAD) - tax assessor data and outputs from other analysis (i.e., Building Damage, Housing Unit Allocation, - Population Dislocation) . - - The CSV outputs of the building values for the 6 years following the disaster event (with year 0 being - the impact year). - - Contributors - | Science: Wayne Day, Sarah Hamideh - | Implementation: Michal Ondrejcek, Santiago Núñez-Corrales, and NCSA IN-CORE Dev Team - - Related publications - Hamideh, S., Peacock, W. G., & Van Zandt, S. (2018). Housing recovery after disasters: Primary versus - seasonal/vacation housing markets in coastal communities. Natural Hazards Review. - - Args: - incore_client (IncoreClient): Service authentication. - - """ +@deprecated(version='1.19.0', reason="This class will be deprecated soon. Use HousingValuationRecovery instead.") +class HousingRecovery(): def __init__(self, incore_client): - super(HousingRecovery, self).__init__(incore_client) - - def get_spec(self): - return { - "name": "housing-recovery", - "description": "Housing Recovery Analysis", - "input_parameters": [ - { - "id": "base_year", - "required": False, - "description": "Base year is used to calculate improvement age. It needs to be set to the tax " - "assessment year representing pre-disaster building values. For example for GCAD " - "data which represents improvement valuation before Hurricane Ike impacts." - "Deafult 2008", - "type": int - }, - { - "id": "result_name", - "required": True, - "description": "Result CSV dataset name", - "type": str - } - ], - "input_datasets": [ - { - "id": "population_dislocation", - "required": True, - "description": "Population Dislocation aggregated to the block group level", - "type": ["incore:popDislocation"] - }, - { - "id": "building_area", - "required": True, - "description": "Building square footage and damage. 
Damage is the actual building value loss " - "in percentage terms observed through the County Appraisal District (GCAD) data", - "type": ["incore:buildingInventoryArea"] - }, - { - "id": "census_block_groups_data", - "required": True, - "description": "Census ACS data, 2010 5yr data for block groups available at IPUMS NHGIS " - "website.", - "type": ["incore:censusBlockGroupsData"] - }, - { - "id": "census_appraisal_data", - "required": True, - "description": "Census data, 2010 Decennial Census District (GCAD) Census data", - "type": ["incore:censusAppraisalData"] - } - ], - "output_datasets": [ - { - "id": "result", - "description": "A csv file with the building values for the 6 years following the disaster" - "event (year -1 denotes pre-impact conditions and 0 being the impact year). " - "Index year values represent building values against a base, pre-impact value.", - "type": "incore:buildingValues" - } - ] - } - - def run(self): - """ Executes the Housing recovery analysis. - - Returns: - bool: True if successful, False otherwise. - - """ - hru = HousingRecoveryUtil() - - # Get base year - self.base_year = self.get_parameter("base_year") - if self.base_year is None: - self.base_year = hru.BASEYEAR - # Get desired result name - result_name = self.get_parameter("result_name") - - # Datasets - pop_disl = self.get_input_dataset("population_dislocation").get_dataframe_from_csv(low_memory=False) - addl_structure_info = self.get_input_dataset("building_area").get_dataframe_from_csv(low_memory=False) - bg_mhhinc = self.get_input_dataset("census_block_groups_data").get_dataframe_from_csv(low_memory=False) - - # Census data - vac_status = self.get_input_dataset("census_appraisal_data").get_dataframe_from_csv(low_memory=False) - - # Calculate the percent vacation or seasonal housing of all housing units within a census tract - vac_status = self.get_vac_season_housing(vac_status) - - # Get ownership by filtering vacancy codes - pop_disl["d_ownerocc"] = self.get_owneship(pop_disl) - - # Keep only parcels with 1 building - building_level = pop_disl.loc[(pop_disl.bldgobs == 1)] - single_family = building_level.loc[(building_level.d_sf == 1)] - - # Read in and clean block group level median household income - bg_mhhinc["mhhinck"] = bg_mhhinc["mhhinc"] / 1000 - - # Read in and clean additional building information, NOTE add to building inventory - # Create structure id for merging data - addl_structure_info["strctid"] = addl_structure_info["xref"].apply(lambda x: "XREF"+x) - hse_recov = self.merge_add_inv(single_family, addl_structure_info) - - # Merge with seasonal/vacation housing Census ACS data - hse_recov = self.merge_seasonal_data(hse_recov, vac_status) - - # Merge with BG median HH income - hse_recov = self.merge_block_data(hse_recov, bg_mhhinc) - - # Generate minority variable - hse_recov["pminoritybg"] = hse_recov["phispbg"] + hse_recov["pblackbg"] - - # Estimate value_loss for each parcel based on parameters from Bai, Hueste, & Gardoni (2009) - hse_recov = self.value_loss(hse_recov) - - # Primary Housing Market (PHM) - hse_rec_phm = self.assemble_phm_coefs(hru, hse_recov) - # Seasonal/Vacation housing market (SVHM) - hse_rec_svhm = self.assemble_svhm_coefs(hru, hse_recov) - - d_vac_np = np.tile(hse_recov["d_vacationct"], (8, 1)).T - # Vacation condition for all years - - hse_rec_fin = np.empty(hse_recov.shape) - hse_rec_fin[:] = np.NaN - hse_rec_fin = np.where(d_vac_np == 0, np.exp(hse_rec_phm), np.NaN) - hse_rec_fin = np.where(d_vac_np == 1, np.exp(hse_rec_svhm), hse_rec_fin) - - # Index 
building values against a pre-impact (-1) value - hse_rec_fin_index = hse_rec_fin / hse_rec_fin[:, 0:1] - - # Name columns, use list of damage years (range from -1 to n). - bval_yr = ["bval_year_{0}".format(i) for i in hru.DMG_YEARS] - index_yr = ["index_{0}".format(i) for i in hru.DMG_YEARS] - - hse_rec_fin1 = pd.DataFrame(hse_rec_fin, columns=bval_yr) - hse_rec_fin2 = pd.DataFrame(hse_rec_fin_index, columns=index_yr) - hse_recov = pd.concat([hse_recov, hse_rec_fin1, hse_rec_fin2], axis=1) - - columns_to_save = ["guid", "d_vacationct", "mhhinck", "pminoritybg", "dmg", "value_loss"] + bval_yr + index_yr - self.set_result_csv_data("result", hse_recov[columns_to_save], result_name, "dataframe") - - return True - - def get_owneship(self, popd): - """ Filter ownership based on the vacancy codes - Assumption: - Where ownershp is "missing", let vacancy codes 0/3/4 be considered owner-occupied, - and 1/2/5/6/7 be considered renter-occupied. - It is uncertain whether vacancy codes 3,4,5,6,7 will become owner- or renter-occupied or primarily - one or the other. - . - Args: - popd (pd.DataFrame): Population dislocation results with ownership information. - - Returns: - pd.DataFrame: Ownership data. - - """ - # Create ownership dummy variable from popd.ownership - own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 7, 0, np.nan) - own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 6, 0, own) - own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 5, 0, own) - own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 2, 0, own) - own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 1, 0, own) - own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 4, 1, own) - own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 3, 1, own) - own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 0, 1, own) - own = np.where(popd["ownershp"] == 2, 0, own) - own = np.where(popd["ownershp"] == 1, 1, own) - - return own - - def get_vac_season_housing(self, vac_status): - """ Calculate the percent vacation or seasonal housing of all housing units within a census tract and - add dummy variable for census tract as a seasonal/vacation housing submarket. -. - Args: - vac_status (obj): Seasonal/vacation housing Census ACS data from json reader. - - Returns: - pd.DataFrame: Seasonal/vacation housing data. 
+ self._delegate = HousingValuationRecovery(incore_client) + def __getattr__(self, name): """ - vac_status["B25004_006E"] = vac_status["B25004_006E"].astype(int) - vac_status["B25004_006M"] = vac_status["B25004_006M"].astype(int) - vac_status["B25002_001E"] = vac_status["B25002_001E"].astype(int) - vac_status["B25002_001M"] = vac_status["B25002_001M"].astype(int) - - # Calculate the percent vacation or seasonal housing of all housing units within a census tract - vac_status["pvacationct_moe"] = vac_status["B25004_006E"] / vac_status["B25002_001E"] - vac_status["pvacationct"] = 100 * vac_status["pvacationct_moe"] - vac_status["pvacationct_moe"] = vac_status["pvacationct_moe"] ** 2 * vac_status["B25002_001M"] ** 2 - vac_status["pvacationct_moe"] = vac_status["B25004_006M"] ** 2 - vac_status["pvacationct_moe"] - vac_status["pvacationct_moe"] = 100 * (1 / vac_status["B25002_001E"]) * vac_status["pvacationct_moe"] ** 0.5 - - # dummy variable for census tract as a seasonal/vacation housing submarket - vac_status["d_vacationct"] = np.where(vac_status["pvacationct"] >= 50, 1, 0) - - vac_status.sort_values(by="pvacationct", inplace=True, ascending=False) - - return vac_status - - def merge_add_inv(self, hse_rec, addl_struct): - """Merge study area and additional structure information. -. - Args: - hse_rec (pd.DataFrame): Area inventory. - addl_struct (pd.DataFrame): Additional infrastructure inventory. - - Returns: - pd.DataFrame: Final merge of two inventories. - - """ - hse_rec_merged = pd.merge(hse_rec, addl_struct, on="strctid", how="inner") - return hse_rec_merged - - def merge_seasonal_data(self, hse_rec, vac_status): - """ Merge study area and with seasonal/vacation housing Census ACS data. -. - Args: - hse_rec (pd.DataFrame): Area inventory. - vac_status (pd.DataFrame): Seasonal/vacation housing Census ACS data. - - Returns: - pd.DataFrame: Final merge of two inventories. - - """ - hse_rec["tractid"] = hse_rec["tractid"].astype(str) - # Add county and state to trac to match hse_rec tracid (Galveston - 723900 to 48167723900) - vac_status["tractid"] = \ - vac_status["state"].astype(str) + vac_status["county"].astype(str) + vac_status["tract"].astype(str) - - hse_rec_merged = pd.merge(hse_rec, vac_status, left_on="tractid", right_on="tractid", how='inner') - return hse_rec_merged - - def merge_block_data(self, hse_rec, bg_mhhinc): - """ Merge block group level median household income. -. - Args: - hse_rec (pd.DataFrame): Area inventory. - bg_mhhinc (pd.DataFrame): Block data. - - Returns: - pd.DataFrame: Final merge of two inventories. - - """ - hse_rec_merged = pd.merge(hse_rec, bg_mhhinc, left_on="bgidstr", right_on="bgidstr", how="inner") - return hse_rec_merged - - def value_loss(self, hse_rec): - """ Estimate value_loss for each parcel based on parameters from Bai, Hueste, & Gardoni (2009). - . - Args: - hse_rec (pd.DataFrame): Area inventory. - - Returns: - pd.DataFrame: Inventory with value losses. - - """ - hse_rec["value_loss"] = 100 * (hse_rec["DS_0"] * hse_rec["rploss_0"] + - hse_rec["DS_1"] * hse_rec["rploss_1"] + - hse_rec["DS_2"] * hse_rec["rploss_2"] + - hse_rec["DS_3"] * hse_rec["rploss_3"]) - return hse_rec - - def assemble_phm_coefs(self, hru, hse_rec): - """ Assemble Primary Housing Market (PHM) data for full inventory and all damage-related years. - . - Args: - hru (obj): Housing recovery utility. - hse_rec (pd.DataFrame): Area inventory including losses. - - Returns: - np.array: Final coefficients for all damage years. 
- + Delegate attribute access to the HousingValuationRecovery instance. """ - dmg_years = np.array(hru.DMG_YEARS) - dmg_years_size = len(hru.DMG_YEARS) - - coef_fin = np.empty((hse_rec.shape[0], dmg_years_size)) - coef_fin[:] = hru.B_PHM_intercept + np.fromiter(hru.B_PHM_year.values(), dtype=float) - - # Adjust build year year with damage years - yrbl_all = np.empty((hse_rec.shape[0], dmg_years_size)) - yrbl_all[:] = self.base_year + dmg_years + 1 - yrbuilt = hru.B_PHM_age * (yrbl_all - hse_rec["effyrbuilt"].to_numpy()[:, np.newaxis]) - - # Square meters, use vector (1x8) with B_PHM_sqm - sqmeter = np.full((1, dmg_years_size), hru.B_PHM_sqm) * hse_rec["sqmeter"].to_numpy()[:, np.newaxis] - - if "dmg" in hse_rec.columns: - dmg_loss = np.fromiter(hru.B_PHM_dmg_year.values(), dtype=float) * \ - hse_rec["dmg"].to_numpy()[:, np.newaxis] - else: - dmg_loss = np.fromiter(hru.B_PHM_dmg_year.values(), dtype=float) * \ - hse_rec["value_loss"].to_numpy()[:, np.newaxis] - d_owner = \ - np.fromiter(hru.B_PHM_own_year.values(), dtype=float) * hse_rec["d_ownerocc"].to_numpy()[:, np.newaxis] - mhhinck = \ - np.fromiter(hru.B_PHM_inc_year.values(), dtype=float) * hse_rec["mhhinck"].to_numpy()[:, np.newaxis] - pminrbg = \ - np.fromiter(hru.B_PHM_min_year.values(), dtype=float) * hse_rec["pminoritybg"].to_numpy()[:, np.newaxis] - - return coef_fin + yrbuilt + sqmeter + d_owner + dmg_loss + mhhinck + pminrbg - - def assemble_svhm_coefs(self, hru, hse_rec): - """ Assemble Seasonal/Vacation housing market (SVHM) data for full inventory and all damage-related years. - . - Args: - hru (obj): Housing recovery utility. - hse_rec (pd.DataFrame): Area inventory including losses. - - Returns: - np.array: Final coefficients for all damage years. - - """ - dmg_years = np.array(hru.DMG_YEARS) - dmg_years_size = len(hru.DMG_YEARS) - - coef_fin = np.empty((hse_rec.shape[0], dmg_years_size)) - coef_fin[:] = hru.B_SVHM_intercept + np.fromiter(hru.B_SVHM_year.values(), dtype=float) - - # Adjust build year year with damage years - yrbl_all = np.empty((hse_rec.shape[0], dmg_years_size)) - yrbl_all[:] = self.base_year + dmg_years + 1 - yrbuilt = hru.B_SVHM_age * (yrbl_all - hse_rec["effyrbuilt"].to_numpy()[:, np.newaxis]) - - # Square meters, use vector (1x8) with B_PHM_sqm - sqmeter = np.full((1, dmg_years_size), hru.B_SVHM_sqm) * hse_rec["sqmeter"].to_numpy()[:, np.newaxis] - - if "dmg" in hse_rec.columns: - dmg_loss = np.fromiter(hru.B_SVHM_dmg_year.values(), dtype=float) * \ - hse_rec["dmg"].to_numpy()[:, np.newaxis] - else: - dmg_loss = np.fromiter(hru.B_SVHM_dmg_year.values(), dtype=float) * \ - hse_rec["value_loss"].to_numpy()[:, np.newaxis] - d_owner = \ - np.fromiter(hru.B_SVHM_own_year.values(), dtype=float) * hse_rec["d_ownerocc"].to_numpy()[:, np.newaxis] - mhhinck = \ - np.fromiter(hru.B_SVHM_inc_year.values(), dtype=float) * hse_rec["mhhinck"].to_numpy()[:, np.newaxis] - - return coef_fin + yrbuilt + sqmeter + dmg_loss + d_owner + mhhinck + return getattr(self._delegate, name) diff --git a/pyincore/analyses/housingrecovery/housingrecoveryutil.py b/pyincore/analyses/housingrecovery/housingrecoveryutil.py index 3e30bac15..d7406a14b 100644 --- a/pyincore/analyses/housingrecovery/housingrecoveryutil.py +++ b/pyincore/analyses/housingrecovery/housingrecoveryutil.py @@ -1,3 +1,5 @@ +# Copyright (c) 2019 University of Illinois and others. All rights reserved. 
+# # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ diff --git a/pyincore/analyses/housingvaluationrecovery/__init__.py b/pyincore/analyses/housingvaluationrecovery/__init__.py new file mode 100644 index 000000000..ad70e3263 --- /dev/null +++ b/pyincore/analyses/housingvaluationrecovery/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2019 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +from pyincore.analyses.housingvaluationrecovery.housingvaluationrecovery import HousingValuationRecovery diff --git a/pyincore/analyses/housingvaluationrecovery/housingvaluationrecovery.py b/pyincore/analyses/housingvaluationrecovery/housingvaluationrecovery.py new file mode 100755 index 000000000..36ff94667 --- /dev/null +++ b/pyincore/analyses/housingvaluationrecovery/housingvaluationrecovery.py @@ -0,0 +1,384 @@ +# Copyright (c) 2019 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +import numpy as np +import pandas as pd + +from pyincore import BaseAnalysis +from pyincore.analyses.housingvaluationrecovery.housingvaluationrecoveryutil import HousingValuationRecoveryUtil + + +class HousingValuationRecovery(BaseAnalysis): + """ The analysis predicts building values and value changes over time following a disaster event. + The model is calibrated with respect to demographics, parcel data, and building value trajectories + following Hurricane Ike (2008) in Galveston, Texas. The model predicts building value at the parcel + level for 8 years of observation. The models rely on Census (Decennial or American Community Survey, ACS) + and parcel data immediately prior to the disaster event (year -1) as inputs for prediction. + + The Galveston, TX example makes use of 2010 Decennial Census and Galveston County Appraisal District (GCAD) + tax assessor data and outputs from other analysis (i.e., Building Damage, Housing Unit Allocation, + Population Dislocation) . + + The CSV outputs of the building values for the 6 years following the disaster event (with year 0 being + the impact year). + + Contributors + | Science: Wayne Day, Sarah Hamideh + | Implementation: Michal Ondrejcek, Santiago Núñez-Corrales, and NCSA IN-CORE Dev Team + + Related publications + Hamideh, S., Peacock, W. G., & Van Zandt, S. (2018). Housing valuation recovery after disasters: Primary versus + seasonal/vacation housing markets in coastal communities. Natural Hazards Review. + + Args: + incore_client (IncoreClient): Service authentication. + + """ + def __init__(self, incore_client): + super(HousingValuationRecovery, self).__init__(incore_client) + + def get_spec(self): + return { + "name": "housing-valuation-recovery", + "description": "Housing Valuation Recovery Analysis", + "input_parameters": [ + { + "id": "base_year", + "required": False, + "description": "Base year is used to calculate improvement age. It needs to be set to the tax " + "assessment year representing pre-disaster building values. 
For example for GCAD " + "data which represents improvement valuation before Hurricane Ike impacts." + "Deafult 2008", + "type": int + }, + { + "id": "result_name", + "required": True, + "description": "Result CSV dataset name", + "type": str + } + ], + "input_datasets": [ + { + "id": "population_dislocation", + "required": True, + "description": "Population Dislocation aggregated to the block group level", + "type": ["incore:popDislocation"] + }, + { + "id": "building_area", + "required": True, + "description": "Building square footage and damage. Damage is the actual building value loss " + "in percentage terms observed through the County Appraisal District (GCAD) data", + "type": ["incore:buildingInventoryArea"] + }, + { + "id": "census_block_groups_data", + "required": True, + "description": "Census ACS data, 2010 5yr data for block groups available at IPUMS NHGIS " + "website.", + "type": ["incore:censusBlockGroupsData"] + }, + { + "id": "census_appraisal_data", + "required": True, + "description": "Census data, 2010 Decennial Census District (GCAD) Census data", + "type": ["incore:censusAppraisalData"] + } + ], + "output_datasets": [ + { + "id": "result", + "description": "A csv file with the building values for the 6 years following the disaster" + "event (year -1 denotes pre-impact conditions and 0 being the impact year). " + "Index year values represent building values against a base, pre-impact value.", + "type": "incore:buildingValues" + } + ] + } + + def run(self): + """ Executes the housing valuation recovery analysis. + + Returns: + bool: True if successful, False otherwise. + + """ + hru = HousingValuationRecoveryUtil() + + # Get base year + self.base_year = self.get_parameter("base_year") + if self.base_year is None: + self.base_year = hru.BASEYEAR + # Get desired result name + result_name = self.get_parameter("result_name") + + # Datasets + pop_disl = self.get_input_dataset("population_dislocation").get_dataframe_from_csv(low_memory=False) + addl_structure_info = self.get_input_dataset("building_area").get_dataframe_from_csv(low_memory=False) + bg_mhhinc = self.get_input_dataset("census_block_groups_data").get_dataframe_from_csv(low_memory=False) + + # Census data + vac_status = self.get_input_dataset("census_appraisal_data").get_dataframe_from_csv(low_memory=False) + + # Calculate the percent vacation or seasonal housing of all housing units within a census tract + vac_status = self.get_vac_season_housing(vac_status) + + # Get ownership by filtering vacancy codes + pop_disl["d_ownerocc"] = self.get_owneship(pop_disl) + + # Keep only parcels with 1 building + building_level = pop_disl.loc[(pop_disl.bldgobs == 1)] + single_family = building_level.loc[(building_level.d_sf == 1)] + + # Read in and clean block group level median household income + bg_mhhinc["mhhinck"] = bg_mhhinc["mhhinc"] / 1000 + + # Read in and clean additional building information, NOTE add to building inventory + # Create structure id for merging data + addl_structure_info["strctid"] = addl_structure_info["xref"].apply(lambda x: "XREF"+x) + hse_recov = self.merge_add_inv(single_family, addl_structure_info) + + # Merge with seasonal/vacation housing Census ACS data + hse_recov = self.merge_seasonal_data(hse_recov, vac_status) + + # Merge with BG median HH income + hse_recov = self.merge_block_data(hse_recov, bg_mhhinc) + + # Generate minority variable + hse_recov["pminoritybg"] = hse_recov["phispbg"] + hse_recov["pblackbg"] + + # Estimate value_loss for each parcel based on parameters from Bai, Hueste, & 
Gardoni (2009) + hse_recov = self.value_loss(hse_recov) + + # Primary Housing Market (PHM) + hse_rec_phm = self.assemble_phm_coefs(hru, hse_recov) + # Seasonal/Vacation housing market (SVHM) + hse_rec_svhm = self.assemble_svhm_coefs(hru, hse_recov) + + d_vac_np = np.tile(hse_recov["d_vacationct"], (8, 1)).T + # Vacation condition for all years + + hse_rec_fin = np.empty(hse_recov.shape) + hse_rec_fin[:] = np.NaN + hse_rec_fin = np.where(d_vac_np == 0, np.exp(hse_rec_phm), np.NaN) + hse_rec_fin = np.where(d_vac_np == 1, np.exp(hse_rec_svhm), hse_rec_fin) + + # Index building values against a pre-impact (-1) value + hse_rec_fin_index = hse_rec_fin / hse_rec_fin[:, 0:1] + + # Name columns, use list of damage years (range from -1 to n). + bval_yr = ["bval_year_{0}".format(i) for i in hru.DMG_YEARS] + index_yr = ["index_{0}".format(i) for i in hru.DMG_YEARS] + + hse_rec_fin1 = pd.DataFrame(hse_rec_fin, columns=bval_yr) + hse_rec_fin2 = pd.DataFrame(hse_rec_fin_index, columns=index_yr) + hse_recov = pd.concat([hse_recov, hse_rec_fin1, hse_rec_fin2], axis=1) + + columns_to_save = ["guid", "d_vacationct", "mhhinck", "pminoritybg", "dmg", "value_loss"] + bval_yr + index_yr + self.set_result_csv_data("result", hse_recov[columns_to_save], result_name, "dataframe") + + return True + + def get_owneship(self, popd): + """ Filter ownership based on the vacancy codes + Assumption: + Where ownershp is "missing", let vacancy codes 0/3/4 be considered owner-occupied, + and 1/2/5/6/7 be considered renter-occupied. + It is uncertain whether vacancy codes 3,4,5,6,7 will become owner- or renter-occupied or primarily + one or the other. + . + Args: + popd (pd.DataFrame): Population dislocation results with ownership information. + + Returns: + pd.DataFrame: Ownership data. + + """ + # Create ownership dummy variable from popd.ownership + own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 7, 0, np.nan) + own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 6, 0, own) + own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 5, 0, own) + own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 2, 0, own) + own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 1, 0, own) + own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 4, 1, own) + own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 3, 1, own) + own = np.where(popd["ownershp"].isnull() & popd["vacancy"] == 0, 1, own) + own = np.where(popd["ownershp"] == 2, 0, own) + own = np.where(popd["ownershp"] == 1, 1, own) + + return own + + def get_vac_season_housing(self, vac_status): + """ Calculate the percent vacation or seasonal housing of all housing units within a census tract and + add dummy variable for census tract as a seasonal/vacation housing submarket. +. + Args: + vac_status (obj): Seasonal/vacation housing Census ACS data from json reader. + + Returns: + pd.DataFrame: Seasonal/vacation housing data. 
+ + """ + vac_status["B25004_006E"] = vac_status["B25004_006E"].astype(int) + vac_status["B25004_006M"] = vac_status["B25004_006M"].astype(int) + vac_status["B25002_001E"] = vac_status["B25002_001E"].astype(int) + vac_status["B25002_001M"] = vac_status["B25002_001M"].astype(int) + + # Calculate the percent vacation or seasonal housing of all housing units within a census tract + vac_status["pvacationct_moe"] = vac_status["B25004_006E"] / vac_status["B25002_001E"] + vac_status["pvacationct"] = 100 * vac_status["pvacationct_moe"] + vac_status["pvacationct_moe"] = vac_status["pvacationct_moe"] ** 2 * vac_status["B25002_001M"] ** 2 + vac_status["pvacationct_moe"] = vac_status["B25004_006M"] ** 2 - vac_status["pvacationct_moe"] + vac_status["pvacationct_moe"] = 100 * (1 / vac_status["B25002_001E"]) * vac_status["pvacationct_moe"] ** 0.5 + + # dummy variable for census tract as a seasonal/vacation housing submarket + vac_status["d_vacationct"] = np.where(vac_status["pvacationct"] >= 50, 1, 0) + + vac_status.sort_values(by="pvacationct", inplace=True, ascending=False) + + return vac_status + + def merge_add_inv(self, hse_rec, addl_struct): + """Merge study area and additional structure information. +. + Args: + hse_rec (pd.DataFrame): Area inventory. + addl_struct (pd.DataFrame): Additional infrastructure inventory. + + Returns: + pd.DataFrame: Final merge of two inventories. + + """ + hse_rec_merged = pd.merge(hse_rec, addl_struct, on="strctid", how="inner") + return hse_rec_merged + + def merge_seasonal_data(self, hse_rec, vac_status): + """ Merge study area and with seasonal/vacation housing Census ACS data. +. + Args: + hse_rec (pd.DataFrame): Area inventory. + vac_status (pd.DataFrame): Seasonal/vacation housing Census ACS data. + + Returns: + pd.DataFrame: Final merge of two inventories. + + """ + hse_rec["tractid"] = hse_rec["tractid"].astype(str) + # Add county and state to trac to match hse_rec tracid (Galveston - 723900 to 48167723900) + vac_status["tractid"] = \ + vac_status["state"].astype(str) + vac_status["county"].astype(str) + vac_status["tract"].astype(str) + + hse_rec_merged = pd.merge(hse_rec, vac_status, left_on="tractid", right_on="tractid", how='inner') + return hse_rec_merged + + def merge_block_data(self, hse_rec, bg_mhhinc): + """ Merge block group level median household income. +. + Args: + hse_rec (pd.DataFrame): Area inventory. + bg_mhhinc (pd.DataFrame): Block data. + + Returns: + pd.DataFrame: Final merge of two inventories. + + """ + hse_rec_merged = pd.merge(hse_rec, bg_mhhinc, left_on="bgidstr", right_on="bgidstr", how="inner") + return hse_rec_merged + + def value_loss(self, hse_rec): + """ Estimate value_loss for each parcel based on parameters from Bai, Hueste, & Gardoni (2009). + . + Args: + hse_rec (pd.DataFrame): Area inventory. + + Returns: + pd.DataFrame: Inventory with value losses. + + """ + hse_rec["value_loss"] = 100 * (hse_rec["DS_0"] * hse_rec["rploss_0"] + + hse_rec["DS_1"] * hse_rec["rploss_1"] + + hse_rec["DS_2"] * hse_rec["rploss_2"] + + hse_rec["DS_3"] * hse_rec["rploss_3"]) + return hse_rec + + def assemble_phm_coefs(self, hru, hse_rec): + """ Assemble Primary Housing Market (PHM) data for full inventory and all damage-related years. + . + Args: + hru (obj): Housing valuation recovery utility. + hse_rec (pd.DataFrame): Area inventory including losses. + + Returns: + np.array: Final coefficients for all damage years. 
+
+        """
+        dmg_years = np.array(hru.DMG_YEARS)
+        dmg_years_size = len(hru.DMG_YEARS)
+
+        coef_fin = np.empty((hse_rec.shape[0], dmg_years_size))
+        coef_fin[:] = hru.B_PHM_intercept + np.fromiter(hru.B_PHM_year.values(), dtype=float)
+
+        # Adjust build year with damage years
+        yrbl_all = np.empty((hse_rec.shape[0], dmg_years_size))
+        yrbl_all[:] = self.base_year + dmg_years + 1
+        yrbuilt = hru.B_PHM_age * (yrbl_all - hse_rec["effyrbuilt"].to_numpy()[:, np.newaxis])
+
+        # Square meters, use vector (1x8) with B_PHM_sqm
+        sqmeter = np.full((1, dmg_years_size), hru.B_PHM_sqm) * hse_rec["sqmeter"].to_numpy()[:, np.newaxis]
+
+        if "dmg" in hse_rec.columns:
+            dmg_loss = np.fromiter(hru.B_PHM_dmg_year.values(), dtype=float) * \
+                       hse_rec["dmg"].to_numpy()[:, np.newaxis]
+        else:
+            dmg_loss = np.fromiter(hru.B_PHM_dmg_year.values(), dtype=float) * \
+                       hse_rec["value_loss"].to_numpy()[:, np.newaxis]
+        d_owner = \
+            np.fromiter(hru.B_PHM_own_year.values(), dtype=float) * hse_rec["d_ownerocc"].to_numpy()[:, np.newaxis]
+        mhhinck = \
+            np.fromiter(hru.B_PHM_inc_year.values(), dtype=float) * hse_rec["mhhinck"].to_numpy()[:, np.newaxis]
+        pminrbg = \
+            np.fromiter(hru.B_PHM_min_year.values(), dtype=float) * hse_rec["pminoritybg"].to_numpy()[:, np.newaxis]
+
+        return coef_fin + yrbuilt + sqmeter + d_owner + dmg_loss + mhhinck + pminrbg
+
+    def assemble_svhm_coefs(self, hru, hse_rec):
+        """ Assemble Seasonal/Vacation housing market (SVHM) data for full inventory and all damage-related years.
+        .
+        Args:
+            hru (obj): Housing valuation recovery utility.
+            hse_rec (pd.DataFrame): Area inventory including losses.
+
+        Returns:
+            np.array: Final coefficients for all damage years.
+
+        """
+        dmg_years = np.array(hru.DMG_YEARS)
+        dmg_years_size = len(hru.DMG_YEARS)
+
+        coef_fin = np.empty((hse_rec.shape[0], dmg_years_size))
+        coef_fin[:] = hru.B_SVHM_intercept + np.fromiter(hru.B_SVHM_year.values(), dtype=float)
+
+        # Adjust build year with damage years
+        yrbl_all = np.empty((hse_rec.shape[0], dmg_years_size))
+        yrbl_all[:] = self.base_year + dmg_years + 1
+        yrbuilt = hru.B_SVHM_age * (yrbl_all - hse_rec["effyrbuilt"].to_numpy()[:, np.newaxis])
+
+        # Square meters, use vector (1x8) with B_SVHM_sqm
+        sqmeter = np.full((1, dmg_years_size), hru.B_SVHM_sqm) * hse_rec["sqmeter"].to_numpy()[:, np.newaxis]
+
+        if "dmg" in hse_rec.columns:
+            dmg_loss = np.fromiter(hru.B_SVHM_dmg_year.values(), dtype=float) * \
+                       hse_rec["dmg"].to_numpy()[:, np.newaxis]
+        else:
+            dmg_loss = np.fromiter(hru.B_SVHM_dmg_year.values(), dtype=float) * \
+                       hse_rec["value_loss"].to_numpy()[:, np.newaxis]
+        d_owner = \
+            np.fromiter(hru.B_SVHM_own_year.values(), dtype=float) * hse_rec["d_ownerocc"].to_numpy()[:, np.newaxis]
+        mhhinck = \
+            np.fromiter(hru.B_SVHM_inc_year.values(), dtype=float) * hse_rec["mhhinck"].to_numpy()[:, np.newaxis]
+
+        return coef_fin + yrbuilt + sqmeter + dmg_loss + d_owner + mhhinck
diff --git a/pyincore/analyses/housingvaluationrecovery/housingvaluationrecoveryutil.py b/pyincore/analyses/housingvaluationrecovery/housingvaluationrecoveryutil.py
new file mode 100644
index 000000000..01c3078e8
--- /dev/null
+++ b/pyincore/analyses/housingvaluationrecovery/housingvaluationrecoveryutil.py
@@ -0,0 +1,130 @@
+# Copyright (c) 2019 University of Illinois and others. All rights reserved.
+# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +class HousingValuationRecoveryUtil: + BASEYEAR = 2008 + + DMG_YEARS = [-1, 0, 1, 2, 3, 4, 5, 6] + + # The models presented in this section are based on models developed in Hamideh et al. (2018). + # The modifications from the original Galveston models include (1) combining non-Hispanic Black + # and Hispanic block group percentages into a minority block group percentage variable, + # and (2) having both parcel and block group level intercepts. The original model only had parcel level + # intercepts. Considering that multiple levels of data are used (parcel and block group), it is appropriate + # to have both levels of intercepts. + + # Primary housing market (PHM) model coefficients + B_PHM_intercept = 11.407594 + + # Year indicator dummy variables + B_PHM_year = {} + B_PHM_year[-1] = 0.000000 # year -1, tax assessment immediately before disaster + B_PHM_year[0] = 0.263500 # year 0, tax assessment immediately after disaster, damage year + B_PHM_year[1] = 0.147208 # year +1 + B_PHM_year[2] = 0.110004 # year +2 + B_PHM_year[3] = 0.122228 # year +3 + B_PHM_year[4] = 0.102886 # year +4 + B_PHM_year[5] = 0.102806 # year +5 + B_PHM_year[6] = 0.276548 # year +6 + + # House Characteristics + B_PHM_age = -0.030273 + B_PHM_sqm = 0.005596 + + # Damage and year dummy interactions + B_PHM_dmg_year = {} + B_PHM_dmg_year[-1] = 0.000000 + B_PHM_dmg_year[0] = -0.037458 # base effect + B_PHM_dmg_year[1] = 0.009310 + B_PHM_dmg_year[0] + B_PHM_dmg_year[2] = 0.009074 + B_PHM_dmg_year[0] + B_PHM_dmg_year[3] = 0.010340 + B_PHM_dmg_year[0] + B_PHM_dmg_year[4] = 0.011293 + B_PHM_dmg_year[0] + B_PHM_dmg_year[5] = 0.012178 + B_PHM_dmg_year[0] + B_PHM_dmg_year[6] = 0.012188 + B_PHM_dmg_year[0] + + # Owner-occupied and year dummy interactions + B_PHM_own_year = {} + B_PHM_own_year[-1] = 0.017153 # base effect + B_PHM_own_year[0] = 0.129077 + B_PHM_own_year[-1] + B_PHM_own_year[1] = 0.188217 + B_PHM_own_year[-1] + B_PHM_own_year[2] = 0.235435 + B_PHM_own_year[-1] + B_PHM_own_year[3] = 0.246437 + B_PHM_own_year[-1] + B_PHM_own_year[4] = 0.261220 + B_PHM_own_year[-1] + B_PHM_own_year[5] = 0.281015 + B_PHM_own_year[-1] + B_PHM_own_year[6] = 0.265465 + B_PHM_own_year[-1] + + # Median household income and year dummy interactions + B_PHM_inc_year = {} + B_PHM_inc_year[-1] = 0.002724 # base effect + B_PHM_inc_year[0] = 0.001190 + B_PHM_inc_year[-1] + B_PHM_inc_year[1] = 0.001480 + B_PHM_inc_year[-1] + B_PHM_inc_year[2] = 0.001746 + B_PHM_inc_year[-1] + B_PHM_inc_year[3] = 0.001494 + B_PHM_inc_year[-1] + B_PHM_inc_year[4] = 0.001480 + B_PHM_inc_year[-1] + B_PHM_inc_year[5] = 0.001849 + B_PHM_inc_year[-1] + B_PHM_inc_year[6] = 0.001400 + B_PHM_inc_year[-1] + + # Block Group percent Minority and year dummy interactions + B_PHM_min_year = {} + B_PHM_min_year[-1] = -0.004783 # base effect + B_PHM_min_year[0] = 0.005609 + B_PHM_min_year[-1] + B_PHM_min_year[1] = 0.007343 + B_PHM_min_year[-1] + B_PHM_min_year[2] = 0.007459 + B_PHM_min_year[-1] + B_PHM_min_year[3] = 0.006421 + B_PHM_min_year[-1] + B_PHM_min_year[4] = 0.006308 + B_PHM_min_year[-1] + B_PHM_min_year[5] = 0.005770 + B_PHM_min_year[-1] + B_PHM_min_year[6] = 0.005620 + B_PHM_min_year[-1] + + # Seasonal/Vacation Housing Market (SVHM) model coefficients + B_SVHM_intercept = 11.125636 + + # Year indicator dummy variables + B_SVHM_year = {} + 
B_SVHM_year[-1] = 0.000000 # year -1, tax assessment immediately before disaster + B_SVHM_year[0] = 1.489008 # year 0, tax assessment immediately after disaster, damage year + B_SVHM_year[1] = 1.858770 # year +1 + B_SVHM_year[2] = 2.163492 # year +2 + B_SVHM_year[3] = 2.071690 # year +3 + B_SVHM_year[4] = 2.110245 # year +4 + B_SVHM_year[5] = 2.208577 # year +5 + B_SVHM_year[6] = 2.273867 # year +6 + + # House Characteristics + B_SVHM_age = -0.014260 + B_SVHM_sqm = 0.005505 + + # Damage and year dummy interactions + B_SVHM_dmg_year = {} + B_SVHM_dmg_year[-1] = 0.000000 + B_SVHM_dmg_year[0] = -0.051913 # base effect + B_SVHM_dmg_year[1] = 0.001504 + B_SVHM_dmg_year[0] + B_SVHM_dmg_year[2] = -0.004172 + B_SVHM_dmg_year[0] + B_SVHM_dmg_year[3] = -0.002016 + B_SVHM_dmg_year[0] + B_SVHM_dmg_year[4] = 0.001687 + B_SVHM_dmg_year[0] + B_SVHM_dmg_year[5] = 0.000876 + B_SVHM_dmg_year[0] + B_SVHM_dmg_year[6] = 0.001129 + B_SVHM_dmg_year[0] + + # Owner-occupied and year dummy interactions + B_SVHM_own_year = {} + B_SVHM_own_year[-1] = -0.017167 # base effect + B_SVHM_own_year[0] = 0.043263 + B_SVHM_own_year[-1] + B_SVHM_own_year[1] = 0.003315 + B_SVHM_own_year[-1] + B_SVHM_own_year[2] = 0.034372 + B_SVHM_own_year[-1] + B_SVHM_own_year[3] = -0.014929 + B_SVHM_own_year[-1] + B_SVHM_own_year[4] = -0.021078 + B_SVHM_own_year[-1] + B_SVHM_own_year[5] = 0.001161 + B_SVHM_own_year[-1] + B_SVHM_own_year[6] = 0.017562 + B_SVHM_own_year[-1] + + # Median household income and year dummy interactions + B_SVHM_inc_year = {} + B_SVHM_inc_year[-1] = 0.003786 # base effect + B_SVHM_inc_year[0] = -0.013662 + B_SVHM_inc_year[-1] + B_SVHM_inc_year[1] = -0.017401 + B_SVHM_inc_year[-1] + B_SVHM_inc_year[2] = -0.021541 + B_SVHM_inc_year[-1] + B_SVHM_inc_year[3] = -0.019756 + B_SVHM_inc_year[-1] + B_SVHM_inc_year[4] = -0.020789 + B_SVHM_inc_year[-1] + B_SVHM_inc_year[5] = -0.021555 + B_SVHM_inc_year[-1] + B_SVHM_inc_year[6] = -0.019781 + B_SVHM_inc_year[-1] diff --git a/pyincore/baseanalysis.py b/pyincore/baseanalysis.py index 60d7af84c..49dba1d9a 100644 --- a/pyincore/baseanalysis.py +++ b/pyincore/baseanalysis.py @@ -33,7 +33,6 @@ def __init__(self, incore_client): self.input_datasets = {} for input_dataset in self.spec['input_datasets']: self.input_datasets[input_dataset['id']] = {'spec': input_dataset, 'value': None} - self.input_hazards = {} if 'input_hazards' in self.spec: for input_hazards in self.spec['input_hazards']: diff --git a/tests/pyincore/analyses/housingvaluationrecovery/test_housingvaluationrecovery.py b/tests/pyincore/analyses/housingvaluationrecovery/test_housingvaluationrecovery.py new file mode 100644 index 000000000..a722556aa --- /dev/null +++ b/tests/pyincore/analyses/housingvaluationrecovery/test_housingvaluationrecovery.py @@ -0,0 +1,183 @@ +from pyincore import IncoreClient +from pyincore import MappingSet, FragilityService +from pyincore.analyses.buildingdamage.buildingdamage import BuildingDamage +from pyincore.analyses.housingunitallocation import HousingUnitAllocation +from pyincore.analyses.populationdislocation import PopulationDislocation +from pyincore.analyses.housingvaluationrecovery.housingvaluationrecovery import HousingValuationRecovery +import pyincore.globals as pyglobals + + +def run_with_base_class(chained): + client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + + if chained: + # ************************************************************* + # Set up and run Building damage, Galveston, TX analysis + # ************************************************************* + hazard_type = 
"hurricane" + # Galveston deterministic Hurricane, 3 datasets - Kriging + # hazard_id = "5fa5a228b6429615aeea4410" # prod + hazard_id = "5fa472033c1f0c73fe81461a" + + # Galveston, TX Building inventory + # bldg_inv_id = "60354b6c123b4036e6837ef7" # prod 19k buildings with age_group + bldg_inv_id = "602eba8bb1db9c28aef01358" + + # Hurricane building mapping (with equation) + mapping_id = "602c381a1d85547cdc9f0675" # dev & prod + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + + bldg_dmg = BuildingDamage(client) + bldg_dmg.load_remote_input_dataset("buildings", bldg_inv_id) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) + + result_name = "Galveston_bldg_dmg_result" + + bldg_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("hazard_type", hazard_type) + bldg_dmg.set_parameter("hazard_id", hazard_id) + bldg_dmg.set_parameter("num_cpu", 4) + + # Run Building damage + bldg_dmg.run_analysis() + + # Retrieve result dataset + building_dmg_result = bldg_dmg.get_output_dataset("ds_result") + + # Convert dataset to Pandas DataFrame + # bdmg_df = building_dmg_result.get_dataframe_from_csv(low_memory=False) + # print(bdmg_df[["guid", "DS_0", "DS_1", "DS_2", "DS_3", "haz_expose"]]) + + # ************************************************************* + # Set up and run Housing Unit Allocation (HUA) analysis + # ************************************************************* + # Housing unit inventory + # housing_unit_inv = "5fc6ab1cd2066956f49e7a03" # prod + housing_unit_inv = "5fc6a757eba0eb743dc7739f" + # Address point inventory + # address_point_inv = "5fc6aadcc38a0722f563392e" + address_point_inv = "5fc6a4ddeba0eb743dc77301" + + # Create housing allocation + hua = HousingUnitAllocation(client) + + # Load input dataset + hua.load_remote_input_dataset("housing_unit_inventory", housing_unit_inv) + hua.load_remote_input_dataset("address_point_inventory", address_point_inv) + hua.load_remote_input_dataset("buildings", bldg_inv_id) + + # Specify the result name + result_name = "Galveston_HUA" + + seed = 1238 + iterations = 1 + + # Set analysis parameters + hua.set_parameter("result_name", result_name) + hua.set_parameter("seed", seed) + hua.set_parameter("iterations", iterations) + + # Run Housing unit allocation analysis + hua.run_analysis() + + # Retrieve result dataset + hua_result = hua.get_output_dataset("result") + + # Convert dataset to Pandas DataFrame + # hua_df = hua_result.get_dataframe_from_csv(low_memory=False) + # Display top 5 rows of output data + # print(hua_df[["guid", "numprec", "incomegroup"]].head()) + + # ************************************************************* + # Set up and run Population Dislocation analysis + # ************************************************************* + + # Block group data, IN-CORE_BGMAP_2021-01-19_GalvestonTX + # bg_data = "603545f2dcda03378087e708" # prod + bg_data = "6035445b1e456929c86094c8" + # Value loss parameters DS 0-3 + # value_loss = "60354810e379f22e16560dbd" + value_loss = "613901d9ca423a005e422feb" + + pop_dis = PopulationDislocation(client) + + pop_dis.load_remote_input_dataset("block_group_data", bg_data) + pop_dis.load_remote_input_dataset("value_loss_param", value_loss) + + pop_dis.set_input_dataset("building_dmg", building_dmg_result) + pop_dis.set_input_dataset("housing_unit_allocation", hua_result) + + result_name = 
"galveston-pop-disl-results" + seed = 1111 + + pop_dis.set_parameter("result_name", result_name) + pop_dis.set_parameter("seed", seed) + + pop_dis.run_analysis() + + # Retrieve result dataset + pd_result = pop_dis.get_output_dataset("result") + + # Convert dataset to Pandas DataFrame + # pd_df = pd_result.get_dataframe_from_csv(low_memory=False) + # Display top 5 rows of output data + # print(pd_df[["guid", "dislocated"]].head()) + + # ************************************************************* + # Set up and run Housing Recovery analysis + # ************************************************************* + + # Additional inventory data (assessed damage, square area) + bldg_sqft_id = "62193c19a42a3e546ae2d9f8" + # Census block groups data (required) + census_bg_id = "62193b7ca42a3e546ae2d9f2" + + # Census appraisal file; id of external Census json + census_appr_id = "6241fbd153302c512d685181" # dev + result_name = "Galveston_building_values_chained" + + housing_rec = HousingValuationRecovery(client) + + housing_rec.set_input_dataset("population_dislocation", pd_result) + housing_rec.load_remote_input_dataset("building_area", bldg_sqft_id) + housing_rec.load_remote_input_dataset("census_block_groups_data", census_bg_id) + # Census appraisal data + housing_rec.load_remote_input_dataset("census_appraisal_data", census_appr_id) + + housing_rec.set_parameter("result_name", result_name) + + housing_rec.run_analysis() + + else: + # Run Housing Recovery analysis without chaining + + # Additional inventory data (assessed damage, square area) + bldg_add_id = "62193c19a42a3e546ae2d9f8" + # bldg_add_id = "6243638e89dbb809c6432501" # no dmg, uses value_loss + # Census block groups data (required) + census_bg_id = "62193b7ca42a3e546ae2d9f2" + + # Census appraisal file; id of external Census json + census_appr_id = "62c4956d861e370172c5578b" + result_name = "Galveston_building_values" + + housing_rec = HousingValuationRecovery(client) + + pop_disl_id = "623d1e1ca42a3e546aeba25f" # dev + housing_rec.load_remote_input_dataset("population_dislocation", pop_disl_id) + housing_rec.load_remote_input_dataset("building_area", bldg_add_id) + housing_rec.load_remote_input_dataset("census_block_groups_data", census_bg_id) + # Census appraisal data + housing_rec.load_remote_input_dataset("census_appraisal_data", census_appr_id) + + housing_rec.set_parameter("result_name", result_name) + + housing_rec.run_analysis() + + +if __name__ == '__main__': + chained = False + + run_with_base_class(chained) From f3c78def3c162812494e290951ca9269c1cd4aed Mon Sep 17 00:00:00 2001 From: Chen Wang Date: Fri, 24 May 2024 12:13:18 -0500 Subject: [PATCH 08/17] Rename portfolio to cluster recovery (#569) * rename and changelog * more renaming * typo * add copyrights --- CHANGELOG.md | 6 +++ .../buildingclusterrecovery/__init__.py | 10 +++++ .../buildingclusterrecovery.py} | 24 ++++++----- .../buildingdamage.py} | 7 +++ .../buildingdata.py} | 7 +++ .../analyses/buildingportfolio/__init__.py | 3 -- .../test_buildingclusterrecovery.py | 43 +++++++++++++++++++ .../test_buildingportfolio.py | 43 ------------------- 8 files changed, 86 insertions(+), 57 deletions(-) create mode 100644 pyincore/analyses/buildingclusterrecovery/__init__.py rename pyincore/analyses/{buildingportfolio/BuildingPortfolioRecoveryAnalysis.py => buildingclusterrecovery/buildingclusterrecovery.py} (97%) rename pyincore/analyses/{buildingportfolio/BuildingDamage.py => buildingclusterrecovery/buildingdamage.py} (62%) rename 
pyincore/analyses/{buildingportfolio/BuildingData.py => buildingclusterrecovery/buildingdata.py} (69%) delete mode 100644 pyincore/analyses/buildingportfolio/__init__.py create mode 100644 tests/pyincore/analyses/buildingclusterrecovery/test_buildingclusterrecovery.py delete mode 100644 tests/pyincore/analyses/buildingportfolio/test_buildingportfolio.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 0121a20ef..5a6a98ae8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,12 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Rename Housing Recovery to Housing Valuation Recovery Analysis [#560](https://github.com/IN-CORE/pyincore/issues/560) +## [Unreleased] + +### Changed +- Rename Building Portfolio Analysis to Building Cluster Recovery Analysis [#559](https://github.com/IN-CORE/pyincore/issues/559) + + ## [1.18.1] - 2024-04-30 ### Changed diff --git a/pyincore/analyses/buildingclusterrecovery/__init__.py b/pyincore/analyses/buildingclusterrecovery/__init__.py new file mode 100644 index 000000000..863b9caf1 --- /dev/null +++ b/pyincore/analyses/buildingclusterrecovery/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +from pyincore.analyses.buildingclusterrecovery.buildingclusterrecovery import BuildingClusterRecovery +from pyincore.analyses.buildingclusterrecovery.buildingdamage import BuildingDamage +from pyincore.analyses.buildingclusterrecovery.buildingdata import BuildingData diff --git a/pyincore/analyses/buildingportfolio/BuildingPortfolioRecoveryAnalysis.py b/pyincore/analyses/buildingclusterrecovery/buildingclusterrecovery.py similarity index 97% rename from pyincore/analyses/buildingportfolio/BuildingPortfolioRecoveryAnalysis.py rename to pyincore/analyses/buildingclusterrecovery/buildingclusterrecovery.py index 81f8d9a7b..82e9ca9cc 100644 --- a/pyincore/analyses/buildingportfolio/BuildingPortfolioRecoveryAnalysis.py +++ b/pyincore/analyses/buildingclusterrecovery/buildingclusterrecovery.py @@ -1,3 +1,5 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. +# # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ @@ -10,11 +12,11 @@ import scipy.stats import concurrent.futures -from pyincore.analyses.buildingportfolio.BuildingData import BuildingData +from pyincore.analyses.buildingclusterrecovery.buildingdata import BuildingData from pyincore import BaseAnalysis, Dataset -class BuildingPortfolioRecoveryAnalysis(BaseAnalysis): +class BuildingClusterRecovery(BaseAnalysis): """The Building Portfolio Recovery analysis uses damage probabilities of structural components, nonstructural drift-sensitive components, and nonstructural acceleration-sensitive components to calculate building’s initial functionality loss. 
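For downstream users the rename above is a drop-in change: only the package path and class name differ, while parameter ids and dataset ids stay the same. A minimal, hypothetical migration sketch (the building data dataset id is a placeholder, not a value from this patch):

# Before: old module and class name
#   from pyincore.analyses.buildingportfolio import BuildingPortfolioRecoveryAnalysis
#   analysis = BuildingPortfolioRecoveryAnalysis(client)

# After: renamed module and class
from pyincore import IncoreClient
from pyincore.analyses.buildingclusterrecovery import BuildingClusterRecovery
import pyincore.globals as pyglobals

client = IncoreClient(pyglobals.INCORE_API_PROD_URL)
analysis = BuildingClusterRecovery(client)
analysis.set_parameter("result_name", "memphis")  # parameter ids unchanged
analysis.load_remote_input_dataset("building_data", "<building-data-dataset-id>")  # placeholder id
# ...load the remaining datasets and call run_analysis() as in the test further below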
@@ -24,12 +26,12 @@ class BuildingPortfolioRecoveryAnalysis(BaseAnalysis): """ def __init__(self, incore_client): - super(BuildingPortfolioRecoveryAnalysis, self).__init__(incore_client) + super(BuildingClusterRecovery, self).__init__(incore_client) def get_spec(self): return { - 'name': 'building-portfolio-damage-analysis', - 'description': 'Building Portfolio Damage Analysis (with uncertainty)', + 'name': 'building-cluster-recovery-analysis', + 'description': 'Building Cluster Recovery Analysis (with uncertainty)', 'input_parameters': [ { 'id': 'result_name', @@ -116,9 +118,9 @@ def get_spec(self): 'output_datasets': [ { 'id': 'result', - 'parent_type': 'buildingPortfolio', - 'description': 'Building portfolio recovery result.', - 'type': 'incore:portfolioRecovery' + 'parent_type': 'buildingClusterRecovery', + 'description': 'Building cluster recovery result.', + 'type': 'incore:clusterRecovery' } ] } @@ -343,7 +345,7 @@ def run(self): upper_bound95[t] = 1 # END: Additional Code for uncertainty Analysis - with open(output_base_name + '_portfolio-recovery.csv', 'w+', newline='') as output_file: + with open(output_base_name + '_cluster-recovery.csv', 'w+', newline='') as output_file: spam_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) spam_writer.writerow(['Week', 'Recovery_Percent_Func_Probability', '75P_Upper_Bound', '75P_Lower_Bound', '95P_Upper_Bound', '95P_Lower_Bound', @@ -354,14 +356,14 @@ def run(self): lower_bound95[i], upper_bound95[i]] + list(mean_recovery[i]) + [pdf[i]]) else: - with open(output_base_name + '_portfolio-recovery.csv', 'w+', newline='') as output_file: + with open(output_base_name + '_cluster-recovery.csv', 'w+', newline='') as output_file: spam_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) spam_writer.writerow(['Week', 'Recovery_Percent_Func_Probability', 'RecPercent_RE', 'RecPercent_RU', 'RecPercent_RO', 'RecPercent_BF', 'RecPercent_FF']) for i in range(time_steps): spam_writer.writerow([i + 1, mean_recovery_output[i]] + list(mean_recovery[i])) - self.set_output_dataset("result", Dataset.from_file(output_base_name + '_portfolio-recovery.csv', + self.set_output_dataset("result", Dataset.from_file(output_base_name + '_cluster-recovery.csv', data_type=self.output_datasets["result"]["spec"]["type"])) print("INFO: Finished executing Building Portfolio Recovery Analysis") diff --git a/pyincore/analyses/buildingportfolio/BuildingDamage.py b/pyincore/analyses/buildingclusterrecovery/buildingdamage.py similarity index 62% rename from pyincore/analyses/buildingportfolio/BuildingDamage.py rename to pyincore/analyses/buildingclusterrecovery/buildingdamage.py index 11003dd47..fe4ec4c72 100644 --- a/pyincore/analyses/buildingportfolio/BuildingDamage.py +++ b/pyincore/analyses/buildingclusterrecovery/buildingdamage.py @@ -1,3 +1,10 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. 
+# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + class BuildingDamage(object): def __init__(self, distance_to_center, restricted_entry, restricted_use, reoccupancy, best_line_functionality, diff --git a/pyincore/analyses/buildingportfolio/BuildingData.py b/pyincore/analyses/buildingclusterrecovery/buildingdata.py similarity index 69% rename from pyincore/analyses/buildingportfolio/BuildingData.py rename to pyincore/analyses/buildingclusterrecovery/buildingdata.py index 9695e78f8..cbd5b0e46 100644 --- a/pyincore/analyses/buildingportfolio/BuildingData.py +++ b/pyincore/analyses/buildingclusterrecovery/buildingdata.py @@ -1,3 +1,10 @@ +# Copyright (c) 2018 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + class BuildingData: def __init__(self, tract_id, lon, lat, structural, code_level, epsa_node_id, pwsa_node_id, tep_id, build_id_x, epsa_id, pwsa_id, finance, ep_pw_id, occupation_code): diff --git a/pyincore/analyses/buildingportfolio/__init__.py b/pyincore/analyses/buildingportfolio/__init__.py deleted file mode 100644 index 8ef9ff126..000000000 --- a/pyincore/analyses/buildingportfolio/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from pyincore.analyses.buildingportfolio.BuildingPortfolioRecoveryAnalysis import BuildingPortfolioRecoveryAnalysis -from pyincore.analyses.buildingportfolio.BuildingDamage import BuildingDamage -from pyincore.analyses.buildingportfolio.BuildingData import BuildingData diff --git a/tests/pyincore/analyses/buildingclusterrecovery/test_buildingclusterrecovery.py b/tests/pyincore/analyses/buildingclusterrecovery/test_buildingclusterrecovery.py new file mode 100644 index 000000000..b079819ff --- /dev/null +++ b/tests/pyincore/analyses/buildingclusterrecovery/test_buildingclusterrecovery.py @@ -0,0 +1,43 @@ +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +from pyincore.analyses.buildingclusterrecovery import BuildingClusterRecovery +from pyincore import IncoreClient +import pyincore.globals as pyglobals + +if __name__ == "__main__": + cred = None + try: + client = IncoreClient(pyglobals.INCORE_API_PROD_URL) + bldg_data_dataset = "5c756966c11bb369a33a0b0a" + occupancy_dataset = "5c7569f9c11bb369a33a0b16" + bldg_damage_dataset = "5c756a2fc11bb369a33a0b22" + mean_repair_dataset = "5c756ac5c11bb369a33a0b34" + utility_dataset = "5c756af4c11bb369a33a0b40" + utility_partial_dataset = "5c756b1ec11bb369a33a0b4c" + coefFL_dataset = "5c756b56c11bb369a33a0b58" + + bldg_cluster_recovery = BuildingClusterRecovery(client) + bldg_cluster_recovery.set_parameter("uncertainty", True) + bldg_cluster_recovery.set_parameter("sample_size", 35) # default none. 
Gets size form input dataset + bldg_cluster_recovery.set_parameter("random_sample_size", 50) # default 10000 + bldg_cluster_recovery.set_parameter("no_of_weeks", 100) # default 250 + bldg_cluster_recovery.set_parameter("num_cpu", 1) + bldg_cluster_recovery.set_parameter("result_name", "memphis") + + bldg_cluster_recovery.load_remote_input_dataset("building_data", bldg_data_dataset) + bldg_cluster_recovery.load_remote_input_dataset("occupancy_mapping", occupancy_dataset) + bldg_cluster_recovery.load_remote_input_dataset("building_damage", bldg_damage_dataset) + bldg_cluster_recovery.load_remote_input_dataset("dmg_ratios", mean_repair_dataset) + bldg_cluster_recovery.load_remote_input_dataset("utility", utility_dataset) + bldg_cluster_recovery.load_remote_input_dataset("utility_partial", utility_partial_dataset) + bldg_cluster_recovery.load_remote_input_dataset("coefFL", coefFL_dataset) + + bldg_cluster_recovery.run_analysis() + print(bldg_cluster_recovery.get_output_dataset("result").get_dataframe_from_csv().head()) + + except EnvironmentError: + raise + # traceback.print_exc() diff --git a/tests/pyincore/analyses/buildingportfolio/test_buildingportfolio.py b/tests/pyincore/analyses/buildingportfolio/test_buildingportfolio.py deleted file mode 100644 index 761d5d38c..000000000 --- a/tests/pyincore/analyses/buildingportfolio/test_buildingportfolio.py +++ /dev/null @@ -1,43 +0,0 @@ -# This program and the accompanying materials are made available under the -# terms of the Mozilla Public License v2.0 which accompanies this distribution, -# and is available at https://www.mozilla.org/en-US/MPL/2.0/ - - -from pyincore.analyses.buildingportfolio import BuildingPortfolioRecoveryAnalysis -from pyincore import IncoreClient -import pyincore.globals as pyglobals - -if __name__ == "__main__": - cred = None - try: - client = IncoreClient(pyglobals.INCORE_API_PROD_URL) - bldg_data_dataset = "5c756966c11bb369a33a0b0a" - occupancy_dataset = "5c7569f9c11bb369a33a0b16" - bldg_damage_dataset = "5c756a2fc11bb369a33a0b22" - mean_repair_dataset = "5c756ac5c11bb369a33a0b34" - utility_dataset = "5c756af4c11bb369a33a0b40" - utility_partial_dataset = "5c756b1ec11bb369a33a0b4c" - coefFL_dataset = "5c756b56c11bb369a33a0b58" - - bldg_portfolio_recovery = BuildingPortfolioRecoveryAnalysis(client) - bldg_portfolio_recovery.set_parameter("uncertainty", True) - bldg_portfolio_recovery.set_parameter("sample_size", 35) # default none. 
Gets size form input dataset - bldg_portfolio_recovery.set_parameter("random_sample_size", 50) # default 10000 - bldg_portfolio_recovery.set_parameter("no_of_weeks", 100) # default 250 - bldg_portfolio_recovery.set_parameter("num_cpu", 1) - bldg_portfolio_recovery.set_parameter("result_name", "memphis") - - bldg_portfolio_recovery.load_remote_input_dataset("building_data", bldg_data_dataset) - bldg_portfolio_recovery.load_remote_input_dataset("occupancy_mapping", occupancy_dataset) - bldg_portfolio_recovery.load_remote_input_dataset("building_damage", bldg_damage_dataset) - bldg_portfolio_recovery.load_remote_input_dataset("dmg_ratios", mean_repair_dataset) - bldg_portfolio_recovery.load_remote_input_dataset("utility", utility_dataset) - bldg_portfolio_recovery.load_remote_input_dataset("utility_partial", utility_partial_dataset) - bldg_portfolio_recovery.load_remote_input_dataset("coefFL", coefFL_dataset) - - bldg_portfolio_recovery.run_analysis() - print(bldg_portfolio_recovery.get_output_dataset("result").get_dataframe_from_csv().head()) - - except EnvironmentError: - raise - # traceback.print_exc() From 45750d788a35a66f211f77a934476d17b9d0e3e5 Mon Sep 17 00:00:00 2001 From: Chen Wang Date: Fri, 24 May 2024 13:51:41 -0500 Subject: [PATCH 09/17] 562 update building nonstructural damage to support flood (#571) * rewrite nonstructrual * rewrite pytest * move flood damage to nonstructural building damage * add changelog entry * switch back * update changelog * 566 update flood input to non structural damage for combined wind wave surge building damage (#577) * update to flood to non structural * update tests * changelog * overwrite the default fragility key * include hurricane; limit liquefaction to only eq * add hurricane * update output key * remove unused import * another place to use result * add retrofit strategy * fix the test * fix test buildling damage * building dataset --- CHANGELOG.md | 19 +- .../combinedwindwavesurgebuildingdamage.py | 2 +- .../combinedwindwavesurgebuildingloss.py | 2 +- .../nonstructbuildingdamage.py | 230 +++++++----------- .../nonstructbuildingutil.py | 22 -- .../buildingdamage/test_buildingdamage.py | 31 +-- .../test_buildingdamage_legacy.py | 33 +-- .../test_buildingdamage_retrofit.py | 33 +-- ...est_combinedwindwavesurgebuildingdamage.py | 6 +- .../test_combinedwindwavesurgebuildingloss.py | 6 +- .../test_flood_nonstructbuildingdamage.py | 33 +++ .../test_nonstructbuildingdamage.py | 49 ++-- ...st_nonstructbuildingdamage_w_hazard_obj.py | 46 ++-- 13 files changed, 191 insertions(+), 321 deletions(-) create mode 100644 tests/pyincore/analyses/nonstructbuildingdamage/test_flood_nonstructbuildingdamage.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 5a6a98ae8..6c9b7ca19 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,25 +7,20 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
## [Unreleased] +### Changed +- Rename Social Vulnerability Analysis to Social Vulnerability Index Analysis [#556](https://github.com/IN-CORE/pyincore/issues/556) +- Join dataset has an option for only keeping the table dataset fields [#299](https://github.com/IN-CORE/pyincore/issues/299) +- Update Non-structural Building Damage to support flood [#562](https://github.com/IN-CORE/pyincore/issues/562) +- Update flood input to non-structural building damage for combined wind-wave-surge building [#566](https://github.com/IN-CORE/pyincore/issues/566) +- Rename transportation recovery analysis to traffic flow recovery analysis [#558](https://github.com/IN-CORE/pyincore/issues/558) + ### Added - Gas Facility Damage Analysis [#568](https://github.com/IN-CORE/pyincore/issues/568) - Copyrights to transportation recovery analysis [#579](https://github.com/IN-CORE/pyincore/issues/579) -### Changed -- Join dataset has an option for only keeping the table dataset fields [#299](https://github.com/IN-CORE/pyincore/issues/299) - ### Fixed - Permission error in clearing cache process [#563](https://github.com/IN-CORE/pyincore/issues/563) -## [Unreleased] - -### Changed -- Rename Social Vulnerability Analysis to Social Vulnerability Index Analysis [#556](https://github.com/IN-CORE/pyincore/issues/556) - -## [Unreleased] - -### Changed -- Rename transportation recovery analysis to traffic flow recovery analysis [#558](https://github.com/IN-CORE/pyincore/issues/558) ## [Unreleased] diff --git a/pyincore/analyses/combinedwindwavesurgebuildingdamage/combinedwindwavesurgebuildingdamage.py b/pyincore/analyses/combinedwindwavesurgebuildingdamage/combinedwindwavesurgebuildingdamage.py index 75753f6fa..a153f40aa 100755 --- a/pyincore/analyses/combinedwindwavesurgebuildingdamage/combinedwindwavesurgebuildingdamage.py +++ b/pyincore/analyses/combinedwindwavesurgebuildingdamage/combinedwindwavesurgebuildingdamage.py @@ -164,7 +164,7 @@ def get_spec(self): 'id': 'flood_damage', 'required': True, 'description': 'Flood damage result that has damage intervals in it', - 'type': ['ergo:buildingDamageVer6'] + 'type': ['ergo:nsBuildingInventoryDamageVer4'] }, ], diff --git a/pyincore/analyses/combinedwindwavesurgebuildingloss/combinedwindwavesurgebuildingloss.py b/pyincore/analyses/combinedwindwavesurgebuildingloss/combinedwindwavesurgebuildingloss.py index 84919fadb..4da14a672 100755 --- a/pyincore/analyses/combinedwindwavesurgebuildingloss/combinedwindwavesurgebuildingloss.py +++ b/pyincore/analyses/combinedwindwavesurgebuildingloss/combinedwindwavesurgebuildingloss.py @@ -182,7 +182,7 @@ def get_spec(self): 'id': 'flood_damage', 'required': True, 'description': 'Flood damage result that has damage intervals in it', - 'type': ['ergo:buildingDamageVer6'] + 'type': ['ergo:nsBuildingInventoryDamageVer4'] }, { 'id': 'structural_cost', diff --git a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py index d8d37a5eb..757a0e58d 100644 --- a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py +++ b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py @@ -12,6 +12,7 @@ from pyincore.analyses.nonstructbuildingdamage.nonstructbuildingutil import \ NonStructBuildingUtil from pyincore.models.dfr3curve import DFR3Curve +from pyincore.utils.datasetutil import DatasetUtil class NonStructBuildingDamage(BaseAnalysis): @@ -31,19 +32,27 @@ def __init__(self, incore_client): def run(self): """Executes building damage 
analysis.""" # Building dataset - building_set = self.get_input_dataset("buildings").get_inventory_reader() + building_dataset = self.get_input_dataset("buildings") + + # building retrofit strategy + retrofit_strategy_dataset = self.get_input_dataset("retrofit_strategy") + + # mapping + dfr3_mapping_set = self.get_input_dataset("dfr3_mapping_set") + + # Update the building inventory dataset if applicable + bldg_dataset, tmpdirname, _ = DatasetUtil.construct_updated_inventories(building_dataset, + add_info_dataset=retrofit_strategy_dataset, + mapping=dfr3_mapping_set) + building_set = bldg_dataset.get_inventory_reader() # get input hazard hazard, hazard_type, hazard_dataset_id = self.create_hazard_object_from_input_params() # set Default Fragility key - fragility_key_as = self.get_parameter("fragility_key_as") - if fragility_key_as is None: - self.set_parameter("fragility_key_as", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS) - - fragility_key_ds = self.get_parameter("fragility_key_ds") - if fragility_key_ds is None: - self.set_parameter("fragility_key_ds", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_DS) + fragility_key = self.get_parameter("fragility_key") + if fragility_key is None: + self.set_parameter("fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS) # Set Default Hazard Uncertainty use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty") @@ -97,14 +106,14 @@ def building_damage_concurrent_future(self, function_name, num_workers, *args): dict: An ordered dictionary with building data/metadata. """ - output_ds = [] + output = [] output_dmg = [] with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor: for ret1, ret2 in executor.map(function_name, *args): - output_ds.extend(ret1) + output.extend(ret1) output_dmg.extend(ret2) - return output_ds, output_dmg + return output, output_dmg def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, hazard_dataset_id): """Run analysis for multiple buildings. 
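For context on the hunk above: building_damage_concurrent_future fans chunks of the building inventory out over a process pool and concatenates the two per-chunk result lists. A self-contained sketch of that pattern with a toy worker (an illustration of the collection pattern, not the analysis code itself):

import concurrent.futures


def toy_worker(buildings):
    # Stand-in for building_damage_analysis_bulk_input: returns a
    # (building_results, damage_results) pair for one chunk of buildings.
    building_results = [{"guid": b} for b in buildings]
    damage_results = [{"guid": b, "DS_0": 0.0} for b in buildings]
    return building_results, damage_results


def concurrent_map(worker, num_workers, chunks):
    # Mirrors the pattern above: each worker call yields two lists,
    # which are extended into the combined outputs in order.
    output, output_dmg = [], []
    with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor:
        for ret1, ret2 in executor.map(worker, chunks):
            output.extend(ret1)
            output_dmg.extend(ret2)
    return output, output_dmg


if __name__ == "__main__":
    print(concurrent_map(toy_worker, 2, [["b1", "b2"], ["b3"]]))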
@@ -131,42 +140,28 @@ def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, ha building_results = [] damage_results = [] - fragility_sets_as = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings, - self.get_parameter("fragility_key_as")) - fragility_sets_ds = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings, - self.get_parameter("fragility_key_ds")) - values_payload_as = [] - values_payload_ds = [] + fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings, + self.get_parameter("fragility_key")) + values_payload = [] values_payload_liq = [] mapped_buildings = [] unmapped_buildings = [] liquefaction_resp = None for building in buildings: - if building["id"] in fragility_sets_as and building["id"] in fragility_sets_ds: - fragility_set_as = fragility_sets_as[building["id"]] - fragility_set_ds = fragility_sets_ds[building["id"]] + if building["id"] in fragility_sets: + fragility_set = fragility_sets[building["id"]] location = GeoUtil.get_location(building) loc = str(location.y) + "," + str(location.x) # Acceleration-Sensitive - demands_as, units_as, _ = AnalysisUtil.get_hazard_demand_types_units(building, fragility_set_as, - hazard_type, allowed_demand_types) - value_as = { - "demands": demands_as, - "units": units_as, - "loc": loc - } - values_payload_as.append(value_as) - - # Drift-Sensitive - demands_ds, units_ds, _ = AnalysisUtil.get_hazard_demand_types_units(building, fragility_set_ds, - hazard_type, allowed_demand_types) - value_ds = { - "demands": demands_ds, - "units": units_ds, + demands, units, _ = AnalysisUtil.get_hazard_demand_types_units(building, fragility_set, hazard_type, + allowed_demand_types) + value = { + "demands": demands, + "units": units, "loc": loc } - values_payload_ds.append(value_ds) + values_payload.append(value) # liquefaction if use_liquefaction: @@ -185,8 +180,7 @@ def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, ha # get hazard values and liquefaction if hazard_type == 'earthquake': - hazard_resp_as = hazard.read_hazard_values(values_payload_as, self.hazardsvc) - hazard_resp_ds = hazard.read_hazard_values(values_payload_ds, self.hazardsvc) + hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) # adjust dmg probability for liquefaction if use_liquefaction: @@ -197,17 +191,19 @@ def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, ha else: raise ValueError('Hazard does not support liquefaction! 
Check to make sure you defined the ' 'liquefaction portion of your scenario earthquake.') + elif hazard_type == 'flood': + hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) + elif hazard_type == 'hurricane': + # include hurricane flood + hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) else: raise ValueError("The provided hazard type is not supported yet by this analysis") # calculate LS and DS for i, building in enumerate(mapped_buildings): - dmg_probability_as = {"LS_0": None, "LS_1": None, "LS_2": None} - dmg_interval_as = {"DS_0": None, "DS_1": None, "DS_2": None, "DS_3": None} - dmg_probability_ds = {"LS_0": None, "LS_1": None, "LS_2": None} - dmg_interval_ds = {"DS_0": None, "DS_1": None, "DS_2": None, "DS_3": None} - fragility_set_as = fragility_sets_as[building["id"]] - fragility_set_ds = fragility_sets_ds[building["id"]] + dmg_probability = dict() + dmg_interval = dict() + fragility_set = fragility_sets[building["id"]] # TODO this value needs to come from the hazard service # adjust dmg probability for hazard uncertainty @@ -215,100 +211,49 @@ def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, ha raise ValueError('Uncertainty has not yet been implemented!') ############### - # AS - if isinstance(fragility_set_as.fragility_curves[0], DFR3Curve): - hazard_vals_as = AnalysisUtil.update_precision_of_lists(hazard_resp_as[i]["hazardValues"]) - demand_types_as = hazard_resp_as[i]["demands"] - demand_units_as = hazard_resp_as[i]["units"] - hval_dict_as = dict() - for j, d in enumerate(fragility_set_as.demand_types): - hval_dict_as[d] = hazard_vals_as[j] - if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp_as[i]["hazardValues"]): - building_args = fragility_set_as.construct_expression_args_from_inventory(building) - dmg_probability_as = fragility_set_as. \ - calculate_limit_state(hval_dict_as, inventory_type="building", - **building_args) - # adjust dmg probability for liquefaction - if use_liquefaction: - if liq_geology_dataset_id is not None: - liquefaction_dmg = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i][ - "groundFailureProb"]) - dmg_probability_as = AnalysisUtil.update_precision_of_dicts( - NonStructBuildingUtil.adjust_damage_for_liquefaction(dmg_probability_as, - liquefaction_dmg)) - dmg_interval_as = fragility_set_ds.calculate_damage_interval(dmg_probability_as, - hazard_type=hazard_type, - inventory_type="building") - else: - raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are " - "seeing this please report the issue.") - - ############### - # DS - if isinstance(fragility_set_ds.fragility_curves[0], DFR3Curve): - hazard_vals_ds = AnalysisUtil.update_precision_of_lists(hazard_resp_ds[i]["hazardValues"]) - demand_types_ds = hazard_resp_ds[i]["demands"] - demand_units_ds = hazard_resp_ds[i]["units"] - hval_dict_ds = dict() - for j, d in enumerate(fragility_set_ds.demand_types): - hval_dict_ds[d] = hazard_vals_ds[j] - - if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp_ds[i]["hazardValues"]): - building_args = fragility_set_ds.construct_expression_args_from_inventory(building) - dmg_probability_ds = fragility_set_ds. 
\ - calculate_limit_state(hval_dict_ds, inventory_type="building", + if isinstance(fragility_set.fragility_curves[0], DFR3Curve): + hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"]) + demand_types = hazard_resp[i]["demands"] + demand_units = hazard_resp[i]["units"] + hval_dict = dict() + for j, d in enumerate(fragility_set.demand_types): + hval_dict[d] = hazard_vals[j] + if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]): + building_args = fragility_set.construct_expression_args_from_inventory(building) + dmg_probability = fragility_set. \ + calculate_limit_state(hval_dict, inventory_type="building", **building_args) # adjust dmg probability for liquefaction - if use_liquefaction: - if liq_geology_dataset_id is not None: - liquefaction_dmg = AnalysisUtil.update_precision_of_lists( - liquefaction_resp[i]["groundFailureProb"]) - dmg_probability_ds = AnalysisUtil.update_precision_of_dicts( - NonStructBuildingUtil.adjust_damage_for_liquefaction(dmg_probability_ds, - liquefaction_dmg)) - dmg_interval_ds = fragility_set_ds.calculate_damage_interval(dmg_probability_ds, - hazard_type=hazard_type, - inventory_type="building") + if hazard_type == 'earthquake' and use_liquefaction and liq_geology_dataset_id is not None: + liquefaction_dmg = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i][ + "groundFailureProb"]) + dmg_probability = AnalysisUtil.update_precision_of_dicts( + NonStructBuildingUtil.adjust_damage_for_liquefaction(dmg_probability, + liquefaction_dmg)) + + dmg_interval = fragility_set.calculate_damage_interval(dmg_probability, + hazard_type=hazard_type, + inventory_type="building") else: raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are " "seeing this please report the issue.") # put results in dictionary - # AS denotes acceleration-sensitive fragility assigned to the building. - # DS denotes drift-sensitive fragility assigned to the building. 
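The liquefaction branch above folds the ground-failure probability into each limit-state probability via NonStructBuildingUtil.adjust_damage_for_liquefaction (that utility method itself is left unchanged further down in this patch). A minimal sketch of the combination rule, assuming a single scalar ground-failure probability rather than the per-state list the utility accepts:

# Illustrative only: shaking-induced damage and ground failure are treated as
# independent events, so P_adjusted = P_LS + P_GF - P_LS * P_GF for each limit state.
def adjust_for_liquefaction_sketch(limit_state_probabilities, prob_ground_failure):
    return {ls: p + prob_ground_failure - p * prob_ground_failure
            for ls, p in limit_state_probabilities.items()}

# adjust_for_liquefaction_sketch({"LS_0": 0.8, "LS_1": 0.5, "LS_2": 0.2}, 0.1)
# returns {"LS_0": 0.82, "LS_1": 0.55, "LS_2": 0.28}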
building_result = dict() building_result['guid'] = building['properties']['guid'] - building_result['AS_LS_0'] = dmg_probability_as['LS_0'] - building_result['AS_LS_1'] = dmg_probability_as['LS_1'] - building_result['AS_LS_2'] = dmg_probability_as['LS_2'] - building_result['AS_DS_0'] = dmg_interval_as['DS_0'] - building_result['AS_DS_1'] = dmg_interval_as['DS_1'] - building_result['AS_DS_2'] = dmg_interval_as['DS_2'] - building_result['AS_DS_3'] = dmg_interval_as['DS_3'] - building_result['DS_LS_0'] = dmg_probability_ds['LS_0'] - building_result['DS_LS_1'] = dmg_probability_ds['LS_1'] - building_result['DS_LS_2'] = dmg_probability_ds['LS_2'] - building_result['DS_DS_0'] = dmg_interval_ds['DS_0'] - building_result['DS_DS_1'] = dmg_interval_ds['DS_1'] - building_result['DS_DS_2'] = dmg_interval_ds['DS_2'] - building_result['DS_DS_3'] = dmg_interval_ds['DS_3'] - hazard_exposure_as = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals_as, hazard_type) - hazard_exposure_ds = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals_ds, hazard_type) - haz_expose = NonStructBuildingUtil.determine_haz_exposure(hazard_exposure_as, hazard_exposure_ds) - building_result['haz_expose'] = haz_expose + building_result.update(dmg_probability) + building_result.update(dmg_interval) + hazard_exposure = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type) + building_result['haz_expose'] = hazard_exposure # put damage results in dictionary damage_result = dict() damage_result['guid'] = building['properties']['guid'] - damage_result['fragility_id_as'] = fragility_set_as.id - damage_result['demandtypes_as'] = demand_types_as - damage_result['demandunits_as'] = demand_units_as - damage_result['fragility_id_ds'] = fragility_set_ds.id - damage_result['demandtypes_ds'] = demand_types_ds - damage_result['demandunits_ds'] = demand_units_ds + damage_result['fragility_id'] = fragility_set.id + damage_result['demandtypes'] = demand_types + damage_result['demandunits'] = demand_units damage_result['hazardtype'] = hazard_type - damage_result['hazardvals_as'] = hazard_vals_as - damage_result['hazardvals_ds'] = hazard_vals_ds + damage_result['hazardvals'] = hazard_vals building_results.append(building_result) damage_results.append(damage_result) @@ -319,15 +264,11 @@ def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, ha damage_result = dict() damage_result['guid'] = building['properties']['guid'] - damage_result['fragility_id_as'] = None - damage_result['demandtypes_as'] = None - damage_result['demandunits_as'] = None - damage_result['fragility_id_ds'] = None - damage_result['demandtypes_ds'] = None - damage_result['demandunits_ds'] = None + damage_result['fragility_id'] = None + damage_result['demandtypes'] = None + damage_result['demandunits'] = None damage_result['hazardtype'] = None - damage_result['hazardvals_as'] = None - damage_result['hazardvals_ds'] = None + damage_result['hazardvals'] = None building_results.append(building_result) damage_results.append(damage_result) @@ -354,7 +295,7 @@ def get_spec(self): { 'id': 'hazard_type', 'required': False, - 'description': 'Hazard Type (e.g. earthquake)', + 'description': 'Hazard Type (e.g. 
earthquake, flood, hurricane)', 'type': str }, { @@ -364,15 +305,9 @@ def get_spec(self): 'type': str }, { - 'id': 'fragility_key_as', - 'required': False, - 'description': 'Acceleration-Sensitive Fragility key to use in mapping dataset', - 'type': str - }, - { - 'id': 'fragility_key_ds', + 'id': 'fragility_key', 'required': False, - 'description': 'Drift-Sensitive Fragility key to use in mapping dataset', + 'description': 'Non-structural Fragility key to use in mapping dataset', 'type': str }, { @@ -406,7 +341,7 @@ def get_spec(self): 'id': 'hazard', 'required': False, 'description': 'Hazard object', - 'type': ["earthquake"] + 'type': ["earthquake", "flood", "hurricane"] }, ], 'input_datasets': [ @@ -414,13 +349,20 @@ def get_spec(self): 'id': 'buildings', 'required': True, 'description': 'building Inventory', - 'type': ['ergo:buildingInventoryVer4'], + 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', + 'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'], }, { 'id': 'dfr3_mapping_set', 'required': True, 'description': 'DFR3 Mapping Set Object', 'type': ['incore:dfr3MappingSet'], + }, + { + 'id': 'retrofit_strategy', + 'required': False, + 'description': 'Building retrofit strategy that contains guid and retrofit method', + 'type': ['incore:retrofitStrategy'] } ], 'output_datasets': [ @@ -428,7 +370,7 @@ def get_spec(self): 'id': 'result', 'parent_type': 'buildings', 'description': 'CSV file of damage states for building non-structural damage', - 'type': 'ergo:nsBuildingInventoryDamageVer3' + 'type': 'ergo:nsBuildingInventoryDamageVer4' }, { 'id': 'damage_result', diff --git a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingutil.py b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingutil.py index 4655d566e..21e9f25a5 100644 --- a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingutil.py +++ b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingutil.py @@ -60,25 +60,3 @@ def adjust_damage_for_liquefaction(limit_state_probabilities, ground_failure_pro + prob_ground_failure - limit_state_probabilities[keys[j]] * prob_ground_failure return adjusted_limit_state_probabilities - - @staticmethod - def determine_haz_exposure(hazard_exposure_as, hazard_exposure_ds): - """ - Determine the hazard exposure of the building based on the - Args: - hazard_exposure_as: - hazard_exposure_ds: - - Returns: - - """ - if hazard_exposure_as == "yes" and hazard_exposure_ds == "yes": - haz_expose = "yes" - elif hazard_exposure_as == "error" or hazard_exposure_ds == "error": - haz_expose = "error" - elif hazard_exposure_as == "no" and hazard_exposure_ds == "no": - haz_expose = "no" - else: - haz_expose = "partial" - - return haz_expose diff --git a/tests/pyincore/analyses/buildingdamage/test_buildingdamage.py b/tests/pyincore/analyses/buildingdamage/test_buildingdamage.py index 95ccdfae9..8c91969fb 100644 --- a/tests/pyincore/analyses/buildingdamage/test_buildingdamage.py +++ b/tests/pyincore/analyses/buildingdamage/test_buildingdamage.py @@ -1,4 +1,4 @@ -from pyincore import IncoreClient, FragilityService, MappingSet, Earthquake, HazardService, Tsunami, Hurricane, Flood, \ +from pyincore import IncoreClient, FragilityService, MappingSet, Earthquake, HazardService, Tsunami, Hurricane, \ Tornado from pyincore.analyses.buildingdamage import BuildingDamage import pyincore.globals as pyglobals @@ -89,31 +89,6 @@ def run_with_base_class(): bldg_dmg.set_parameter("num_cpu", 4) bldg_dmg.run_analysis() - 
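With the optional retrofit_strategy input and the single fragility_key parameter now declared in the non-structural spec above, the analysis can be wired up for flood with a retrofit plan much like the structural building damage analysis. This is a hedged sketch only; the dataset, mapping, and hazard ids are placeholders and the result name is arbitrary, none of them taken from this patch:

from pyincore import IncoreClient, Dataset, FragilityService, MappingSet, Flood, HazardService
from pyincore.analyses.nonstructbuildingdamage import NonStructBuildingDamage
import pyincore.globals as pyglobals

client = IncoreClient(pyglobals.INCORE_API_DEV_URL)
ns_dmg = NonStructBuildingDamage(client)
ns_dmg.load_remote_input_dataset("buildings", "<building inventory dataset id>")
mapping_set = MappingSet(FragilityService(client).get_mapping("<flood fragility mapping id>"))
ns_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)
# optional retrofit plan, enabled by the new "retrofit_strategy" input above
retrofit_plan = Dataset.from_file("flood_retrofit_plan.csv", data_type="incore:retrofitStrategy")
ns_dmg.set_input_dataset("retrofit_strategy", retrofit_plan)
flood = Flood.from_hazard_service("<flood hazard id>", HazardService(client))
ns_dmg.set_input_hazard("hazard", flood)
ns_dmg.set_parameter("fragility_key", "Lumberton Flood Building Fragility ID Code")
ns_dmg.set_parameter("result_name", "ns_building_dmg_flood_retrofit")
ns_dmg.run_analysis()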
########################################################## - # lumberton flood - flood = Flood.from_hazard_service("5f4d02e99f43ee0dde768406", hazardsvc) - - # lumberton building inventory v7 - # bldg_dataset_id = "603010f7b1db9c28aef53214" # 40 building subset - bldg_dataset_id = "603010a4b1db9c28aef5319f" # 21k full building - - bldg_dmg = BuildingDamage(client) - bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) - - # lumberton building mapping (with equation) - mapping_id = "602f3cf981bd2c09ad8f4f9d" - fragility_service = FragilityService(client) - mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) - bldg_dmg.set_parameter("fragility_key", "Lumberton Flood Building Fragility ID Code") - - bldg_dmg.set_input_hazard("hazard", flood) - - result_name = "lumberton_flood_dmg_result" - bldg_dmg.set_parameter("result_name", result_name) - bldg_dmg.set_parameter("num_cpu", 4) - bldg_dmg.run_analysis() - ########################################################## # joplin tornado without strategy bldg_dataset_id = "5df7d0de425e0b00092d0082" # joplin building v6 @@ -121,12 +96,10 @@ def run_with_base_class(): bldg_dmg = BuildingDamage(client) bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) - mapping_id = "6091d9fbb53ed4646fd276ca" # 19 archetype with retrofit - # mapping_id = "60994a1906d63d5ded1d6dcc" # 19 archetype with retrofit new format mapping + mapping_id = "5e8e3a21eaa8b80001f04f1c" # 19 archetype mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) - bldg_dmg.set_parameter("fragility_key", "Fragility ID Code") tornado = Tornado.from_hazard_service("5dfa32bbc0601200080893fb", hazardsvc) bldg_dmg.set_input_hazard("hazard", tornado) diff --git a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_legacy.py b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_legacy.py index 82a12ac74..b7b2c3da4 100644 --- a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_legacy.py +++ b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_legacy.py @@ -97,47 +97,18 @@ def run_with_base_class(): bldg_dmg.set_parameter("num_cpu", 4) bldg_dmg.run_analysis() - # lumberton flood - hazard_type = "flood" - hazard_id = "5f4d02e99f43ee0dde768406" - - # lumberton building inventory v7 - # bldg_dataset_id = "603010f7b1db9c28aef53214" # 40 building subset - bldg_dataset_id = "603010a4b1db9c28aef5319f" # 21k full building - - bldg_dmg = BuildingDamage(client) - bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) - - # lumberton building mapping (with equation) - mapping_id = "602f3cf981bd2c09ad8f4f9d" - fragility_service = FragilityService(client) - mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) - bldg_dmg.set_parameter("fragility_key", "Lumberton Flood Building Fragility ID Code") - - result_name = os.path.join(result_folder, "lumberton_flood_dmg_result") - bldg_dmg.set_parameter("result_name", result_name) - bldg_dmg.set_parameter("hazard_type", hazard_type) - bldg_dmg.set_parameter("hazard_id", hazard_id) - bldg_dmg.set_parameter("num_cpu", 4) - bldg_dmg.run_analysis() - # joplin tornado with retrofit strategy bldg_dataset_id = "5df7d0de425e0b00092d0082" # joplin building v6 - retrofit_strategy_id = "6091d5a8daa06e14ee96d502" # plan 1 - # 
retrofit_strategy_id = "6091d5ffdaa06e14ee96d5ef" # plan 2 + retrofit_strategy_id = "660ab8f8ce705a7e54748557" # plan 1 bldg_dmg = BuildingDamage(client) bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) bldg_dmg.load_remote_input_dataset("retrofit_strategy", retrofit_strategy_id) - # lumberton building mapping (with equation) - mapping_id = "6091d9fbb53ed4646fd276ca" # 19 archetype with retrofit - # mapping_id = "60994a1906d63d5ded1d6dcc" # 19 archetype with retrofit new format mapping + mapping_id = "5e8e3a21eaa8b80001f04f1c" # 19 archetype with retrofit fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) - bldg_dmg.set_parameter("fragility_key", "Fragility ID Code") hazard_type = "tornado" hazard_id = "5dfa32bbc0601200080893fb" diff --git a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_retrofit.py b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_retrofit.py index b1be028f7..3c7ac52bd 100644 --- a/tests/pyincore/analyses/buildingdamage/test_buildingdamage_retrofit.py +++ b/tests/pyincore/analyses/buildingdamage/test_buildingdamage_retrofit.py @@ -1,6 +1,6 @@ import os -from pyincore import IncoreClient, MappingSet, Tornado, Dataset, HazardService, Flood +from pyincore import IncoreClient, MappingSet, Tornado, Dataset, HazardService from pyincore.analyses.buildingdamage import BuildingDamage import pyincore.globals as pyglobals import time @@ -53,37 +53,6 @@ def run_with_base_class(): end_time_1 = time.time() print(f"Joplin Tornado Retrofit execution time: {end_time_1 - start_time:.5f} seconds") - ############################## - # lumberton flood - flood = Flood.from_hazard_service("5f4d02e99f43ee0dde768406", dev_hazardsvc) - - flood_fragility_mapping_set = MappingSet.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, - "retrofit/flood_retrofit_mapping.json")) - # lumberton building inventory v7 - bldg_dataset_id = "603010a4b1db9c28aef5319f" # 21k full building - - flood_bldg_dmg = BuildingDamage(dev_client) - flood_bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) - - # lumberton building mapping (with equation) - flood_bldg_dmg.set_input_dataset("dfr3_mapping_set", flood_fragility_mapping_set) - flood_bldg_dmg.set_parameter("fragility_key", "Lumberton Flood Building Fragility ID Code") - - flood_bldg_dmg.set_input_hazard("hazard", flood) - - retrofit_strategy_plan = Dataset.from_file(os.path.join(pyglobals.TEST_DATA_DIR, - "retrofit/flood_retrofit_plan.csv"), - data_type="incore:retrofitStrategy") - flood_bldg_dmg.set_input_dataset("retrofit_strategy", retrofit_strategy_plan) - - result_name = "lumberton_flood_dmg_result_w_retrofit" - flood_bldg_dmg.set_parameter("result_name", os.path.join(result_folder, result_name)) - flood_bldg_dmg.set_parameter("num_cpu", 8) - flood_bldg_dmg.run_analysis() - - end_time_2 = time.time() - print(f"Lumberton Flood Retrofit execution time: {end_time_2 - end_time_1:.5f} seconds") - if __name__ == '__main__': run_with_base_class() diff --git a/tests/pyincore/analyses/combinedwindwavesurgebuildingdamage/test_combinedwindwavesurgebuildingdamage.py b/tests/pyincore/analyses/combinedwindwavesurgebuildingdamage/test_combinedwindwavesurgebuildingdamage.py index 6545ed3fd..832feba09 100644 --- a/tests/pyincore/analyses/combinedwindwavesurgebuildingdamage/test_combinedwindwavesurgebuildingdamage.py +++ 
b/tests/pyincore/analyses/combinedwindwavesurgebuildingdamage/test_combinedwindwavesurgebuildingdamage.py @@ -1,6 +1,7 @@ from pyincore import IncoreClient, FragilityService, MappingSet from pyincore.analyses.combinedwindwavesurgebuildingdamage import CombinedWindWaveSurgeBuildingDamage from pyincore.analyses.buildingdamage import BuildingDamage +from pyincore.analyses.nonstructbuildingdamage import NonStructBuildingDamage import pyincore.globals as pyglobals @@ -50,10 +51,11 @@ def run_with_base_class(): mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) # flood building damage - f_bldg_dmg = BuildingDamage(client) + f_bldg_dmg = NonStructBuildingDamage(client) f_bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) f_bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) f_bldg_dmg.set_parameter("result_name", "Galveston-flood-dmg") + f_bldg_dmg.set_parameter("fragility_key", "Non-Retrofit Fragility ID Code") f_bldg_dmg.set_parameter("hazard_type", hazard_type) f_bldg_dmg.set_parameter("hazard_id", hazard_id) f_bldg_dmg.set_parameter("num_cpu", 4) @@ -62,7 +64,7 @@ def run_with_base_class(): # Get damage outputs from different hazards surge_wave_damage = sw_bldg_dmg.get_output_dataset("ds_result") wind_damage = w_bldg_dmg.get_output_dataset("ds_result") - flood_damage = f_bldg_dmg.get_output_dataset("ds_result") + flood_damage = f_bldg_dmg.get_output_dataset("result") # Combined building damage to determine maximum damage state combined_bldg_dmg = CombinedWindWaveSurgeBuildingDamage(client) diff --git a/tests/pyincore/analyses/combinedwindwavesurgebuildingloss/test_combinedwindwavesurgebuildingloss.py b/tests/pyincore/analyses/combinedwindwavesurgebuildingloss/test_combinedwindwavesurgebuildingloss.py index 7a6c6c38e..39e30cdb5 100644 --- a/tests/pyincore/analyses/combinedwindwavesurgebuildingloss/test_combinedwindwavesurgebuildingloss.py +++ b/tests/pyincore/analyses/combinedwindwavesurgebuildingloss/test_combinedwindwavesurgebuildingloss.py @@ -1,6 +1,7 @@ from pyincore import IncoreClient, FragilityService, MappingSet from pyincore.analyses.combinedwindwavesurgebuildingloss import CombinedWindWaveSurgeBuildingLoss from pyincore.analyses.buildingdamage import BuildingDamage +from pyincore.analyses.nonstructbuildingdamage import NonStructBuildingDamage import pyincore.globals as pyglobals from timeit import default_timer as timer @@ -52,10 +53,11 @@ def run_with_base_class(): mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) # flood building damage - f_bldg_dmg = BuildingDamage(client) + f_bldg_dmg = NonStructBuildingDamage(client) f_bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) f_bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) f_bldg_dmg.set_parameter("result_name", "Galveston-flood-dmg") + f_bldg_dmg.set_parameter("fragility_key", "Non-Retrofit Fragility ID Code") f_bldg_dmg.set_parameter("hazard_type", hazard_type) f_bldg_dmg.set_parameter("hazard_id", hazard_id) f_bldg_dmg.set_parameter("num_cpu", 4) @@ -64,7 +66,7 @@ def run_with_base_class(): # Get damage outputs from different hazards surge_wave_damage = sw_bldg_dmg.get_output_dataset("ds_result") wind_damage = w_bldg_dmg.get_output_dataset("ds_result") - flood_damage = f_bldg_dmg.get_output_dataset("ds_result") + flood_damage = f_bldg_dmg.get_output_dataset("result") start = timer() diff --git a/tests/pyincore/analyses/nonstructbuildingdamage/test_flood_nonstructbuildingdamage.py 
b/tests/pyincore/analyses/nonstructbuildingdamage/test_flood_nonstructbuildingdamage.py new file mode 100644 index 000000000..8297309d3 --- /dev/null +++ b/tests/pyincore/analyses/nonstructbuildingdamage/test_flood_nonstructbuildingdamage.py @@ -0,0 +1,33 @@ +from pyincore import IncoreClient, FragilityService, MappingSet, Flood, HazardService +from pyincore.analyses.nonstructbuildingdamage import NonStructBuildingDamage +import pyincore.globals as pyglobals + + +def run_with_base_class(): + client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + hazardsvc = HazardService(client) + + # lumberton flood + flood = Flood.from_hazard_service("5f4d02e99f43ee0dde768406", hazardsvc) + + # lumberton building inventory v7 + # bldg_dataset_id = "603010f7b1db9c28aef53214" # 40 building subset + bldg_dataset_id = "603010a4b1db9c28aef5319f" # 21k full building + + # lumberton building mapping (with equation) + mapping_id = "602f3cf981bd2c09ad8f4f9d" + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + + non_structural_building_dmg_flood = NonStructBuildingDamage(client) + non_structural_building_dmg_flood.load_remote_input_dataset("buildings", bldg_dataset_id) + non_structural_building_dmg_flood.set_input_dataset('dfr3_mapping_set', mapping_set) + non_structural_building_dmg_flood.set_parameter("result_name", "non_structural_building_dmg_result_flood") + non_structural_building_dmg_flood.set_input_hazard("hazard", flood) + non_structural_building_dmg_flood.set_parameter("num_cpu", 4) + non_structural_building_dmg_flood.set_parameter("fragility_key", "Lumberton Flood Building Fragility ID Code") + non_structural_building_dmg_flood.run_analysis() + + +if __name__ == '__main__': + run_with_base_class() diff --git a/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage.py b/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage.py index 34866d595..637698d97 100644 --- a/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage.py +++ b/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage.py @@ -1,5 +1,5 @@ from pyincore import IncoreClient, FragilityService, MappingSet -from pyincore.analyses.nonstructbuildingdamage import NonStructBuildingDamage +from pyincore.analyses.nonstructbuildingdamage import NonStructBuildingDamage, NonStructBuildingUtil import pyincore.globals as pyglobals @@ -15,36 +15,39 @@ def run_with_base_class(): # Default Building Fragility Mapping v1.0 mapping_id = "5b47b350337d4a3629076f2c" - - non_structural_building_dmg = NonStructBuildingDamage(client) - - # Load input datasets - non_structural_building_dmg.load_remote_input_dataset("buildings", building_dataset_id) - - # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - non_structural_building_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) - - # Specify the result name - result_name = "non_structural_building_dmg_result" - - # Set analysis parameters - non_structural_building_dmg.set_parameter("result_name", result_name) - non_structural_building_dmg.set_parameter("hazard_type", hazard_type) - non_structural_building_dmg.set_parameter("hazard_id", hazard_id) - non_structural_building_dmg.set_parameter("num_cpu", 4) # use liquefaction (slow) # Shelby County Liquefaction Susceptibility use_liquefaction = True liq_geology_dataset_id = "5a284f55c7d30d13bc0824ba" - 
non_structural_building_dmg.set_parameter("use_liquefaction", use_liquefaction) - non_structural_building_dmg.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) - - # Run analysis - non_structural_building_dmg.run_analysis() + # Acceleration sensitive + non_structural_building_dmg_as = NonStructBuildingDamage(client) + non_structural_building_dmg_as.load_remote_input_dataset("buildings", building_dataset_id) + non_structural_building_dmg_as.set_input_dataset('dfr3_mapping_set', mapping_set) + non_structural_building_dmg_as.set_parameter("result_name", "non_structural_building_dmg_result_as") + non_structural_building_dmg_as.set_parameter("hazard_type", hazard_type) + non_structural_building_dmg_as.set_parameter("hazard_id", hazard_id) + non_structural_building_dmg_as.set_parameter("num_cpu", 4) + non_structural_building_dmg_as.set_parameter("fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS) + non_structural_building_dmg_as.set_parameter("use_liquefaction", use_liquefaction) + non_structural_building_dmg_as.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_as.run_analysis() + + # Drift sensitive + non_structural_building_dmg_ds = NonStructBuildingDamage(client) + non_structural_building_dmg_ds.load_remote_input_dataset("buildings", building_dataset_id) + non_structural_building_dmg_ds.set_input_dataset('dfr3_mapping_set', mapping_set) + non_structural_building_dmg_ds.set_parameter("result_name", "non_structural_building_dmg_result_ds") + non_structural_building_dmg_ds.set_parameter("hazard_type", hazard_type) + non_structural_building_dmg_ds.set_parameter("hazard_id", hazard_id) + non_structural_building_dmg_ds.set_parameter("num_cpu", 4) + non_structural_building_dmg_ds.set_parameter("fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_DS) + non_structural_building_dmg_ds.set_parameter("use_liquefaction", use_liquefaction) + non_structural_building_dmg_ds.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_ds.run_analysis() if __name__ == '__main__': diff --git a/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage_w_hazard_obj.py b/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage_w_hazard_obj.py index 4cbef73a6..dcc5107a4 100644 --- a/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage_w_hazard_obj.py +++ b/tests/pyincore/analyses/nonstructbuildingdamage/test_nonstructbuildingdamage_w_hazard_obj.py @@ -1,5 +1,5 @@ from pyincore import IncoreClient, FragilityService, MappingSet, HazardService, Earthquake -from pyincore.analyses.nonstructbuildingdamage import NonStructBuildingDamage +from pyincore.analyses.nonstructbuildingdamage import NonStructBuildingDamage, NonStructBuildingUtil import pyincore.globals as pyglobals @@ -15,36 +15,38 @@ def run_with_base_class(): # Default Building Fragility Mapping v1.0 mapping_id = "5b47b350337d4a3629076f2c" - - non_structural_building_dmg = NonStructBuildingDamage(client) - - # Load input datasets - non_structural_building_dmg.load_remote_input_dataset("buildings", building_dataset_id) - # Load fragility mapping fragility_service = FragilityService(client) mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) - non_structural_building_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) - - # Specify the result name - result_name = "non_structural_building_dmg_result_w_hazard_obj" - - non_structural_building_dmg.set_input_hazard("hazard", 
earthquake) - - # Set analysis parameters - non_structural_building_dmg.set_parameter("result_name", result_name) - non_structural_building_dmg.set_parameter("num_cpu", 4) # use liquefaction (slow) # Shelby County Liquefaction Susceptibility use_liquefaction = True liq_geology_dataset_id = "5a284f55c7d30d13bc0824ba" - non_structural_building_dmg.set_parameter("use_liquefaction", use_liquefaction) - non_structural_building_dmg.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) - - # Run analysis - non_structural_building_dmg.run_analysis() + # Acceleration sensitive + non_structural_building_dmg_as = NonStructBuildingDamage(client) + non_structural_building_dmg_as.load_remote_input_dataset("buildings", building_dataset_id) + non_structural_building_dmg_as.set_input_dataset('dfr3_mapping_set', mapping_set) + non_structural_building_dmg_as.set_input_hazard("hazard", earthquake) + non_structural_building_dmg_as.set_parameter("result_name", "non_structural_building_dmg_result_w_hazard_obj_as") + non_structural_building_dmg_as.set_parameter("num_cpu", 4) + non_structural_building_dmg_as.set_parameter("fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS) + non_structural_building_dmg_as.set_parameter("use_liquefaction", use_liquefaction) + non_structural_building_dmg_as.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_as.run_analysis() + + # Drift sensitive + non_structural_building_dmg_ds = NonStructBuildingDamage(client) + non_structural_building_dmg_ds.load_remote_input_dataset("buildings", building_dataset_id) + non_structural_building_dmg_ds.set_input_dataset('dfr3_mapping_set', mapping_set) + non_structural_building_dmg_ds.set_parameter("result_name", "non_structural_building_dmg_result_w_hazard_obj_ds") + non_structural_building_dmg_ds.set_input_hazard("hazard", earthquake) + non_structural_building_dmg_ds.set_parameter("num_cpu", 4) + non_structural_building_dmg_ds.set_parameter("fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_DS) + non_structural_building_dmg_ds.set_parameter("use_liquefaction", use_liquefaction) + non_structural_building_dmg_ds.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_ds.run_analysis() if __name__ == '__main__': From 57cb035c3d6e80b172ca9ad14fdaaf5cc9cf5729 Mon Sep 17 00:00:00 2001 From: YONG WOOK KIM Date: Thu, 30 May 2024 15:28:52 -0500 Subject: [PATCH 10/17] Fixed DFR3 service's out of index error (#564) * Fixed DFR3 service's out of index error * modified the situation of the rule is empty list * reversted the road damage test file name * modified test roaddamage to fix the PR * revert road damage test to fix the PR * recreated galveston road and updated the test with new id * changed the test road dataset id in road damage test * fixed the dataset id in test_roaddamage_w_hazard_obj --- CHANGELOG.md | 5 +++++ pyincore/dfr3service.py | 7 +++++++ tests/pyincore/analyses/roaddamage/test_roaddamage.py | 2 +- .../analyses/roaddamage/test_roaddamage_w_hazard_obj.py | 2 +- 4 files changed, 14 insertions(+), 2 deletions(-) mode change 100755 => 100644 tests/pyincore/analyses/roaddamage/test_roaddamage.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c9b7ca19..c1ed2536f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,11 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
- Rename Building Portfolio Analysis to Building Cluster Recovery Analysis [#559](https://github.com/IN-CORE/pyincore/issues/559) +## [Unrelased] + +### Fixed +- Out of index error in dfr3 service's property conversion when the rule is not found [#555](https://github.com/IN-CORE/pyincore/issues/555) + ## [1.18.1] - 2024-04-30 ### Changed diff --git a/pyincore/dfr3service.py b/pyincore/dfr3service.py index 68480b24f..a7bc56b41 100644 --- a/pyincore/dfr3service.py +++ b/pyincore/dfr3service.py @@ -395,10 +395,17 @@ def _convert_properties_to_dict(rules, properties): matched_properties = {} # Handle legacy rules if isinstance(rules, list): + # If the rules are empty, return the matched properties + if not rules or rules == [[]] or rules == [None]: + return matched_properties for i, and_rules in enumerate(rules): for j, rule in enumerate(and_rules): matched_properties.update(Dfr3Service._eval_property_from_inventory(rule, properties)) elif isinstance(rules, dict): + # If the rules are empty, return the matched properties + if not rules or rules == [[]] or rules == [None]: + return matched_properties + # Handles new style of rules boolean = list(rules.keys())[0] # AND or OR criteria = rules[boolean] diff --git a/tests/pyincore/analyses/roaddamage/test_roaddamage.py b/tests/pyincore/analyses/roaddamage/test_roaddamage.py old mode 100755 new mode 100644 index 667770050..810b0001a --- a/tests/pyincore/analyses/roaddamage/test_roaddamage.py +++ b/tests/pyincore/analyses/roaddamage/test_roaddamage.py @@ -63,7 +63,7 @@ def run_with_base_class(): # test galveston hurricane road failure # road inventory for Galveston island - road_dataset_id = "60ba5ee94219ee25a56b6999" + road_dataset_id = "664e5812efdc9f1ed5dc2f7f" # road damage by hurricane inundation mapping mapping_id = "60ba583b1f2b7d4a916faf03" # Galveston Deterministic Hurricane - Kriging inundationDuration diff --git a/tests/pyincore/analyses/roaddamage/test_roaddamage_w_hazard_obj.py b/tests/pyincore/analyses/roaddamage/test_roaddamage_w_hazard_obj.py index c10beae53..efb14356d 100755 --- a/tests/pyincore/analyses/roaddamage/test_roaddamage_w_hazard_obj.py +++ b/tests/pyincore/analyses/roaddamage/test_roaddamage_w_hazard_obj.py @@ -75,7 +75,7 @@ def run_with_base_class(): # test galveston hurricane road failure # road inventory for Galveston island - road_dataset_id = "60ba5ee94219ee25a56b6999" + road_dataset_id = "664e5812efdc9f1ed5dc2f7f" # road damage by hurricane inundation mapping mapping_id = "60ba583b1f2b7d4a916faf03" # Galveston Deterministic Hurricane - Kriging inundationDuration From 5fe7f6a87f328a54b1f57c4d47fd8817480c8233 Mon Sep 17 00:00:00 2001 From: YONG WOOK KIM Date: Tue, 4 Jun 2024 16:53:33 -0500 Subject: [PATCH 11/17] Add Google Analytics to pyincore documentation (#548) * Add Google Analytics to pyincore documentation * modified conf.py * added copy analytics.js in Dockerfile * Fixed typo * modified Dockerfile * modified index.rst * modified conf.py * added js in conf.py * changed the link to analytics.js * changed the position of the script * added insert ga python script * modified doc.yaml * modified requirements.txt's location in yaml * modified github action to install sphinx * modified github action for the location of requirements.txt * modified github action to install sphinx rtd_theme * moved the location of ga head script * modified script location * insert bs4 * insert print out statments in the script to see how it goes * added more logs in python script * added more logs in yaml * removed copy html part for 
test * modified html copied directory * make src/docs/html folder * make dockerfile to copy * print out modified html * modified dockerfile to copy not from builder * moved copy section from github action * changed to use the secret key for GA key * removed unnecessary lines * Updated changelog * changed to use configmap * revert gitignore back * modified Dockerfile * Removed unused comment --- CHANGELOG.md | 5 ++++ Dockerfile | 5 ++++ config/config.json | 3 ++ config/googleAnalytics.js | 31 ++++++++++++++++++++ docs/source/conf.py | 2 +- docs/source/index.rst | 3 +- docs/source/insert_ga_to_header.py | 46 ++++++++++++++++++++++++++++++ 7 files changed, 93 insertions(+), 2 deletions(-) create mode 100644 config/config.json create mode 100644 config/googleAnalytics.js create mode 100644 docs/source/insert_ga_to_header.py diff --git a/CHANGELOG.md b/CHANGELOG.md index c1ed2536f..cb106ff02 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,11 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Unnecessary dependency in setup.py [#519](https://github.com/IN-CORE/pyincore/issues/519) +## [Unreleased] + +### Added +- Google Analytics to the documentation site [#547](https://github.com/IN-CORE/pyincore/issues/547) + ## [1.18.0] - 2024-04-03 ### Added diff --git a/Dockerfile b/Dockerfile index 284adc242..52cb6340e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,6 +11,7 @@ COPY requirements.txt . ENV PATH "$MAMBA_ROOT_PREFIX/bin:$PATH" RUN micromamba install -y -n base -c conda-forge \ + beautifulsoup4 \ sphinx sphinx_rtd_theme \ -f requirements.txt @@ -18,9 +19,13 @@ RUN micromamba install -y -n base -c conda-forge \ COPY . ./ RUN sphinx-build -v -b html docs/source docs/build +# Run the insert_ga_to_header.py script to insert Google Analytics code +RUN python /src/docs/source/insert_ga_to_header.py + # ---------------------------------------------------------------------- # Building actual container # ---------------------------------------------------------------------- FROM nginx COPY --from=builder /src/docs/build/ /usr/share/nginx/html/doc/pyincore/ +COPY config /usr/share/nginx/html/doc/pyincore/config diff --git a/config/config.json b/config/config.json new file mode 100644 index 000000000..279dd30ca --- /dev/null +++ b/config/config.json @@ -0,0 +1,3 @@ +{ + "GA_KEY": "Test-Google-Analytics-Key-Replace-Me" +} \ No newline at end of file diff --git a/config/googleAnalytics.js b/config/googleAnalytics.js new file mode 100644 index 000000000..6e43a7e93 --- /dev/null +++ b/config/googleAnalytics.js @@ -0,0 +1,31 @@ +// analytics.js +(function() { + // Fetch the runtime configuration + fetch('config/config.json') + .then(response => { + if (!response.ok) { + throw new Error('Configuration file not found'); + } + return response.json(); + }) + .then(config => { + if (!config.GA_KEY) { + throw new Error('GA_KEY is missing in the configuration'); + } + + // Create the script tag for Google Tag Manager + const scriptTag = document.createElement('script'); + scriptTag.async = true; + scriptTag.src = `https://www.googletagmanager.com/gtag/js?id=${config.GA_KEY}`; + document.head.appendChild(scriptTag); + + // Initialize Google Analytics + window.dataLayer = window.dataLayer || []; + + function gtag() { dataLayer.push(arguments); } + + gtag('js', new Date()); + gtag('config', config.GA_KEY); + }) + .catch(error => console.warn('GA setup skipped:', error.message)); +})(); \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index 
70e559d8e..5cd8c1f43 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -237,4 +237,4 @@ # -- Options for intersphinx extension --------------------------------------- # Example configuration for inter sphinx: refer to the Python standard library. -# intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} +# intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index b69d992c6..6d01973f8 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -18,4 +18,5 @@ To **update** pyIncore run the following command: :name: mastertoc mod_top - refs \ No newline at end of file + refs + diff --git a/docs/source/insert_ga_to_header.py b/docs/source/insert_ga_to_header.py new file mode 100644 index 000000000..94e7f4fd5 --- /dev/null +++ b/docs/source/insert_ga_to_header.py @@ -0,0 +1,46 @@ +import os +from bs4 import BeautifulSoup + +# Directory containing the built HTML files +build_dir = "docs/build" + +# Google Analytics code snippet to insert into the HTML files +ga_code = f""" + +""" + +# Loop through each HTML file in the build directory +for filename in os.listdir(build_dir): + if filename.endswith(".html"): + filepath = os.path.join(build_dir, filename) + print(f"Processing file: {filepath}") + + # Read the content of the HTML file + with open(filepath, "r", encoding="utf-8") as file: + html_content = file.read() + + # Parse HTML content using BeautifulSoup + soup = BeautifulSoup(html_content, "html.parser") + + # Find the tag and insert the Google Analytics code before it + head_tag = soup.find("head") + if head_tag: + print(f"Found tag in {filename}:") + print("Inserting Google Analytics code...") + head_tag.insert(0, BeautifulSoup(ga_code, "html.parser")) + + # Write the modified HTML content back to the file + with open(filepath, "w", encoding="utf-8") as file: + file.write(str(soup)) + +print("Google Analytics code insertion completed.") From 16932f8d451d54721deb10298278aa4a5ceb40fd Mon Sep 17 00:00:00 2001 From: Vismayak Mohanarajan Date: Wed, 5 Jun 2024 14:20:39 -0500 Subject: [PATCH 12/17] Buyout Model Implementation (#553) * Buyout Model Implementation * Update CHANGELOG.md * Suggested fixes * add to sphinx doc and pep8 reformat --------- Co-authored-by: Chen Wang --- CHANGELOG.md | 1 + docs/source/modules.rst | 5 + pyincore/analyses/buyoutdecision/__init__.py | 7 + .../analyses/buyoutdecision/buyoutdecision.py | 177 ++++++++++++++++++ .../buyoutdecision/test_buyoutdecision.py | 46 +++++ 5 files changed, 236 insertions(+) create mode 100644 pyincore/analyses/buyoutdecision/__init__.py create mode 100644 pyincore/analyses/buyoutdecision/buyoutdecision.py create mode 100644 tests/pyincore/analyses/buyoutdecision/test_buyoutdecision.py diff --git a/CHANGELOG.md b/CHANGELOG.md index cb106ff02..15bfa29e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
### Added - Gas Facility Damage Analysis [#568](https://github.com/IN-CORE/pyincore/issues/568) - Copyrights to transportation recovery analysis [#579](https://github.com/IN-CORE/pyincore/issues/579) +- Buyout Model Analyses [#539](https://github.com/IN-CORE/pyincore/issues/539) ### Fixed - Permission error in clearing cache process [#563](https://github.com/IN-CORE/pyincore/issues/563) diff --git a/docs/source/modules.rst b/docs/source/modules.rst index afe10f545..e8b9bf809 100644 --- a/docs/source/modules.rst +++ b/docs/source/modules.rst @@ -35,6 +35,11 @@ analyses/buildingportfolio .. autoclass:: pyincore.analyses.buildingportfolio.recovery.BuildingData :members: +analyses/buyoutdecision +======================= +.. autoclass:: buyoutdecision.buyoutdecision.BuyoutDecision + :members: + analyses/capitalshocks ====================== diff --git a/pyincore/analyses/buyoutdecision/__init__.py b/pyincore/analyses/buyoutdecision/__init__.py new file mode 100644 index 000000000..390600fff --- /dev/null +++ b/pyincore/analyses/buyoutdecision/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2024 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +from pyincore.analyses.buyoutdecision.buyoutdecision import BuyoutDecision diff --git a/pyincore/analyses/buyoutdecision/buyoutdecision.py b/pyincore/analyses/buyoutdecision/buyoutdecision.py new file mode 100644 index 000000000..0fa352f77 --- /dev/null +++ b/pyincore/analyses/buyoutdecision/buyoutdecision.py @@ -0,0 +1,177 @@ +# Copyright (c) 2024 University of Illinois and others. All rights reserved. + +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +import pandas as pd +from pyincore import BaseAnalysis +from pyincore.utils.dataprocessutil import DataProcessUtil + + +class BuyoutDecision(BaseAnalysis): + """A framework to select households for buyout based on past and future flood damaged. + + Args: + incore_client(IncoreClient): Service authentication. 
+ + """ + + def __init__(self, incore_client): + super(BuyoutDecision, self).__init__(incore_client) + + def run(self): + # Get input parameters + fema_buyout_cap = self.get_parameter('fema_buyout_cap') + residential_archetypes = self.get_parameter('residential_archetypes') + + # Get input datasets + past_building_damage = self.get_input_dataset('past_building_damage').get_dataframe_from_csv(low_memory=False) + future_building_damage = self.get_input_dataset('future_building_damage').get_dataframe_from_csv( + low_memory=False) + + building_inventory = self.get_input_dataset('buildings').get_dataframe_from_shapefile() + + hua = (self.get_input_dataset('housing_unit_allocation').get_dataframe_from_csv(low_memory=False)) + pop_dislocation = self.get_input_dataset('population_dislocation').get_dataframe_from_csv(low_memory=False) + + buyout_decision_df = self.buyout_decision(past_building_damage, future_building_damage, building_inventory, hua, + pop_dislocation, fema_buyout_cap, residential_archetypes) + # Create the result dataset + self.set_result_csv_data("result", buyout_decision_df, self.get_parameter("result_name") + "_loss", + "dataframe") + + def buyout_decision(self, past_building_damage, future_building_damage, building_inventory, hua, pop_dislocation, + fema_buyout_cap, residential_archetpyes): + """Select households for buyout based on past and future flood damaged. + + Args: + past_building_damage (DataFrame): Past building damage. + future_building_damage (DataFrame): Future event building damage. + building_inventory (DataFrame): Building inventory. + hua (DataFrame): Housing unit allocation. + pop_dislocation (DataFrame): Population dislocation from past hazard event. + fema_buyout_cap (float): FEMA buyout cap. + residential_archetpyes (list): Residential archetypes. + + Returns: + buyout_decision_df (DataFrame): A dataframe with buyout decision for each household. 
+ """ + + past_building_max_damage = DataProcessUtil.get_max_damage_state(past_building_damage) + future_building_max_damage = DataProcessUtil.get_max_damage_state(future_building_damage) + + # Criterion 1: Filter only residential buildings with damage state DS3 from past building damage + buyout_inventory = pd.merge(building_inventory, past_building_max_damage, on='guid', how='outer') + buyout_inventory = buyout_inventory[buyout_inventory['arch_wind'].isin(residential_archetpyes) + & (buyout_inventory['max_state'] == 'DS_3')] + buyout_inventory.rename(columns={'max_state': 'max_state_past_damage'}, inplace=True) + + # Criterion 2: Filter only residential buildings with damage state DS3 from predicted future building damage + buyout_inventory = pd.merge(buyout_inventory, future_building_max_damage, on='guid', how='inner') + buyout_inventory = buyout_inventory[buyout_inventory['max_state'] == 'DS_3'] + buyout_inventory.rename(columns={'max_state': 'max_state_future_damage'}, inplace=True) + + # Criterion 3: Fall within the FEMA buyout cap + buyout_inventory = buyout_inventory[buyout_inventory['appr_bldg'] <= fema_buyout_cap] + buyout_inventory = buyout_inventory[ + ["guid", "appr_bldg", "max_state_future_damage", "max_state_past_damage", "geometry"]] + + # Criterion 4: Use HUA to filter out buildings with 0 occupants + buyout_inventory = pd.merge(buyout_inventory, hua, on='guid', how='left') + buyout_inventory = buyout_inventory[(buyout_inventory['numprec'] != 0) & (~buyout_inventory['numprec'].isna())] + + # Removing any rows with NAN values in column "Race" + buyout_inventory = buyout_inventory.dropna(subset=['race']) + + # Merging with population dislocation + buyout_inventory = pd.merge(buyout_inventory, pop_dislocation[['huid', 'dislocated']], on='huid', how='left') + + # Create a new column showing the appraisal value of each building ('appr_bldg' divided by the number of times + # a guid is repeated) + # For the instances that a structure has more than one housing units. 
+ buyout_inventory['count'] = buyout_inventory.groupby('guid')['guid'].transform('count') + buyout_inventory['housing_unit_appraisal_value'] = buyout_inventory['appr_bldg'] / buyout_inventory['count'] + + # Cleaning the dataframe + buyout_inventory.drop(['blockid', 'bgid', 'tractid', 'FIPScounty', + 'gqtype', 'BLOCKID10_str', 'placeNAME10', 'geometry_y'], axis=1, inplace=True) + buyout_inventory.rename(columns={'appr_bldg': 'building_appraisal_value', 'ownershp': 'ownership', + 'dislocated_combined_dmg': 'dislocated', 'count': 'number_of_housing_units', + 'geometry_x': 'geometry'}, + inplace=True) + buyout_inventory = buyout_inventory[ + ['guid', 'huid', 'building_appraisal_value', 'housing_unit_appraisal_value', 'geometry', + 'number_of_housing_units', 'numprec', 'ownership', 'race', 'hispan', 'family', 'vacancy', 'incomegroup', + 'hhinc', 'randincome', 'poverty', 'huestimate', 'dislocated', 'max_state_future_damage', + 'max_state_past_damage', 'x', 'y', ]] + + return buyout_inventory + + def get_spec(self): + return { + "name": "buyout-decision", + "description": "Buyout decision framework", + "input_parameters": [ + { + 'id': 'fema_buyout_cap', + 'required': True, + 'description': 'FEMA buyout cap', + 'type': float, + }, + { + 'id': 'residential_archetypes', + 'required': True, + 'description': 'Residential archetypes', + 'type': list, + }, + { + 'id': 'result_name', + 'required': True, + 'description': 'Result name', + 'type': str, + } + ], + "input_datasets": [ + { + 'id': 'past_building_damage', + 'required': True, + 'description': 'Building Damage Results', + 'type': ['ergo:buildingDamageVer6'], + }, + { + 'id': 'future_building_damage', + 'required': True, + 'description': 'Building Damage Results', + 'type': ['ergo:buildingDamageVer6'], + }, + { + 'id': 'buildings', + 'required': True, + 'description': 'Building Inventory', + 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', + 'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'], + }, + { + 'id': 'housing_unit_allocation', + 'required': True, + 'description': 'A csv file with the merged dataset of the inputs, aka Probabilistic' + 'House Unit Allocation', + 'type': ['incore:housingUnitAllocation'] + }, + { + 'id': 'population_dislocation', + 'required': True, + 'description': 'Population Dislocation from past hazard event', + 'type': ['incore:popDislocation'] + } + ], + "output_datasets": [ + { + 'id': 'result', + 'label': 'Buyout Decision Results', + 'description': 'Buyout Decision Results', + 'type': ['incore:buyoutDecision'] + } + ] + } diff --git a/tests/pyincore/analyses/buyoutdecision/test_buyoutdecision.py b/tests/pyincore/analyses/buyoutdecision/test_buyoutdecision.py new file mode 100644 index 000000000..843f65ca0 --- /dev/null +++ b/tests/pyincore/analyses/buyoutdecision/test_buyoutdecision.py @@ -0,0 +1,46 @@ +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +from pyincore.analyses.buyoutdecision import BuyoutDecision +from pyincore import IncoreClient, Dataset +import pyincore.globals as pyglobals +import pandas as pd + + +def BuyoutDecisionTest(): + client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + + past_building_damage_id = "6632d2605da5fd22b268511f" + future_building_damage_id = "6632d45b5da5fd22b2685136" + past_pop_dislocation_id = "6632d5205da5fd22b26878bb" + + hua_id = "64227016b18d026e7c80d2bc" + + buildings_id = 
"63ff69a96d3b2a308baaca12" + + fema_buyout_cap = 321291.600 + residential_archetypes = [1, 2, 3, 4, 5, 17] + + buyout_decision = BuyoutDecision(client) + buyout_decision.set_parameter("fema_buyout_cap", fema_buyout_cap) + buyout_decision.set_parameter("residential_archetypes", residential_archetypes) + buyout_decision.set_parameter("result_name", "galveston_buyout") + + buyout_decision.load_remote_input_dataset("buildings", buildings_id) + buyout_decision.load_remote_input_dataset("housing_unit_allocation", hua_id) + buyout_decision.load_remote_input_dataset("past_building_damage", past_building_damage_id) + buyout_decision.load_remote_input_dataset("future_building_damage", future_building_damage_id) + buyout_decision.load_remote_input_dataset("population_dislocation", past_pop_dislocation_id) + + buyout_decision.run_analysis() + + result = buyout_decision.get_output_dataset("result") + result_df = result.get_dataframe_from_csv() + print(result_df.head()) + print(len(result_df)) + + +if __name__ == "__main__": + BuyoutDecisionTest() From 07c4dba405e22b5b76fee8c795b1f52b5b248388 Mon Sep 17 00:00:00 2001 From: Ya-Lan Yang <63822845+ylyangtw@users.noreply.github.com> Date: Thu, 6 Jun 2024 14:35:22 -0500 Subject: [PATCH 13/17] Rename building damage to building structural damage (#583) * rename * match length of = --- CHANGELOG.md | 1 + docs/source/modules.rst | 9 + .../analyses/buildingdamage/buildingdamage.py | 427 +---------------- .../buildingstructuraldamage/__init__.py | 9 + .../buildingstructuraldamage.py | 430 ++++++++++++++++++ .../buildingstructuraldamage/buildingutil.py | 17 + .../test_buildingstructuraldamage.py | 115 +++++ .../test_buildingstructuraldamage_legacy.py | 125 +++++ ...st_buildingstructuraldamage_multihazard.py | 38 ++ .../test_buildingstructuraldamage_offline.py | 74 +++ .../test_buildingstructuraldamage_retrofit.py | 58 +++ ...buildingstructuraldamage_w_local_hazard.py | 82 ++++ .../test_slc_buildingstructuraldamage.py | 36 ++ 13 files changed, 1002 insertions(+), 419 deletions(-) create mode 100644 pyincore/analyses/buildingstructuraldamage/__init__.py create mode 100755 pyincore/analyses/buildingstructuraldamage/buildingstructuraldamage.py create mode 100644 pyincore/analyses/buildingstructuraldamage/buildingutil.py create mode 100644 tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage.py create mode 100644 tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_legacy.py create mode 100644 tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_multihazard.py create mode 100644 tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_offline.py create mode 100644 tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_retrofit.py create mode 100644 tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_w_local_hazard.py create mode 100644 tests/pyincore/analyses/buildingstructuraldamage/test_slc_buildingstructuraldamage.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 15bfa29e7..403eef3c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
- Update Non-structural Building Damage to support flood [#562](https://github.com/IN-CORE/pyincore/issues/562) - Update flood input to non-structural building damage for combined wind-wave-surge building [#566](https://github.com/IN-CORE/pyincore/issues/566) - Rename transportation recovery analysis to traffic flow recovery analysis [#558](https://github.com/IN-CORE/pyincore/issues/558) +- Rename building damage to building structural damage [#561](https://github.com/IN-CORE/pyincore/issues/561) ### Added - Gas Facility Damage Analysis [#568](https://github.com/IN-CORE/pyincore/issues/568) diff --git a/docs/source/modules.rst b/docs/source/modules.rst index e8b9bf809..e2a9434a0 100644 --- a/docs/source/modules.rst +++ b/docs/source/modules.rst @@ -14,6 +14,15 @@ analyses/buildingdamage :members: .. autoclass:: buildingdamage.buildingutil.BuildingUtil :members: +.. deprecated:: 1.19.0 + This class will be deprecated soon. Use :class:`buildingstructuraldamage.BuildingStructuralDamage` instead. + +analyses/buildingstructuraldamage +================================= +.. autoclass:: buildingstructuraldamage.buildingstructuraldamage.BuildingStructuralDamage + :members: +.. autoclass:: buildingstructuraldamage.buildingutil.BuildingUtil + :members: analyses/buildingeconloss ========================= diff --git a/pyincore/analyses/buildingdamage/buildingdamage.py b/pyincore/analyses/buildingdamage/buildingdamage.py index e776e71c7..807aea5ac 100755 --- a/pyincore/analyses/buildingdamage/buildingdamage.py +++ b/pyincore/analyses/buildingdamage/buildingdamage.py @@ -4,427 +4,16 @@ # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ +from deprecated.sphinx import deprecated +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage -import concurrent.futures -from itertools import repeat - -from pyincore import BaseAnalysis, HazardService, \ - FragilityService, AnalysisUtil, GeoUtil -from pyincore.analyses.buildingdamage.buildingutil import BuildingUtil -from pyincore.models.dfr3curve import DFR3Curve -from pyincore.utils.datasetutil import DatasetUtil - - -class BuildingDamage(BaseAnalysis): - """Building Damage Analysis calculates the probability of building damage based on - different hazard type such as earthquake, tsunami, and tornado. - - Args: - incore_client (IncoreClient): Service authentication. - - """ - +@deprecated(version='1.19.0', reason="This class will be deprecated soon. 
Use BuildingStructuralDamage instead.") +class BuildingDamage(): def __init__(self, incore_client): - self.hazardsvc = HazardService(incore_client) - self.fragilitysvc = FragilityService(incore_client) - - super(BuildingDamage, self).__init__(incore_client) - - def run(self): - """Executes building damage analysis.""" - - # Building dataset - bldg_dataset = self.get_input_dataset("buildings") - - # building retrofit strategy - retrofit_strategy_dataset = self.get_input_dataset("retrofit_strategy") - - # mapping - dfr3_mapping_set = self.get_input_dataset("dfr3_mapping_set") - - # Update the building inventory dataset if applicable - bldg_dataset, tmpdirname, _ = DatasetUtil.construct_updated_inventories(bldg_dataset, - add_info_dataset=retrofit_strategy_dataset, - mapping=dfr3_mapping_set) - - bldg_set = bldg_dataset.get_inventory_reader() - - # Accommodating to multi-hazard - hazards = [] # hazard objects - hazard_object = self.get_input_hazard("hazard") - - # To use local hazard - if hazard_object is not None: - # Right now only supports single hazard for local hazard object - hazard_types = [hazard_object.hazard_type] - hazard_dataset_ids = [hazard_object.id] - hazards = [hazard_object] - # To use remote hazard - elif self.get_parameter("hazard_id") is not None and self.get_parameter("hazard_type") is not None: - hazard_dataset_ids = self.get_parameter("hazard_id").split("+") - hazard_types = self.get_parameter("hazard_type").split("+") - for hazard_type, hazard_dataset_id in zip(hazard_types, hazard_dataset_ids): - hazards.append(BaseAnalysis._create_hazard_object(hazard_type, hazard_dataset_id, self.hazardsvc)) - else: - raise ValueError("Either hazard object or hazard id + hazard type must be provided") - - # Get Fragility key - fragility_key = self.get_parameter("fragility_key") - if fragility_key is None: - fragility_key = BuildingUtil.DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY if 'tsunami' in hazard_types else \ - BuildingUtil.DEFAULT_FRAGILITY_KEY - self.set_parameter("fragility_key", fragility_key) - - user_defined_cpu = 1 - - if not self.get_parameter("num_cpu") is None and self.get_parameter("num_cpu") > 0: - user_defined_cpu = self.get_parameter("num_cpu") - - num_workers = AnalysisUtil.determine_parallelism_locally(self, len(bldg_set), user_defined_cpu) - - avg_bulk_input_size = int(len(bldg_set) / num_workers) - inventory_args = [] - count = 0 - inventory_list = list(bldg_set) - while count < len(inventory_list): - inventory_args.append(inventory_list[count:count + avg_bulk_input_size]) - count += avg_bulk_input_size - - (ds_results, damage_results) = self.building_damage_concurrent_future(self.building_damage_analysis_bulk_input, - num_workers, - inventory_args, - repeat(hazards), - repeat(hazard_types), - repeat(hazard_dataset_ids)) - - self.set_result_csv_data("ds_result", ds_results, name=self.get_parameter("result_name")) - self.set_result_json_data("damage_result", - damage_results, - name=self.get_parameter("result_name") + "_additional_info") - - # clean up temp folder if applicable - if tmpdirname is not None: - bldg_dataset.delete_temp_folder() - - return True - - def building_damage_concurrent_future(self, function_name, parallelism, *args): - """Utilizes concurrent.future module. - - Args: - function_name (function): The function to be parallelized. - parallelism (int): Number of workers in parallelization. - *args: All the arguments in order to pass into parameter function_name. 
- - Returns: - list: A list of ordered dictionaries with building damage values and other data/metadata. - - """ - output_ds = [] - output_dmg = [] - with concurrent.futures.ProcessPoolExecutor(max_workers=parallelism) as executor: - for ret1, ret2 in executor.map(function_name, *args): - output_ds.extend(ret1) - output_dmg.extend(ret2) - - return output_ds, output_dmg - - def building_damage_analysis_bulk_input(self, buildings, hazards, hazard_types, hazard_dataset_ids): - """Run analysis for multiple buildings. - - Args: - buildings (list): Multiple buildings from input inventory set. - hazards (list): List of hazard objects. - hazard_types (list): List of Hazard type, either earthquake, tornado, or tsunami. - hazard_dataset_ids (list): List of id of the hazard exposure. - - Returns: - list: A list of ordered dictionaries with building damage values and other data/metadata. + self._delegate = BuildingStructuralDamage(incore_client) + def __getattr__(self, name): """ - - fragility_key = self.get_parameter("fragility_key") - fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings, - fragility_key) - use_liquefaction = False - liquefaction_resp = None - # Get geology dataset id containing liquefaction susceptibility - geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id") - - multihazard_vals = {} - adjust_demand_types_mapping = {} - - for hazard, hazard_type, hazard_dataset_id in zip(hazards, hazard_types, hazard_dataset_ids): - # get allowed demand types for the hazard type - allowed_demand_types = [item["demand_type"].lower() for item in self.hazardsvc.get_allowed_demands( - hazard_type)] - - # Liquefaction - if hazard_type == "earthquake" and self.get_parameter("use_liquefaction") is not None: - use_liquefaction = self.get_parameter("use_liquefaction") - - values_payload = [] - values_payload_liq = [] # for liquefaction, if used - - # Pre-filter buildings that are in fragility_sets to reduce the number of iterations - mapped_buildings = [b for b in buildings if b["id"] in fragility_sets] - unmapped_buildings = [b for b in buildings if b["id"] not in fragility_sets] - - for b in mapped_buildings: - bldg_id = b["id"] - location = GeoUtil.get_location(b) - loc = str(location.y) + "," + str(location.x) - demands, units, adjusted_to_original = \ - AnalysisUtil.get_hazard_demand_types_units(b, - fragility_sets[bldg_id], - hazard_type, - allowed_demand_types) - adjust_demand_types_mapping.update(adjusted_to_original) - value = { - "demands": demands, - "units": units, - "loc": loc - } - values_payload.append(value) - - if use_liquefaction and geology_dataset_id is not None: - value_liq = { - "demands": [""], - "units": [""], - "loc": loc - } - values_payload_liq.append(value_liq) - - hazard_vals = hazard.read_hazard_values(values_payload, self.hazardsvc) - - # map demand type from payload to response - # worst code I have ever written - # e.g. 
1.04 Sec Sa --> 1.04 SA --> 1.0 SA - for payload, response in zip(values_payload, hazard_vals): - adjust_demand_types_mapping.update({ - response_demand: adjust_demand_types_mapping[payload_demand] - for payload_demand, response_demand in zip(payload["demands"], response["demands"]) - }) - - # record hazard value for each hazard type for later calcu - multihazard_vals[hazard_type] = hazard_vals - - # Check if liquefaction is applicable - if hazard_type == "earthquake" and use_liquefaction and geology_dataset_id is not None: - liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, geology_dataset_id, - values_payload_liq) - - # not needed anymore as they are already split into mapped and unmapped - del buildings - - ds_results = [] - damage_results = [] - - i = 0 - for b in mapped_buildings: - ds_result = dict() - damage_result = dict() - dmg_probability = dict() - dmg_interval = dict() - b_id = b["id"] - selected_fragility_set = fragility_sets[b_id] - ground_failure_prob = None - - # TODO: Once all fragilities are migrated to new format, we can remove this condition - if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve): - # Supports multiple hazard and multiple demand types in same fragility - hval_dict = dict() - b_multihaz_vals = dict() - b_demands = dict() - b_units = dict() - for hazard_type in hazard_types: - b_haz_vals = AnalysisUtil.update_precision_of_lists(multihazard_vals[hazard_type][i][ - "hazardValues"]) - b_demands[hazard_type] = multihazard_vals[hazard_type][i]["demands"] - b_units[hazard_type] = multihazard_vals[hazard_type][i]["units"] - b_multihaz_vals[hazard_type] = b_haz_vals - # To calculate damage, use demand type name from fragility that will be used in the expression, - # instead of using what the hazard service returns. There could be a difference "SA" in DFR3 vs - # "1.07 SA" from hazard - j = 0 - for adjusted_demand_type in multihazard_vals[hazard_type][i]["demands"]: - d = adjust_demand_types_mapping[adjusted_demand_type] - hval_dict[d] = b_haz_vals[j] - j += 1 - - # catch any of the hazard values error - hazard_values_errors = False - for hazard_type in hazard_types: - hazard_values_errors = hazard_values_errors or AnalysisUtil.do_hazard_values_have_errors( - b_multihaz_vals[hazard_type]) - - if not hazard_values_errors: - building_args = selected_fragility_set.construct_expression_args_from_inventory(b) - - building_period = selected_fragility_set.fragility_curves[0].get_building_period( - selected_fragility_set.curve_parameters, **building_args) - - dmg_probability = selected_fragility_set.calculate_limit_state( - hval_dict, **building_args, period=building_period) - - if use_liquefaction and geology_dataset_id is not None and liquefaction_resp is not None: - ground_failure_prob = liquefaction_resp[i][BuildingUtil.GROUND_FAILURE_PROB] - dmg_probability = AnalysisUtil.update_precision_of_dicts( - AnalysisUtil.adjust_damage_for_liquefaction(dmg_probability, ground_failure_prob)) - - dmg_interval = selected_fragility_set.calculate_damage_interval( - dmg_probability, hazard_type="+".join(hazard_types), inventory_type="building") - else: - raise ValueError("One of the fragilities is in deprecated format. This should not happen. 
If you are " - "seeing this please report the issue.") - - ds_result['guid'] = b['properties']['guid'] - damage_result['guid'] = b['properties']['guid'] - - ds_result.update(dmg_probability) - ds_result.update(dmg_interval) - - # determine expose from multiple hazard - haz_expose = False - for hazard_type in hazard_types: - haz_expose = haz_expose or AnalysisUtil.get_exposure_from_hazard_values(b_multihaz_vals[ - hazard_type], hazard_type) - ds_result['haz_expose'] = haz_expose - - damage_result['fragility_id'] = selected_fragility_set.id - damage_result['demandtype'] = b_demands - damage_result['demandunits'] = b_units - damage_result['hazardval'] = b_multihaz_vals - - if use_liquefaction and geology_dataset_id is not None: - damage_result[BuildingUtil.GROUND_FAILURE_PROB] = ground_failure_prob - - ds_results.append(ds_result) - damage_results.append(damage_result) - i += 1 - - for b in unmapped_buildings: - ds_result = dict() - damage_result = dict() - ds_result['guid'] = b['properties']['guid'] - damage_result['guid'] = b['properties']['guid'] - damage_result['fragility_id'] = None - damage_result['demandtype'] = None - damage_result['demandunits'] = None - damage_result['hazardval'] = None - - ds_results.append(ds_result) - damage_results.append(damage_result) - - return ds_results, damage_results - - def get_spec(self): - """Get specifications of the building damage analysis. - - Returns: - obj: A JSON object of specifications of the building damage analysis. - + Delegate attribute access to the BuildingStructuralDamage instance. """ - return { - 'name': 'building-damage', - 'description': 'building damage analysis', - 'input_parameters': [ - { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str - }, - { - 'id': 'hazard_type', - 'required': False, - 'description': 'Hazard Type (e.g. 
earthquake)', - 'type': str - }, - { - 'id': 'hazard_id', - 'required': False, - 'description': 'Hazard ID', - 'type': str - }, - { - 'id': 'fragility_key', - 'required': False, - 'description': 'Fragility key to use in mapping dataset', - 'type': str - }, - { - 'id': 'use_liquefaction', - 'required': False, - 'description': 'Use liquefaction', - 'type': bool - }, - { - 'id': 'use_hazard_uncertainty', - 'required': False, - 'description': 'Use hazard uncertainty', - 'type': bool - }, - { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int - }, - { - 'id': 'seed', - 'required': False, - 'description': 'Initial seed for the tornado hazard value', - 'type': int - }, - { - 'id': 'liquefaction_geology_dataset_id', - 'required': False, - 'description': 'Geology dataset id', - 'type': str, - } - ], - 'input_hazards': [ - { - 'id': 'hazard', - 'required': False, - 'description': 'Hazard object', - 'type': ["earthquake", "tornado", "hurricane", "flood", "tsunami"] - }, - ], - 'input_datasets': [ - { - 'id': 'buildings', - 'required': True, - 'description': 'Building Inventory', - 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', - 'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'], - }, - { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], - }, - { - 'id': 'retrofit_strategy', - 'required': False, - 'description': 'Building retrofit strategy that contains guid and retrofit method', - 'type': ['incore:retrofitStrategy'] - } - ], - 'output_datasets': [ - { - 'id': 'ds_result', - 'parent_type': 'buildings', - 'description': 'CSV file of damage states for building structural damage', - 'type': 'ergo:buildingDamageVer6' - }, - { - 'id': 'damage_result', - 'parent_type': 'buildings', - 'description': 'Json file with information about applied hazard value and fragility', - 'type': 'incore:buildingDamageSupplement' - } - ] - } + return getattr(self._delegate, name) \ No newline at end of file diff --git a/pyincore/analyses/buildingstructuraldamage/__init__.py b/pyincore/analyses/buildingstructuraldamage/__init__.py new file mode 100644 index 000000000..e46d58b8d --- /dev/null +++ b/pyincore/analyses/buildingstructuraldamage/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2019 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +from pyincore.analyses.buildingstructuraldamage.buildingutil import BuildingUtil +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage diff --git a/pyincore/analyses/buildingstructuraldamage/buildingstructuraldamage.py b/pyincore/analyses/buildingstructuraldamage/buildingstructuraldamage.py new file mode 100755 index 000000000..ef3ef2993 --- /dev/null +++ b/pyincore/analyses/buildingstructuraldamage/buildingstructuraldamage.py @@ -0,0 +1,430 @@ +# Copyright (c) 2019 University of Illinois and others. All rights reserved. 
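For reference, a minimal usage sketch of this rename (illustrative only, not part of the patch; the result name is a placeholder): scripts that still import BuildingDamage keep working because the thin shim above forwards every attribute access to an internal BuildingStructuralDamage instance via __getattr__, and the @deprecated decorator is expected to emit a deprecation warning, while new code should import the renamed class directly.

    from pyincore import IncoreClient
    from pyincore.analyses.buildingstructuraldamage import BuildingStructuralDamage
    from pyincore.analyses.buildingdamage.buildingdamage import BuildingDamage  # deprecated shim

    client = IncoreClient()

    # Preferred going forward: the renamed analysis class.
    bldg_dmg = BuildingStructuralDamage(client)

    # Legacy name still works: __getattr__ forwards calls to the internal
    # BuildingStructuralDamage delegate, so existing scripts run unchanged.
    legacy_dmg = BuildingDamage(client)
    legacy_dmg.set_parameter("result_name", "example_result")  # placeholder value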
+# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +import concurrent.futures +from itertools import repeat + +from pyincore import BaseAnalysis, HazardService, \ + FragilityService, AnalysisUtil, GeoUtil +from pyincore.analyses.buildingstructuraldamage.buildingutil import BuildingUtil +from pyincore.models.dfr3curve import DFR3Curve +from pyincore.utils.datasetutil import DatasetUtil + + +class BuildingStructuralDamage(BaseAnalysis): + """Building Damage Analysis calculates the probability of building damage based on + different hazard type such as earthquake, tsunami, and tornado. + + Args: + incore_client (IncoreClient): Service authentication. + + """ + + def __init__(self, incore_client): + self.hazardsvc = HazardService(incore_client) + self.fragilitysvc = FragilityService(incore_client) + + super(BuildingStructuralDamage, self).__init__(incore_client) + + def run(self): + """Executes building damage analysis.""" + + # Building dataset + bldg_dataset = self.get_input_dataset("buildings") + + # building retrofit strategy + retrofit_strategy_dataset = self.get_input_dataset("retrofit_strategy") + + # mapping + dfr3_mapping_set = self.get_input_dataset("dfr3_mapping_set") + + # Update the building inventory dataset if applicable + bldg_dataset, tmpdirname, _ = DatasetUtil.construct_updated_inventories(bldg_dataset, + add_info_dataset=retrofit_strategy_dataset, + mapping=dfr3_mapping_set) + + bldg_set = bldg_dataset.get_inventory_reader() + + # Accommodating to multi-hazard + hazards = [] # hazard objects + hazard_object = self.get_input_hazard("hazard") + + # To use local hazard + if hazard_object is not None: + # Right now only supports single hazard for local hazard object + hazard_types = [hazard_object.hazard_type] + hazard_dataset_ids = [hazard_object.id] + hazards = [hazard_object] + # To use remote hazard + elif self.get_parameter("hazard_id") is not None and self.get_parameter("hazard_type") is not None: + hazard_dataset_ids = self.get_parameter("hazard_id").split("+") + hazard_types = self.get_parameter("hazard_type").split("+") + for hazard_type, hazard_dataset_id in zip(hazard_types, hazard_dataset_ids): + hazards.append(BaseAnalysis._create_hazard_object(hazard_type, hazard_dataset_id, self.hazardsvc)) + else: + raise ValueError("Either hazard object or hazard id + hazard type must be provided") + + # Get Fragility key + fragility_key = self.get_parameter("fragility_key") + if fragility_key is None: + fragility_key = BuildingUtil.DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY if 'tsunami' in hazard_types else \ + BuildingUtil.DEFAULT_FRAGILITY_KEY + self.set_parameter("fragility_key", fragility_key) + + user_defined_cpu = 1 + + if not self.get_parameter("num_cpu") is None and self.get_parameter("num_cpu") > 0: + user_defined_cpu = self.get_parameter("num_cpu") + + num_workers = AnalysisUtil.determine_parallelism_locally(self, len(bldg_set), user_defined_cpu) + + avg_bulk_input_size = int(len(bldg_set) / num_workers) + inventory_args = [] + count = 0 + inventory_list = list(bldg_set) + while count < len(inventory_list): + inventory_args.append(inventory_list[count:count + avg_bulk_input_size]) + count += avg_bulk_input_size + + (ds_results, damage_results) = self.building_damage_concurrent_future(self.building_damage_analysis_bulk_input, + num_workers, + inventory_args, + repeat(hazards), + repeat(hazard_types), + 
repeat(hazard_dataset_ids)) + + self.set_result_csv_data("ds_result", ds_results, name=self.get_parameter("result_name")) + self.set_result_json_data("damage_result", + damage_results, + name=self.get_parameter("result_name") + "_additional_info") + + # clean up temp folder if applicable + if tmpdirname is not None: + bldg_dataset.delete_temp_folder() + + return True + + def building_damage_concurrent_future(self, function_name, parallelism, *args): + """Utilizes concurrent.future module. + + Args: + function_name (function): The function to be parallelized. + parallelism (int): Number of workers in parallelization. + *args: All the arguments in order to pass into parameter function_name. + + Returns: + list: A list of ordered dictionaries with building damage values and other data/metadata. + + """ + output_ds = [] + output_dmg = [] + with concurrent.futures.ProcessPoolExecutor(max_workers=parallelism) as executor: + for ret1, ret2 in executor.map(function_name, *args): + output_ds.extend(ret1) + output_dmg.extend(ret2) + + return output_ds, output_dmg + + def building_damage_analysis_bulk_input(self, buildings, hazards, hazard_types, hazard_dataset_ids): + """Run analysis for multiple buildings. + + Args: + buildings (list): Multiple buildings from input inventory set. + hazards (list): List of hazard objects. + hazard_types (list): List of Hazard type, either earthquake, tornado, or tsunami. + hazard_dataset_ids (list): List of id of the hazard exposure. + + Returns: + list: A list of ordered dictionaries with building damage values and other data/metadata. + + """ + + fragility_key = self.get_parameter("fragility_key") + fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings, + fragility_key) + use_liquefaction = False + liquefaction_resp = None + # Get geology dataset id containing liquefaction susceptibility + geology_dataset_id = self.get_parameter("liquefaction_geology_dataset_id") + + multihazard_vals = {} + adjust_demand_types_mapping = {} + + for hazard, hazard_type, hazard_dataset_id in zip(hazards, hazard_types, hazard_dataset_ids): + # get allowed demand types for the hazard type + allowed_demand_types = [item["demand_type"].lower() for item in self.hazardsvc.get_allowed_demands( + hazard_type)] + + # Liquefaction + if hazard_type == "earthquake" and self.get_parameter("use_liquefaction") is not None: + use_liquefaction = self.get_parameter("use_liquefaction") + + values_payload = [] + values_payload_liq = [] # for liquefaction, if used + + # Pre-filter buildings that are in fragility_sets to reduce the number of iterations + mapped_buildings = [b for b in buildings if b["id"] in fragility_sets] + unmapped_buildings = [b for b in buildings if b["id"] not in fragility_sets] + + for b in mapped_buildings: + bldg_id = b["id"] + location = GeoUtil.get_location(b) + loc = str(location.y) + "," + str(location.x) + demands, units, adjusted_to_original = \ + AnalysisUtil.get_hazard_demand_types_units(b, + fragility_sets[bldg_id], + hazard_type, + allowed_demand_types) + adjust_demand_types_mapping.update(adjusted_to_original) + value = { + "demands": demands, + "units": units, + "loc": loc + } + values_payload.append(value) + + if use_liquefaction and geology_dataset_id is not None: + value_liq = { + "demands": [""], + "units": [""], + "loc": loc + } + values_payload_liq.append(value_liq) + + hazard_vals = hazard.read_hazard_values(values_payload, self.hazardsvc) + + # map demand type from payload to response + # worst code I have 
ever written + # e.g. 1.04 Sec Sa --> 1.04 SA --> 1.0 SA + for payload, response in zip(values_payload, hazard_vals): + adjust_demand_types_mapping.update({ + response_demand: adjust_demand_types_mapping[payload_demand] + for payload_demand, response_demand in zip(payload["demands"], response["demands"]) + }) + + # record hazard value for each hazard type for later calcu + multihazard_vals[hazard_type] = hazard_vals + + # Check if liquefaction is applicable + if hazard_type == "earthquake" and use_liquefaction and geology_dataset_id is not None: + liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, geology_dataset_id, + values_payload_liq) + + # not needed anymore as they are already split into mapped and unmapped + del buildings + + ds_results = [] + damage_results = [] + + i = 0 + for b in mapped_buildings: + ds_result = dict() + damage_result = dict() + dmg_probability = dict() + dmg_interval = dict() + b_id = b["id"] + selected_fragility_set = fragility_sets[b_id] + ground_failure_prob = None + + # TODO: Once all fragilities are migrated to new format, we can remove this condition + if isinstance(selected_fragility_set.fragility_curves[0], DFR3Curve): + # Supports multiple hazard and multiple demand types in same fragility + hval_dict = dict() + b_multihaz_vals = dict() + b_demands = dict() + b_units = dict() + for hazard_type in hazard_types: + b_haz_vals = AnalysisUtil.update_precision_of_lists(multihazard_vals[hazard_type][i][ + "hazardValues"]) + b_demands[hazard_type] = multihazard_vals[hazard_type][i]["demands"] + b_units[hazard_type] = multihazard_vals[hazard_type][i]["units"] + b_multihaz_vals[hazard_type] = b_haz_vals + # To calculate damage, use demand type name from fragility that will be used in the expression, + # instead of using what the hazard service returns. There could be a difference "SA" in DFR3 vs + # "1.07 SA" from hazard + j = 0 + for adjusted_demand_type in multihazard_vals[hazard_type][i]["demands"]: + d = adjust_demand_types_mapping[adjusted_demand_type] + hval_dict[d] = b_haz_vals[j] + j += 1 + + # catch any of the hazard values error + hazard_values_errors = False + for hazard_type in hazard_types: + hazard_values_errors = hazard_values_errors or AnalysisUtil.do_hazard_values_have_errors( + b_multihaz_vals[hazard_type]) + + if not hazard_values_errors: + building_args = selected_fragility_set.construct_expression_args_from_inventory(b) + + building_period = selected_fragility_set.fragility_curves[0].get_building_period( + selected_fragility_set.curve_parameters, **building_args) + + dmg_probability = selected_fragility_set.calculate_limit_state( + hval_dict, **building_args, period=building_period) + + if use_liquefaction and geology_dataset_id is not None and liquefaction_resp is not None: + ground_failure_prob = liquefaction_resp[i][BuildingUtil.GROUND_FAILURE_PROB] + dmg_probability = AnalysisUtil.update_precision_of_dicts( + AnalysisUtil.adjust_damage_for_liquefaction(dmg_probability, ground_failure_prob)) + + dmg_interval = selected_fragility_set.calculate_damage_interval( + dmg_probability, hazard_type="+".join(hazard_types), inventory_type="building") + else: + raise ValueError("One of the fragilities is in deprecated format. This should not happen. 
If you are " + "seeing this please report the issue.") + + ds_result['guid'] = b['properties']['guid'] + damage_result['guid'] = b['properties']['guid'] + + ds_result.update(dmg_probability) + ds_result.update(dmg_interval) + + # determine expose from multiple hazard + haz_expose = False + for hazard_type in hazard_types: + haz_expose = haz_expose or AnalysisUtil.get_exposure_from_hazard_values(b_multihaz_vals[ + hazard_type], hazard_type) + ds_result['haz_expose'] = haz_expose + + damage_result['fragility_id'] = selected_fragility_set.id + damage_result['demandtype'] = b_demands + damage_result['demandunits'] = b_units + damage_result['hazardval'] = b_multihaz_vals + + if use_liquefaction and geology_dataset_id is not None: + damage_result[BuildingUtil.GROUND_FAILURE_PROB] = ground_failure_prob + + ds_results.append(ds_result) + damage_results.append(damage_result) + i += 1 + + for b in unmapped_buildings: + ds_result = dict() + damage_result = dict() + ds_result['guid'] = b['properties']['guid'] + damage_result['guid'] = b['properties']['guid'] + damage_result['fragility_id'] = None + damage_result['demandtype'] = None + damage_result['demandunits'] = None + damage_result['hazardval'] = None + + ds_results.append(ds_result) + damage_results.append(damage_result) + + return ds_results, damage_results + + def get_spec(self): + """Get specifications of the building damage analysis. + + Returns: + obj: A JSON object of specifications of the building damage analysis. + + """ + return { + 'name': 'building-damage', + 'description': 'building damage analysis', + 'input_parameters': [ + { + 'id': 'result_name', + 'required': True, + 'description': 'result dataset name', + 'type': str + }, + { + 'id': 'hazard_type', + 'required': False, + 'description': 'Hazard Type (e.g. 
earthquake)', + 'type': str + }, + { + 'id': 'hazard_id', + 'required': False, + 'description': 'Hazard ID', + 'type': str + }, + { + 'id': 'fragility_key', + 'required': False, + 'description': 'Fragility key to use in mapping dataset', + 'type': str + }, + { + 'id': 'use_liquefaction', + 'required': False, + 'description': 'Use liquefaction', + 'type': bool + }, + { + 'id': 'use_hazard_uncertainty', + 'required': False, + 'description': 'Use hazard uncertainty', + 'type': bool + }, + { + 'id': 'num_cpu', + 'required': False, + 'description': 'If using parallel execution, the number of cpus to request', + 'type': int + }, + { + 'id': 'seed', + 'required': False, + 'description': 'Initial seed for the tornado hazard value', + 'type': int + }, + { + 'id': 'liquefaction_geology_dataset_id', + 'required': False, + 'description': 'Geology dataset id', + 'type': str, + } + ], + 'input_hazards': [ + { + 'id': 'hazard', + 'required': False, + 'description': 'Hazard object', + 'type': ["earthquake", "tornado", "hurricane", "flood", "tsunami"] + }, + ], + 'input_datasets': [ + { + 'id': 'buildings', + 'required': True, + 'description': 'Building Inventory', + 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', + 'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'], + }, + { + 'id': 'dfr3_mapping_set', + 'required': True, + 'description': 'DFR3 Mapping Set Object', + 'type': ['incore:dfr3MappingSet'], + }, + { + 'id': 'retrofit_strategy', + 'required': False, + 'description': 'Building retrofit strategy that contains guid and retrofit method', + 'type': ['incore:retrofitStrategy'] + } + ], + 'output_datasets': [ + { + 'id': 'ds_result', + 'parent_type': 'buildings', + 'description': 'CSV file of damage states for building structural damage', + 'type': 'ergo:buildingDamageVer6' + }, + { + 'id': 'damage_result', + 'parent_type': 'buildings', + 'description': 'Json file with information about applied hazard value and fragility', + 'type': 'incore:buildingDamageSupplement' + } + ] + } diff --git a/pyincore/analyses/buildingstructuraldamage/buildingutil.py b/pyincore/analyses/buildingstructuraldamage/buildingutil.py new file mode 100644 index 000000000..07d7a4094 --- /dev/null +++ b/pyincore/analyses/buildingstructuraldamage/buildingutil.py @@ -0,0 +1,17 @@ +# Copyright (c) 2019 University of Illinois and others. All rights reserved. 
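For orientation, a short sketch of the two ways the run() method above can be given hazards (the hazard ids here are placeholders): either a hazard object is attached, or remote hazard parameters are set, in which case several hazards can be combined by joining the types and ids with "+" exactly as the split("+") calls in run() expect.

    from pyincore import IncoreClient, HazardService, Earthquake
    from pyincore.analyses.buildingstructuraldamage import BuildingStructuralDamage

    client = IncoreClient()
    bldg_dmg = BuildingStructuralDamage(client)

    # Option 1: attach a hazard object (currently one hazard per local object).
    eq = Earthquake.from_hazard_service("<earthquake-hazard-id>", HazardService(client))
    bldg_dmg.set_input_hazard("hazard", eq)

    # Option 2: remote hazards by id; types and ids are "+"-joined and paired in order.
    # bldg_dmg.set_parameter("hazard_type", "earthquake+tsunami")
    # bldg_dmg.set_parameter("hazard_id", "<earthquake-hazard-id>+<tsunami-hazard-id>")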
+# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +class BuildingUtil: + """Utility methods for the building damage analysis.""" + DEFAULT_FRAGILITY_KEY = "Non-Retrofit Fragility ID Code" + DEFAULT_TSUNAMI_HMAX_FRAGILITY_KEY = "Non-Retrofit Inundation Fragility ID Code" + DEFAULT_TSUNAMI_MMAX_FRAGILITY_KEY = "Non-Retrofit MomentumFlux Fragility ID Code" + DEFAULT_REPAIR_KEY = "Repair ID Code" + BLDG_STORIES = "no_stories" + PROPERTIES = "properties" + BLDG_PERIOD = "period" + GROUND_FAILURE_PROB = "groundFailureProb" diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage.py b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage.py new file mode 100644 index 000000000..15e7d22df --- /dev/null +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage.py @@ -0,0 +1,115 @@ +from pyincore import IncoreClient, FragilityService, MappingSet, Earthquake, HazardService, Tsunami, Hurricane, \ + Tornado +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +import pyincore.globals as pyglobals + + +def run_with_base_class(): + client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + hazardsvc = HazardService(client) + + ########################################################## + # Memphis Earthquake damage + # New madrid earthquake using Atkinson Boore 1995 + eq = Earthquake.from_hazard_service("5b902cb273c3371e1236b36b", hazardsvc) + + # Geology dataset + liq_geology_dataset_id = "5a284f53c7d30d13bc08249c" + + # Building dataset + # 5a284f0bc7d30d13bc081a28 5kb + # 5bcf2fcbf242fe047ce79dad 300kb + # 5a284f37c7d30d13bc08219c 20mb + bldg_dataset_id = "5a284f0bc7d30d13bc081a28" + + bldg_dmg = BuildingStructuralDamage(client) + bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) + + # Earthquake mapping + mapping_id = "5b47b350337d4a3629076f2c" + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + + bldg_dmg.set_input_hazard("hazard", eq) + + result_name = "memphis_eq_bldg_dmg_result" + bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("num_cpu", 4) + bldg_dmg.set_parameter("use_liquefaction", True) + bldg_dmg.set_parameter("liquefaction_geology_dataset_id", liq_geology_dataset_id) + + # Run Analysis + bldg_dmg.run_analysis() + + ########################################################## + # TSUNAMI + tsunami = Tsunami.from_hazard_service("5bc9e25ef7b08533c7e610dc", hazardsvc) + + # Seaside building dataset + bldg_dataset_id = "5bcf2fcbf242fe047ce79dad" + + # Run seaside tsunami building damage + bldg_dmg = BuildingStructuralDamage(client) + bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) + + # Tsunami mapping + mapping_id = "5b48fb1f337d4a478e7bd54d" + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_input_hazard("hazard", tsunami) + result_name = "seaside_tsunami_dmg_result" + bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("num_cpu", 4) + bldg_dmg.run_analysis() + + ########################################################## + # Hurricane + 
hurricane = Hurricane.from_hazard_service("5f11e50cc6491311a814584c", hazardsvc) + + # Galveston building dataset 602eba8bb1db9c28aef01358 + bldg_dataset_id = "602eba8bb1db9c28aef01358" # 19k buildings with age_group + # bldg_dataset_id = "602d61d0b1db9c28aeedea03" # 40 buildings without age_group + + bldg_dmg = BuildingStructuralDamage(client) + bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) + + # Hurricane building mapping (with equation) + mapping_id = "602c381a1d85547cdc9f0675" + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + + bldg_dmg.set_input_hazard("hazard", hurricane) + + result_name = "galveston_hurr_dmg_result" + bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("num_cpu", 4) + bldg_dmg.run_analysis() + + ########################################################## + # joplin tornado without strategy + bldg_dataset_id = "5df7d0de425e0b00092d0082" # joplin building v6 + + bldg_dmg = BuildingStructuralDamage(client) + bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) + + mapping_id = "5e8e3a21eaa8b80001f04f1c" # 19 archetype mapping + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + + tornado = Tornado.from_hazard_service("5dfa32bbc0601200080893fb", hazardsvc) + bldg_dmg.set_input_hazard("hazard", tornado) + + result_name = "joplin_tornado_dmg_result" + bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("num_cpu", 4) + bldg_dmg.set_parameter("seed", 1000) + bldg_dmg.run_analysis() + + +if __name__ == '__main__': + run_with_base_class() diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_legacy.py b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_legacy.py new file mode 100644 index 000000000..eeba894ce --- /dev/null +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_legacy.py @@ -0,0 +1,125 @@ +import os + +from pyincore import IncoreClient, FragilityService, MappingSet +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +import pyincore.globals as pyglobals + + +def run_with_base_class(): + result_folder = "legacy" + if not os.path.exists(result_folder): + os.mkdir(result_folder) + + client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + + # Memphis Earthquake damage + # New madrid earthquake using Atkinson Boore 1995 + hazard_type = "earthquake" + hazard_id = "5b902cb273c3371e1236b36b" + + # Geology dataset + liq_geology_dataset_id = "5a284f53c7d30d13bc08249c" + + # Building dataset + # 5a284f0bc7d30d13bc081a28 5kb + # 5bcf2fcbf242fe047ce79dad 300kb + # 5a284f37c7d30d13bc08219c 20mb + bldg_dataset_id = "5a284f0bc7d30d13bc081a28" + + bldg_dmg = BuildingStructuralDamage(client) + bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) + + # Earthquake mapping + mapping_id = "5b47b350337d4a3629076f2c" + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + + result_name = os.path.join(result_folder, "memphis_eq_bldg_dmg_result") + 
bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("hazard_type", hazard_type) + bldg_dmg.set_parameter("hazard_id", hazard_id) + bldg_dmg.set_parameter("num_cpu", 4) + bldg_dmg.set_parameter("use_liquefaction", True) + bldg_dmg.set_parameter("liquefaction_geology_dataset_id", liq_geology_dataset_id) + + # Run Analysis + bldg_dmg.run_analysis() + + # TSUNAMI + + hazard_type = "tsunami" + hazard_id = "5bc9e25ef7b08533c7e610dc" + + # Seaside building dataset + bldg_dataset_id = "5bcf2fcbf242fe047ce79dad" + + # Run seaside tsunami building damage + bldg_dmg = BuildingStructuralDamage(client) + bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) + + # Tsunami mapping + mapping_id = "5b48fb1f337d4a478e7bd54d" + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + + result_name = os.path.join(result_folder, "seaside_tsunami_dmg_result") + bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("hazard_type", hazard_type) + bldg_dmg.set_parameter("hazard_id", hazard_id) + bldg_dmg.set_parameter("num_cpu", 4) + bldg_dmg.run_analysis() + + # Hurricane + + hazard_type = "hurricane" + hazard_id = "5f11e50cc6491311a814584c" + + # Galveston building dataset 602eba8bb1db9c28aef01358 + bldg_dataset_id = "602eba8bb1db9c28aef01358" # 19k buildings with age_group + # bldg_dataset_id = "602d61d0b1db9c28aeedea03" # 40 buildings without age_group + + bldg_dmg = BuildingStructuralDamage(client) + bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) + + # Hurricane building mapping (with equation) + mapping_id = "602c381a1d85547cdc9f0675" + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + + result_name = os.path.join(result_folder, "galveston_hurr_dmg_result") + bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("hazard_type", hazard_type) + bldg_dmg.set_parameter("hazard_id", hazard_id) + bldg_dmg.set_parameter("num_cpu", 4) + bldg_dmg.run_analysis() + + # joplin tornado with retrofit strategy + bldg_dataset_id = "5df7d0de425e0b00092d0082" # joplin building v6 + retrofit_strategy_id = "660ab8f8ce705a7e54748557" # plan 1 + + bldg_dmg = BuildingStructuralDamage(client) + bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) + bldg_dmg.load_remote_input_dataset("retrofit_strategy", retrofit_strategy_id) + + mapping_id = "5e8e3a21eaa8b80001f04f1c" # 19 archetype with retrofit + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + + hazard_type = "tornado" + hazard_id = "5dfa32bbc0601200080893fb" + result_name = os.path.join(result_folder, "joplin_tornado_dmg_result_w_retrofit") + bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("hazard_type", hazard_type) + bldg_dmg.set_parameter("hazard_id", hazard_id) + bldg_dmg.set_parameter("num_cpu", 4) + bldg_dmg.set_parameter("seed", 1000) + bldg_dmg.run_analysis() + + +if __name__ == '__main__': + run_with_base_class() diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_multihazard.py 
b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_multihazard.py new file mode 100644 index 000000000..8baad5f39 --- /dev/null +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_multihazard.py @@ -0,0 +1,38 @@ +import os + +from pyincore import IncoreClient, FragilityService, MappingSet +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +import pyincore.globals as pyglobals + + +def run_with_base_class(): + client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + + bldg_dmg = BuildingStructuralDamage(client) + + # seaside multi-hazard + bldg_dmg.load_remote_input_dataset("buildings", "5bcf2fcbf242fe047ce79dad") + + mapping_id = "648a3f88c687ae511a1814e2" # earthquake+tsunami mapping + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_parameter("fragility_key", "Non-Retrofit Fragility ID Code") + + hazard_type = "earthquake+tsunami" + hazard_id = "5ba8f127ec2309043520906c+5bc9eaf7f7b08533c7e610e1" + + result_folder = "mutliple_hazards" + if not os.path.exists(result_folder): + os.mkdir(result_folder) + result_name = os.path.join(result_folder, "seaside_multihazard") + bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("hazard_type", hazard_type) + bldg_dmg.set_parameter("hazard_id", hazard_id) + bldg_dmg.set_parameter("num_cpu", 4) + bldg_dmg.set_parameter("seed", 1000) + bldg_dmg.run_analysis() + + +if __name__ == '__main__': + run_with_base_class() diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_offline.py b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_offline.py new file mode 100644 index 000000000..0d402fcd2 --- /dev/null +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_offline.py @@ -0,0 +1,74 @@ +import os + +from pyincore import IncoreClient, FragilityCurveSet, MappingSet, Tornado, Dataset, Mapping +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +import pyincore.globals as pyglobals + + +def run_with_base_class(): + client = IncoreClient(offline=True) + # client = IncoreClient(pyglobals.INCORE_API_DEV_URL, offline=False) + # client.clear_cache() + + # building + buildings = Dataset.from_file(os.path.join(pyglobals.TEST_DATA_DIR, + "building/joplin_commercial_bldg_v6_sample.shp"), + data_type="ergo:buildingInventoryVer6") + + # tornado + tornado = Tornado.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "tornado_dataset.json")) + tornado.hazardDatasets[0].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "joplin_tornado/joplin_path_wgs84.shp")), + data_type="incore:tornadoWindfield") + # dfr3 + fragility_archetype_6 = FragilityCurveSet.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, + "fragility_curves/fragility_archetype_6.json")) + fragility_archetype_7 = FragilityCurveSet.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, + "fragility_curves/fragility_archetype_7.json")) + + fragility_entry_archetype_6 = {"Non-Retrofit Fragility ID Code": fragility_archetype_6} + fragility_rules_archetype_6 = {"OR": ["int archetype EQUALS 6"]} + fragility_mapping_archetype_6 = Mapping(fragility_entry_archetype_6, fragility_rules_archetype_6) + fragility_entry_archetype_7 = {"Non-Retrofit Fragility ID Code": fragility_archetype_7} + 
fragility_rules_archetype_7 = {"OR": ["int archetype EQUALS 7"]} + fragility_mapping_archetype_7 = Mapping(fragility_entry_archetype_7, fragility_rules_archetype_7) + + fragility_mapping_set_definition = { + "id": "N/A", + "name": "local joplin tornado fragility mapping object", + "hazardType": "tornado", + "inventoryType": "building", + 'mappings': [ + fragility_mapping_archetype_6, + fragility_mapping_archetype_7, + ], + "mappingType": "fragility" + } + + fragility_mapping_set = MappingSet(fragility_mapping_set_definition) + + # Building Damage + # Create building damage + bldg_dmg = BuildingStructuralDamage(client) + + # Load input dataset + bldg_dmg.set_input_dataset("buildings", buildings) + + # Load fragility mapping + bldg_dmg.set_input_dataset("dfr3_mapping_set", fragility_mapping_set) + + # Set hazard + bldg_dmg.set_input_hazard("hazard", tornado) + + # Set analysis parameters + result_folder = "offline" + # result_folder = "online" + if not os.path.exists(result_folder): + os.mkdir(result_folder) + result_name = os.path.join(result_folder, "joplin_tornado_commerical_bldg_dmg") + bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("num_cpu", 2) + bldg_dmg.run_analysis() + + +if __name__ == '__main__': + run_with_base_class() diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_retrofit.py b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_retrofit.py new file mode 100644 index 000000000..2267eaf9a --- /dev/null +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_retrofit.py @@ -0,0 +1,58 @@ +import os + +from pyincore import IncoreClient, MappingSet, Tornado, Dataset, HazardService +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +import pyincore.globals as pyglobals +import time + + +def run_with_base_class(): + client = IncoreClient() + dev_client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + hazardsvc = HazardService(client) + dev_hazardsvc = HazardService(dev_client) + + # Set analysis parameters + result_folder = "retrofit" + if not os.path.exists(result_folder): + os.mkdir(result_folder) + + start_time = time.time() + ############################## + # joplin tornado + # dfr3 mapping + tornado_fragility_mapping_set = MappingSet.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, + "retrofit/tornado_retrofit_mapping.json")) + + # Building Damage + # Create building damage + tornado_bldg_dmg = BuildingStructuralDamage(client) + + # Load input dataset + bldg_dataset_id = "5dbc8478b9219c06dd242c0d" # joplin building v6 prod + tornado_bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) + retrofit_strategy_plan = Dataset.from_file(os.path.join(pyglobals.TEST_DATA_DIR, + "retrofit/tornado_retrofit_plan.csv"), + data_type="incore:retrofitStrategy") + tornado_bldg_dmg.set_input_dataset("retrofit_strategy", retrofit_strategy_plan) + + tornado = Tornado.from_hazard_service("608c5b17150b5e17064030df", hazardsvc) + tornado_bldg_dmg.set_input_hazard("hazard", tornado) + + # Load fragility mapping + tornado_bldg_dmg.set_input_dataset("dfr3_mapping_set", tornado_fragility_mapping_set) + + # Set hazard + tornado_bldg_dmg.set_input_hazard("hazard", tornado) + + result_name = os.path.join(result_folder, "joplin_tornado_commerical_bldg_dmg_w_retrofit") + tornado_bldg_dmg.set_parameter("result_name", result_name) + tornado_bldg_dmg.set_parameter("num_cpu", 8) + tornado_bldg_dmg.run_analysis() + + 
end_time_1 = time.time() + print(f"Joplin Tornado Retrofit execution time: {end_time_1 - start_time:.5f} seconds") + + +if __name__ == '__main__': + run_with_base_class() diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_w_local_hazard.py b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_w_local_hazard.py new file mode 100644 index 000000000..c28a65909 --- /dev/null +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_buildingstructuraldamage_w_local_hazard.py @@ -0,0 +1,82 @@ +import os + +from pyincore import IncoreClient, FragilityService, MappingSet, Hurricane, Tornado +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +import pyincore.globals as pyglobals + + +def run_with_base_class(): + client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + + # try local hurricane + # test with local hurricane + hurricane = Hurricane.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "hurricane-dataset.json")) + hurricane.hazardDatasets[0].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Wave_Raster.tif"), + data_type="ncsa:probabilisticHurricaneRaster") + # Optional: set threshold to determine exposure or not + hurricane.hazardDatasets[0].set_threshold(threshold_value=0.3, threshold_unit="m") + + hurricane.hazardDatasets[1].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Surge_Raster.tif"), + data_type="ncsa:probabilisticHurricaneRaster") + # Optional: set threshold to determine exposure or not + hurricane.hazardDatasets[0].set_threshold(threshold_value=0.3, threshold_unit="m") + + hurricane.hazardDatasets[2].from_file(os.path.join(pyglobals.TEST_DATA_DIR, "Inundation_Raster.tif"), + data_type="ncsa:probabilisticHurricaneRaster") + # Optional: set threshold to determine exposure or not + hurricane.hazardDatasets[2].set_threshold(threshold_value=1, threshold_unit="hr") + + # Galveston building dataset 602eba8bb1db9c28aef01358 + bldg_dataset_id = "602eba8bb1db9c28aef01358" # 19k buildings with age_group + + bldg_dmg = BuildingStructuralDamage(client) + bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) + + # Hurricane building mapping (with equation) + mapping_id = "602c381a1d85547cdc9f0675" + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + bldg_dmg.set_parameter("fragility_key", "Hurricane SurgeLevel and WaveHeight Fragility ID Code") + + bldg_dmg.set_input_hazard("hazard", hurricane) + + result_folder = "local_hazard" + if not os.path.exists(result_folder): + os.mkdir(result_folder) + result_name = os.path.join(result_folder, "galveston_local_hurr_dmg_result") + bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("num_cpu", 4) + bldg_dmg.run_analysis() + + ########################### + # local tornado + tornado = Tornado.from_json_file(os.path.join(pyglobals.TEST_DATA_DIR, "tornado_dataset.json")) + + # attach dataset from local file + tornado.hazardDatasets[0].from_file((os.path.join(pyglobals.TEST_DATA_DIR, "joplin_tornado/joplin_path_wgs84.shp")), + data_type="incore:tornadoWindfield") + + bldg_dataset_id = "5df7d0de425e0b00092d0082" + + bldg_dmg = BuildingStructuralDamage(client) + bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) + + mapping_id = "5e8e3a21eaa8b80001f04f1c" + fragility_service = FragilityService(client) + mapping_set = 
MappingSet(fragility_service.get_mapping(mapping_id)) + bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + + bldg_dmg.set_input_hazard("hazard", tornado) + + result_folder = "local_hazard" + if not os.path.exists(result_folder): + os.mkdir(result_folder) + result_name = os.path.join(result_folder, "joplin_tornado_bldg_dmg") + bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("num_cpu", 4) + bldg_dmg.run_analysis() + + +if __name__ == '__main__': + run_with_base_class() diff --git a/tests/pyincore/analyses/buildingstructuraldamage/test_slc_buildingstructuraldamage.py b/tests/pyincore/analyses/buildingstructuraldamage/test_slc_buildingstructuraldamage.py new file mode 100644 index 000000000..0fbc23225 --- /dev/null +++ b/tests/pyincore/analyses/buildingstructuraldamage/test_slc_buildingstructuraldamage.py @@ -0,0 +1,36 @@ +from pyincore import IncoreClient, FragilityService, MappingSet, Earthquake, HazardService, DataService +from pyincore.analyses.buildingstructuraldamage.buildingstructuraldamage import BuildingStructuralDamage +import time + + +if __name__ == "__main__": + client = IncoreClient() + + # Initiate fragility service + fragility_services = FragilityService(client) + hazard_services = HazardService(client) + data_services = DataService(client) + + # Analysis setup + start_time = time.time() + bldg_dmg = BuildingStructuralDamage(client) + + mapping_set = MappingSet(fragility_services.get_mapping("6309005ad76c6d0e1f6be081")) + bldg_dmg.set_input_dataset('dfr3_mapping_set', mapping_set) + + bldg_dmg.load_remote_input_dataset("buildings", "62fea288f5438e1f8c515ef8") # Salt Lake County All Building + bldg_dmg.set_parameter("result_name", "SLC_bldg_dmg_no_retrofit-withLIQ7.1") + + eq = Earthquake.from_hazard_service("640a03ea73a1642180262450", hazard_services) # Mw 7.1 + # eq = Earthquake.from_hazard_service("64108b6486a52d419dd69a41", hazard_services) # Mw 7.0 + bldg_dmg.set_input_hazard("hazard", eq) + + bldg_dmg.set_parameter("use_liquefaction", True) + bldg_dmg.set_parameter("liquefaction_geology_dataset_id", "62fe9ab685ac6b569e372429") + bldg_dmg.set_parameter("num_cpu", 8) + + # Run building damage without liquefaction + bldg_dmg.run_analysis() + + end_time = time.time() + print(f"total runtime: {end_time - start_time}") \ No newline at end of file From ba6e2e2b4785a8c6f874fdaf8ed774c14a8c7f0e Mon Sep 17 00:00:00 2001 From: Ya-Lan Yang <63822845+ylyangtw@users.noreply.github.com> Date: Thu, 6 Jun 2024 16:13:03 -0500 Subject: [PATCH 14/17] Rename nonstructural building damage (#582) * rename * fix typo * rename to buildingnonstructuraldamage --------- Co-authored-by: Chen Wang --- CHANGELOG.md | 1 + docs/source/modules.rst | 9 + .../buildingnonstructuraldamage/__init__.py | 9 + .../buildingnonstructuraldamage.py | 382 ++++++++++++++++++ .../buildingnonstructuralutil.py | 62 +++ .../nonstructbuildingdamage.py | 378 +---------------- .../test_buildingnonstructuraldamage.py | 54 +++ ...uildingnonstructuraldamage_w_hazard_obj.py | 53 +++ .../test_flood_buildingnonstructuraldamage.py | 33 ++ 9 files changed, 612 insertions(+), 369 deletions(-) create mode 100644 pyincore/analyses/buildingnonstructuraldamage/__init__.py create mode 100644 pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuraldamage.py create mode 100644 pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuralutil.py create mode 100644 tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage.py create mode 100644 
tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage_w_hazard_obj.py create mode 100644 tests/pyincore/analyses/buildingnonstructdamage/test_flood_buildingnonstructuraldamage.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 403eef3c8..30820f42d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Update Non-structural Building Damage to support flood [#562](https://github.com/IN-CORE/pyincore/issues/562) - Update flood input to non-structural building damage for combined wind-wave-surge building [#566](https://github.com/IN-CORE/pyincore/issues/566) - Rename transportation recovery analysis to traffic flow recovery analysis [#558](https://github.com/IN-CORE/pyincore/issues/558) +- Rename nonstructural building damage [#537](https://github.com/IN-CORE/pyincore/issues/537) - Rename building damage to building structural damage [#561](https://github.com/IN-CORE/pyincore/issues/561) ### Added diff --git a/docs/source/modules.rst b/docs/source/modules.rst index e2a9434a0..afe113a84 100644 --- a/docs/source/modules.rst +++ b/docs/source/modules.rst @@ -34,6 +34,13 @@ analyses/buildingfunctionality .. autoclass:: buildingfunctionality.buildingfunctionality.BuildingFunctionality :members: +analyses/buildingnonstructdamage +================================ +.. autoclass:: buildingnonstructdamage.buildingnonstructdamage.BuildingNonStructDamage + :members: +.. autoclass:: buildingnonstructdamage.buildingnonstructutil.BuildingNonStructUtil + :members: + analyses/buildingportfolio ========================== @@ -245,6 +252,8 @@ analyses/nonstructbuildingdamage :members: .. autoclass:: nonstructbuildingdamage.nonstructbuildingutil.NonStructBuildingUtil :members: +.. deprecated:: 1.19.0 + This class will be deprecated soon. Use :class:`buildingnonstructdamage.BuildingNonStructDamage` instead. analyses/pipelinedamage ======================= diff --git a/pyincore/analyses/buildingnonstructuraldamage/__init__.py b/pyincore/analyses/buildingnonstructuraldamage/__init__.py new file mode 100644 index 000000000..bd8682b04 --- /dev/null +++ b/pyincore/analyses/buildingnonstructuraldamage/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2019 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +from pyincore.analyses.buildingnonstructuraldamage.buildingnonstructuraldamage import BuildingNonStructDamage +from pyincore.analyses.buildingnonstructuraldamage.buildingnonstructuralutil import BuildingNonStructUtil diff --git a/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuraldamage.py b/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuraldamage.py new file mode 100644 index 000000000..2a50d6623 --- /dev/null +++ b/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuraldamage.py @@ -0,0 +1,382 @@ +# Copyright (c) 2019 University of Illinois and others. All rights reserved. 
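As with the structural rename above, a brief sketch of the new import path for the renamed non-structural analysis, BuildingNonStructDamage (the dataset, mapping, and hazard ids are placeholders, and the hazard parameters are assumed to follow the same pattern as the other analyses):

    from pyincore import IncoreClient, FragilityService, MappingSet
    from pyincore.analyses.buildingnonstructuraldamage import BuildingNonStructDamage

    client = IncoreClient()
    non_struct_dmg = BuildingNonStructDamage(client)
    non_struct_dmg.load_remote_input_dataset("buildings", "<building-dataset-id>")

    fragility_service = FragilityService(client)
    mapping_set = MappingSet(fragility_service.get_mapping("<dfr3-mapping-id>"))
    non_struct_dmg.set_input_dataset("dfr3_mapping_set", mapping_set)

    non_struct_dmg.set_parameter("hazard_type", "earthquake")
    non_struct_dmg.set_parameter("hazard_id", "<earthquake-hazard-id>")
    non_struct_dmg.set_parameter("result_name", "non_structural_damage_example")
    non_struct_dmg.run_analysis()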
+# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +import concurrent.futures +from itertools import repeat + +from pyincore import AnalysisUtil, GeoUtil +from pyincore import BaseAnalysis, HazardService, FragilityService +from pyincore.analyses.buildingnonstructuraldamage.buildingnonstructuralutil import \ + BuildingNonStructUtil +from pyincore.models.dfr3curve import DFR3Curve +from pyincore.utils.datasetutil import DatasetUtil + + +class BuildingNonStructDamage(BaseAnalysis): + """Computes non-structural structural building damage for an earthquake hazard. + + Args: + incore_client (IncoreClient): Service authentication. + + """ + + def __init__(self, incore_client): + self.hazardsvc = HazardService(incore_client) + self.fragilitysvc = FragilityService(incore_client) + + super(BuildingNonStructDamage, self).__init__(incore_client) + + def run(self): + """Executes building damage analysis.""" + # Building dataset + building_dataset = self.get_input_dataset("buildings") + + # building retrofit strategy + retrofit_strategy_dataset = self.get_input_dataset("retrofit_strategy") + + # mapping + dfr3_mapping_set = self.get_input_dataset("dfr3_mapping_set") + + # Update the building inventory dataset if applicable + bldg_dataset, tmpdirname, _ = DatasetUtil.construct_updated_inventories(building_dataset, + add_info_dataset=retrofit_strategy_dataset, + mapping=dfr3_mapping_set) + building_set = bldg_dataset.get_inventory_reader() + + # get input hazard + hazard, hazard_type, hazard_dataset_id = self.create_hazard_object_from_input_params() + + # set Default Fragility key + fragility_key = self.get_parameter("fragility_key") + if fragility_key is None: + self.set_parameter("fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_AS) + + # Set Default Hazard Uncertainty + use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty") + if use_hazard_uncertainty is None: + self.set_parameter("use_hazard_uncertainty", False) + + # Set Default Liquefaction + use_liquefaction = self.get_parameter("use_liquefaction") + if use_liquefaction is None: + self.set_parameter("use_liquefaction", False) + + user_defined_cpu = 1 + + if not self.get_parameter("num_cpu") is None and self.get_parameter("num_cpu") > 0: + user_defined_cpu = self.get_parameter("num_cpu") + + num_workers = AnalysisUtil.determine_parallelism_locally(self, len(building_set), user_defined_cpu) + + avg_bulk_input_size = int(len(building_set) / num_workers) + inventory_args = [] + count = 0 + inventory_list = list(building_set) + + while count < len(inventory_list): + inventory_args.append(inventory_list[count:count + avg_bulk_input_size]) + count += avg_bulk_input_size + + (ds_results, damage_results) = self.building_damage_concurrent_future(self.building_damage_analysis_bulk_input, + num_workers, + inventory_args, + repeat(hazard), + repeat(hazard_type), + repeat(hazard_dataset_id)) + + self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name")) + self.set_result_json_data("damage_result", + damage_results, + name=self.get_parameter("result_name") + "_additional_info") + return True + + def building_damage_concurrent_future(self, function_name, num_workers, *args): + """Utilizes concurrent.future module. + + Args: + function_name (function): The function to be parallelized. 
+ num_workers (int): Maximum number workers in parallelization. + *args: All the arguments in order to pass into parameter function_name. + + Returns: + dict: An ordered dictionary with building damage values. + dict: An ordered dictionary with building data/metadata. + + """ + output = [] + output_dmg = [] + with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor: + for ret1, ret2 in executor.map(function_name, *args): + output.extend(ret1) + output_dmg.extend(ret2) + + return output, output_dmg + + def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, hazard_dataset_id): + """Run analysis for multiple buildings. + + Args: + buildings (list): Multiple buildings from input inventory set. + hazard (obj): Hazard object. + hazard_type (str): Hazard type. + hazard_dataset_id (str): Hazard dataset id. + + Returns: + dict: An ordered dictionary with building damage values. + dict: An ordered dictionary with building data/metadata. + + """ + # read static parameters from object self + liq_geology_dataset_id = self.get_parameter("liq_geology_dataset_id") + use_liquefaction = self.get_parameter("use_liquefaction") + use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty") + + # get allowed demand types for the hazard type + allowed_demand_types = [item["demand_type"].lower() for item in self.hazardsvc.get_allowed_demands( + hazard_type)] + + building_results = [] + damage_results = [] + fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings, + self.get_parameter("fragility_key")) + values_payload = [] + values_payload_liq = [] + mapped_buildings = [] + unmapped_buildings = [] + liquefaction_resp = None + for building in buildings: + if building["id"] in fragility_sets: + fragility_set = fragility_sets[building["id"]] + location = GeoUtil.get_location(building) + loc = str(location.y) + "," + str(location.x) + + # Acceleration-Sensitive + demands, units, _ = AnalysisUtil.get_hazard_demand_types_units(building, fragility_set, hazard_type, + allowed_demand_types) + value = { + "demands": demands, + "units": units, + "loc": loc + } + values_payload.append(value) + + # liquefaction + if use_liquefaction: + value_liq = { + "demands": ["pgd"], # implied... + "units": ["in"], + "loc": loc + } + values_payload_liq.append(value_liq) + + mapped_buildings.append(building) + else: + unmapped_buildings.append(building) + + del buildings + + # get hazard values and liquefaction + if hazard_type == 'earthquake': + hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) + + # adjust dmg probability for liquefaction + if use_liquefaction: + if liq_geology_dataset_id is not None: + liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, + liq_geology_dataset_id, + values_payload_liq) + else: + raise ValueError('Hazard does not support liquefaction! 
Check to make sure you defined the ' + 'liquefaction portion of your scenario earthquake.') + elif hazard_type == 'flood': + hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) + elif hazard_type == 'hurricane': + # include hurricane flood + hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) + else: + raise ValueError("The provided hazard type is not supported yet by this analysis") + + # calculate LS and DS + for i, building in enumerate(mapped_buildings): + dmg_probability = dict() + dmg_interval = dict() + fragility_set = fragility_sets[building["id"]] + + # TODO this value needs to come from the hazard service + # adjust dmg probability for hazard uncertainty + if use_hazard_uncertainty: + raise ValueError('Uncertainty has not yet been implemented!') + + ############### + if isinstance(fragility_set.fragility_curves[0], DFR3Curve): + hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"]) + demand_types = hazard_resp[i]["demands"] + demand_units = hazard_resp[i]["units"] + hval_dict = dict() + for j, d in enumerate(fragility_set.demand_types): + hval_dict[d] = hazard_vals[j] + if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]): + building_args = fragility_set.construct_expression_args_from_inventory(building) + dmg_probability = fragility_set. \ + calculate_limit_state(hval_dict, inventory_type="building", + **building_args) + # adjust dmg probability for liquefaction + if hazard_type == 'earthquake' and use_liquefaction and liq_geology_dataset_id is not None: + liquefaction_dmg = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i][ + "groundFailureProb"]) + dmg_probability = AnalysisUtil.update_precision_of_dicts( + BuildingNonStructUtil.adjust_damage_for_liquefaction(dmg_probability, + liquefaction_dmg)) + + dmg_interval = fragility_set.calculate_damage_interval(dmg_probability, + hazard_type=hazard_type, + inventory_type="building") + else: + raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are " + "seeing this please report the issue.") + + # put results in dictionary + building_result = dict() + building_result['guid'] = building['properties']['guid'] + building_result.update(dmg_probability) + building_result.update(dmg_interval) + hazard_exposure = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type) + building_result['haz_expose'] = hazard_exposure + + # put damage results in dictionary + damage_result = dict() + damage_result['guid'] = building['properties']['guid'] + damage_result['fragility_id'] = fragility_set.id + damage_result['demandtypes'] = demand_types + damage_result['demandunits'] = demand_units + damage_result['hazardtype'] = hazard_type + damage_result['hazardvals'] = hazard_vals + + building_results.append(building_result) + damage_results.append(damage_result) + + for building in unmapped_buildings: + building_result = dict() + building_result['guid'] = building['properties']['guid'] + + damage_result = dict() + damage_result['guid'] = building['properties']['guid'] + damage_result['fragility_id'] = None + damage_result['demandtypes'] = None + damage_result['demandunits'] = None + damage_result['hazardtype'] = None + damage_result['hazardvals'] = None + + building_results.append(building_result) + damage_results.append(damage_result) + + return building_results, damage_results + + def get_spec(self): + """Get specifications of the building damage analysis. 
+ + Returns: + obj: A JSON object of specifications of the building damage analysis. + + """ + return { + 'name': 'building-damage', + 'description': 'building damage analysis', + 'input_parameters': [ + { + 'id': 'result_name', + 'required': True, + 'description': 'result dataset name', + 'type': str + }, + { + 'id': 'hazard_type', + 'required': False, + 'description': 'Hazard Type (e.g. earthquake, flood, hurricane)', + 'type': str + }, + { + 'id': 'hazard_id', + 'required': False, + 'description': 'Hazard ID', + 'type': str + }, + { + 'id': 'fragility_key', + 'required': False, + 'description': 'Non-structural Fragility key to use in mapping dataset', + 'type': str + }, + { + 'id': 'use_liquefaction', + 'required': False, + 'description': 'Use liquefaction', + 'type': bool + }, + { + 'id': 'liq_geology_dataset_id', + 'required': False, + 'description': 'liquefaction geology dataset id, \ + if use liquefaction, you have to provide this id', + 'type': str + }, + { + 'id': 'use_hazard_uncertainty', + 'required': False, + 'description': 'Use hazard uncertainty', + 'type': bool + }, + { + 'id': 'num_cpu', + 'required': False, + 'description': 'If using parallel execution, the number of cpus to request', + 'type': int + }, + ], + 'input_hazards': [ + { + 'id': 'hazard', + 'required': False, + 'description': 'Hazard object', + 'type': ["earthquake", "flood", "hurricane"] + }, + ], + 'input_datasets': [ + { + 'id': 'buildings', + 'required': True, + 'description': 'building Inventory', + 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', + 'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'], + }, + { + 'id': 'dfr3_mapping_set', + 'required': True, + 'description': 'DFR3 Mapping Set Object', + 'type': ['incore:dfr3MappingSet'], + }, + { + 'id': 'retrofit_strategy', + 'required': False, + 'description': 'Building retrofit strategy that contains guid and retrofit method', + 'type': ['incore:retrofitStrategy'] + } + ], + 'output_datasets': [ + { + 'id': 'result', + 'parent_type': 'buildings', + 'description': 'CSV file of damage states for building non-structural damage', + 'type': 'ergo:nsBuildingInventoryDamageVer4' + }, + { + 'id': 'damage_result', + 'parent_type': 'buildings', + 'description': 'Json file with information about applied hazard value and fragility', + 'type': 'incore:nsBuildingInventoryDamageSupplement' + } + ] + } diff --git a/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuralutil.py b/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuralutil.py new file mode 100644 index 000000000..2b5775f34 --- /dev/null +++ b/pyincore/analyses/buildingnonstructuraldamage/buildingnonstructuralutil.py @@ -0,0 +1,62 @@ +# Copyright (c) 2019 University of Illinois and others. All rights reserved. 
+# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +import collections + + +class BuildingNonStructUtil: +    """Utility methods for the non-structural building damage analysis.""" +    BUILDING_FRAGILITY_KEYS = { +        "drift-sensitive fragility id code": ["Drift Sensitive", "DS"], +        "parametric non-retrofit fragility id code": ["Parametric Non-Retrofit", "PNR"], +        "acceleration-sensitive fragility id code": ["Acceleration Sensitive", "AS"], +        "non-retrofit fragility id code": ["as built", "none"] +    } + +    DEFAULT_FRAGILITY_KEY_DS = "Drift-Sensitive Fragility ID Code" +    DEFAULT_FRAGILITY_KEY_AS = "Acceleration-Sensitive Fragility ID Code" + +    @staticmethod +    def adjust_damage_for_liquefaction(limit_state_probabilities, ground_failure_probabilities): +        """Adjusts building damage probability based on liquefaction ground failure probability. +        The ground failure input contains three values; the first two are the same and the third may differ. +        The first two are applied to all damage states except the highest. + +        Args: +            limit_state_probabilities (obj): Limit state probabilities. +            ground_failure_probabilities (list): Ground failure probabilities. + +        Returns: +            OrderedDict: Adjusted limit state probability. + +        """ +        keys = list(limit_state_probabilities.keys()) +        adjusted_limit_state_probabilities = collections.OrderedDict() + +        for i in range(len(keys)): +            # check and see...if we are trying to use the last ground failure +            # number for something other than the +            # last limit-state-probability, then we should use the +            # second-to-last probability of ground failure instead.
+ + if i > len(ground_failure_probabilities) - 1: + prob_ground_failure = ground_failure_probabilities[len(ground_failure_probabilities) - 2] + else: + prob_ground_failure = ground_failure_probabilities[i] + + adjusted_limit_state_probabilities[keys[i]] = \ + limit_state_probabilities[keys[i]] + prob_ground_failure \ + - limit_state_probabilities[keys[i]] * prob_ground_failure + + # the final one is the last of limitStates should match with the last of ground failures + j = len(limit_state_probabilities) - 1 + prob_ground_failure = ground_failure_probabilities[-1] + adjusted_limit_state_probabilities[keys[j]] = \ + limit_state_probabilities[keys[j]] \ + + prob_ground_failure - limit_state_probabilities[keys[j]] * prob_ground_failure + + return adjusted_limit_state_probabilities diff --git a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py index 757a0e58d..f2f4518f0 100644 --- a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py +++ b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py @@ -4,379 +4,19 @@ # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -import concurrent.futures -from itertools import repeat +from deprecated.sphinx import deprecated -from pyincore import AnalysisUtil, GeoUtil -from pyincore import BaseAnalysis, HazardService, FragilityService -from pyincore.analyses.nonstructbuildingdamage.nonstructbuildingutil import \ - NonStructBuildingUtil -from pyincore.models.dfr3curve import DFR3Curve -from pyincore.utils.datasetutil import DatasetUtil +from pyincore.analyses.buildingnonstructuraldamage.buildingnonstructuraldamage import \ + BuildingNonStructDamage -class NonStructBuildingDamage(BaseAnalysis): - """Computes non-structural structural building damage for an earthquake hazard. - - Args: - incore_client (IncoreClient): Service authentication. - - """ - +@deprecated(version='1.19.0', reason="This class will be deprecated soon. 
Use BuildingNonStructDamage instead.") +class NonStructBuildingDamage(): def __init__(self, incore_client): - self.hazardsvc = HazardService(incore_client) - self.fragilitysvc = FragilityService(incore_client) - - super(NonStructBuildingDamage, self).__init__(incore_client) - - def run(self): - """Executes building damage analysis.""" - # Building dataset - building_dataset = self.get_input_dataset("buildings") - - # building retrofit strategy - retrofit_strategy_dataset = self.get_input_dataset("retrofit_strategy") - - # mapping - dfr3_mapping_set = self.get_input_dataset("dfr3_mapping_set") - - # Update the building inventory dataset if applicable - bldg_dataset, tmpdirname, _ = DatasetUtil.construct_updated_inventories(building_dataset, - add_info_dataset=retrofit_strategy_dataset, - mapping=dfr3_mapping_set) - building_set = bldg_dataset.get_inventory_reader() - - # get input hazard - hazard, hazard_type, hazard_dataset_id = self.create_hazard_object_from_input_params() - - # set Default Fragility key - fragility_key = self.get_parameter("fragility_key") - if fragility_key is None: - self.set_parameter("fragility_key", NonStructBuildingUtil.DEFAULT_FRAGILITY_KEY_AS) - - # Set Default Hazard Uncertainty - use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty") - if use_hazard_uncertainty is None: - self.set_parameter("use_hazard_uncertainty", False) - - # Set Default Liquefaction - use_liquefaction = self.get_parameter("use_liquefaction") - if use_liquefaction is None: - self.set_parameter("use_liquefaction", False) - - user_defined_cpu = 1 - - if not self.get_parameter("num_cpu") is None and self.get_parameter("num_cpu") > 0: - user_defined_cpu = self.get_parameter("num_cpu") - - num_workers = AnalysisUtil.determine_parallelism_locally(self, len(building_set), user_defined_cpu) - - avg_bulk_input_size = int(len(building_set) / num_workers) - inventory_args = [] - count = 0 - inventory_list = list(building_set) - - while count < len(inventory_list): - inventory_args.append(inventory_list[count:count + avg_bulk_input_size]) - count += avg_bulk_input_size - - (ds_results, damage_results) = self.building_damage_concurrent_future(self.building_damage_analysis_bulk_input, - num_workers, - inventory_args, - repeat(hazard), - repeat(hazard_type), - repeat(hazard_dataset_id)) - - self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name")) - self.set_result_json_data("damage_result", - damage_results, - name=self.get_parameter("result_name") + "_additional_info") - return True - - def building_damage_concurrent_future(self, function_name, num_workers, *args): - """Utilizes concurrent.future module. - - Args: - function_name (function): The function to be parallelized. - num_workers (int): Maximum number workers in parallelization. - *args: All the arguments in order to pass into parameter function_name. - - Returns: - dict: An ordered dictionary with building damage values. - dict: An ordered dictionary with building data/metadata. - - """ - output = [] - output_dmg = [] - with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) as executor: - for ret1, ret2 in executor.map(function_name, *args): - output.extend(ret1) - output_dmg.extend(ret2) - - return output, output_dmg - - def building_damage_analysis_bulk_input(self, buildings, hazard, hazard_type, hazard_dataset_id): - """Run analysis for multiple buildings. - - Args: - buildings (list): Multiple buildings from input inventory set. - hazard (obj): Hazard object. 
- hazard_type (str): Hazard type. - hazard_dataset_id (str): Hazard dataset id. - - Returns: - dict: An ordered dictionary with building damage values. - dict: An ordered dictionary with building data/metadata. + self._delegate = BuildingNonStructDamage(incore_client) + def __getattr__(self, name): """ - # read static parameters from object self - liq_geology_dataset_id = self.get_parameter("liq_geology_dataset_id") - use_liquefaction = self.get_parameter("use_liquefaction") - use_hazard_uncertainty = self.get_parameter("use_hazard_uncertainty") - - # get allowed demand types for the hazard type - allowed_demand_types = [item["demand_type"].lower() for item in self.hazardsvc.get_allowed_demands( - hazard_type)] - - building_results = [] - damage_results = [] - fragility_sets = self.fragilitysvc.match_inventory(self.get_input_dataset("dfr3_mapping_set"), buildings, - self.get_parameter("fragility_key")) - values_payload = [] - values_payload_liq = [] - mapped_buildings = [] - unmapped_buildings = [] - liquefaction_resp = None - for building in buildings: - if building["id"] in fragility_sets: - fragility_set = fragility_sets[building["id"]] - location = GeoUtil.get_location(building) - loc = str(location.y) + "," + str(location.x) - - # Acceleration-Sensitive - demands, units, _ = AnalysisUtil.get_hazard_demand_types_units(building, fragility_set, hazard_type, - allowed_demand_types) - value = { - "demands": demands, - "units": units, - "loc": loc - } - values_payload.append(value) - - # liquefaction - if use_liquefaction: - value_liq = { - "demands": ["pgd"], # implied... - "units": ["in"], - "loc": loc - } - values_payload_liq.append(value_liq) - - mapped_buildings.append(building) - else: - unmapped_buildings.append(building) - - del buildings - - # get hazard values and liquefaction - if hazard_type == 'earthquake': - hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) - - # adjust dmg probability for liquefaction - if use_liquefaction: - if liq_geology_dataset_id is not None: - liquefaction_resp = self.hazardsvc.post_liquefaction_values(hazard_dataset_id, - liq_geology_dataset_id, - values_payload_liq) - else: - raise ValueError('Hazard does not support liquefaction! 
Check to make sure you defined the ' - 'liquefaction portion of your scenario earthquake.') - elif hazard_type == 'flood': - hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) - elif hazard_type == 'hurricane': - # include hurricane flood - hazard_resp = hazard.read_hazard_values(values_payload, self.hazardsvc) - else: - raise ValueError("The provided hazard type is not supported yet by this analysis") - - # calculate LS and DS - for i, building in enumerate(mapped_buildings): - dmg_probability = dict() - dmg_interval = dict() - fragility_set = fragility_sets[building["id"]] - - # TODO this value needs to come from the hazard service - # adjust dmg probability for hazard uncertainty - if use_hazard_uncertainty: - raise ValueError('Uncertainty has not yet been implemented!') - - ############### - if isinstance(fragility_set.fragility_curves[0], DFR3Curve): - hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"]) - demand_types = hazard_resp[i]["demands"] - demand_units = hazard_resp[i]["units"] - hval_dict = dict() - for j, d in enumerate(fragility_set.demand_types): - hval_dict[d] = hazard_vals[j] - if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]): - building_args = fragility_set.construct_expression_args_from_inventory(building) - dmg_probability = fragility_set. \ - calculate_limit_state(hval_dict, inventory_type="building", - **building_args) - # adjust dmg probability for liquefaction - if hazard_type == 'earthquake' and use_liquefaction and liq_geology_dataset_id is not None: - liquefaction_dmg = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i][ - "groundFailureProb"]) - dmg_probability = AnalysisUtil.update_precision_of_dicts( - NonStructBuildingUtil.adjust_damage_for_liquefaction(dmg_probability, - liquefaction_dmg)) - - dmg_interval = fragility_set.calculate_damage_interval(dmg_probability, - hazard_type=hazard_type, - inventory_type="building") - else: - raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are " - "seeing this please report the issue.") - - # put results in dictionary - building_result = dict() - building_result['guid'] = building['properties']['guid'] - building_result.update(dmg_probability) - building_result.update(dmg_interval) - hazard_exposure = AnalysisUtil.get_exposure_from_hazard_values(hazard_vals, hazard_type) - building_result['haz_expose'] = hazard_exposure - - # put damage results in dictionary - damage_result = dict() - damage_result['guid'] = building['properties']['guid'] - damage_result['fragility_id'] = fragility_set.id - damage_result['demandtypes'] = demand_types - damage_result['demandunits'] = demand_units - damage_result['hazardtype'] = hazard_type - damage_result['hazardvals'] = hazard_vals - - building_results.append(building_result) - damage_results.append(damage_result) - - for building in unmapped_buildings: - building_result = dict() - building_result['guid'] = building['properties']['guid'] - - damage_result = dict() - damage_result['guid'] = building['properties']['guid'] - damage_result['fragility_id'] = None - damage_result['demandtypes'] = None - damage_result['demandunits'] = None - damage_result['hazardtype'] = None - damage_result['hazardvals'] = None - - building_results.append(building_result) - damage_results.append(damage_result) - - return building_results, damage_results - - def get_spec(self): - """Get specifications of the building damage analysis. 
- - Returns: - obj: A JSON object of specifications of the building damage analysis. - + Delegate attribute access to the BuildingNonStructDamage instance. """ - return { - 'name': 'building-damage', - 'description': 'building damage analysis', - 'input_parameters': [ - { - 'id': 'result_name', - 'required': True, - 'description': 'result dataset name', - 'type': str - }, - { - 'id': 'hazard_type', - 'required': False, - 'description': 'Hazard Type (e.g. earthquake, flood, hurricane)', - 'type': str - }, - { - 'id': 'hazard_id', - 'required': False, - 'description': 'Hazard ID', - 'type': str - }, - { - 'id': 'fragility_key', - 'required': False, - 'description': 'Non-structural Fragility key to use in mapping dataset', - 'type': str - }, - { - 'id': 'use_liquefaction', - 'required': False, - 'description': 'Use liquefaction', - 'type': bool - }, - { - 'id': 'liq_geology_dataset_id', - 'required': False, - 'description': 'liquefaction geology dataset id, \ - if use liquefaction, you have to provide this id', - 'type': str - }, - { - 'id': 'use_hazard_uncertainty', - 'required': False, - 'description': 'Use hazard uncertainty', - 'type': bool - }, - { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int - }, - ], - 'input_hazards': [ - { - 'id': 'hazard', - 'required': False, - 'description': 'Hazard object', - 'type': ["earthquake", "flood", "hurricane"] - }, - ], - 'input_datasets': [ - { - 'id': 'buildings', - 'required': True, - 'description': 'building Inventory', - 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', - 'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'], - }, - { - 'id': 'dfr3_mapping_set', - 'required': True, - 'description': 'DFR3 Mapping Set Object', - 'type': ['incore:dfr3MappingSet'], - }, - { - 'id': 'retrofit_strategy', - 'required': False, - 'description': 'Building retrofit strategy that contains guid and retrofit method', - 'type': ['incore:retrofitStrategy'] - } - ], - 'output_datasets': [ - { - 'id': 'result', - 'parent_type': 'buildings', - 'description': 'CSV file of damage states for building non-structural damage', - 'type': 'ergo:nsBuildingInventoryDamageVer4' - }, - { - 'id': 'damage_result', - 'parent_type': 'buildings', - 'description': 'Json file with information about applied hazard value and fragility', - 'type': 'incore:nsBuildingInventoryDamageSupplement' - } - ] - } + return getattr(self._delegate, name) \ No newline at end of file diff --git a/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage.py b/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage.py new file mode 100644 index 000000000..fc46c1550 --- /dev/null +++ b/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage.py @@ -0,0 +1,54 @@ +from pyincore import IncoreClient, FragilityService, MappingSet +from pyincore.analyses.buildingnonstructuraldamage import BuildingNonStructDamage, BuildingNonStructUtil +import pyincore.globals as pyglobals + + +def run_with_base_class(): + client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + + # Memphis 7.9 AB-95 + hazard_type = "earthquake" + hazard_id = "5b902cb273c3371e1236b36b" + + # Shelby County Essential Facilities + building_dataset_id = "5a284f42c7d30d13bc0821ba" + + # Default Building Fragility Mapping v1.0 + mapping_id = "5b47b350337d4a3629076f2c" + fragility_service = FragilityService(client) + mapping_set = 
MappingSet(fragility_service.get_mapping(mapping_id)) + + # use liquefaction (slow) + # Shelby County Liquefaction Susceptibility + use_liquefaction = True + liq_geology_dataset_id = "5a284f55c7d30d13bc0824ba" + + # Acceleration sensitive + non_structural_building_dmg_as = BuildingNonStructDamage(client) + non_structural_building_dmg_as.load_remote_input_dataset("buildings", building_dataset_id) + non_structural_building_dmg_as.set_input_dataset('dfr3_mapping_set', mapping_set) + non_structural_building_dmg_as.set_parameter("result_name", "non_structural_building_dmg_result_as") + non_structural_building_dmg_as.set_parameter("hazard_type", hazard_type) + non_structural_building_dmg_as.set_parameter("hazard_id", hazard_id) + non_structural_building_dmg_as.set_parameter("num_cpu", 4) + non_structural_building_dmg_as.set_parameter("fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_AS) + non_structural_building_dmg_as.set_parameter("use_liquefaction", use_liquefaction) + non_structural_building_dmg_as.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_as.run_analysis() + + # Drift sensitive + non_structural_building_dmg_ds = BuildingNonStructDamage(client) + non_structural_building_dmg_ds.load_remote_input_dataset("buildings", building_dataset_id) + non_structural_building_dmg_ds.set_input_dataset('dfr3_mapping_set', mapping_set) + non_structural_building_dmg_ds.set_parameter("result_name", "non_structural_building_dmg_result_ds") + non_structural_building_dmg_ds.set_parameter("hazard_type", hazard_type) + non_structural_building_dmg_ds.set_parameter("hazard_id", hazard_id) + non_structural_building_dmg_ds.set_parameter("num_cpu", 4) + non_structural_building_dmg_ds.set_parameter("fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_DS) + non_structural_building_dmg_ds.set_parameter("use_liquefaction", use_liquefaction) + non_structural_building_dmg_ds.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_ds.run_analysis() + + +if __name__ == '__main__': + run_with_base_class() diff --git a/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage_w_hazard_obj.py b/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage_w_hazard_obj.py new file mode 100644 index 000000000..4a7a3dfcc --- /dev/null +++ b/tests/pyincore/analyses/buildingnonstructdamage/test_buildingnonstructuraldamage_w_hazard_obj.py @@ -0,0 +1,53 @@ +from pyincore import IncoreClient, FragilityService, MappingSet, HazardService, Earthquake +from pyincore.analyses.buildingnonstructuraldamage import BuildingNonStructDamage, BuildingNonStructUtil +import pyincore.globals as pyglobals + + +def run_with_base_class(): + client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + + # Memphis 7.9 AB-95 + hazard_service = HazardService(client) + earthquake = Earthquake.from_hazard_service("5b902cb273c3371e1236b36b", hazard_service) + + # Shelby County Essential Facilities + building_dataset_id = "5a284f42c7d30d13bc0821ba" + + # Default Building Fragility Mapping v1.0 + mapping_id = "5b47b350337d4a3629076f2c" + # Load fragility mapping + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + + # use liquefaction (slow) + # Shelby County Liquefaction Susceptibility + use_liquefaction = True + liq_geology_dataset_id = "5a284f55c7d30d13bc0824ba" + + # Acceleration sensitive + non_structural_building_dmg_as = BuildingNonStructDamage(client) + 
non_structural_building_dmg_as.load_remote_input_dataset("buildings", building_dataset_id) + non_structural_building_dmg_as.set_input_dataset('dfr3_mapping_set', mapping_set) + non_structural_building_dmg_as.set_input_hazard("hazard", earthquake) + non_structural_building_dmg_as.set_parameter("result_name", "non_structural_building_dmg_result_w_hazard_obj_as") + non_structural_building_dmg_as.set_parameter("num_cpu", 4) + non_structural_building_dmg_as.set_parameter("fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_AS) + non_structural_building_dmg_as.set_parameter("use_liquefaction", use_liquefaction) + non_structural_building_dmg_as.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_as.run_analysis() + + # Drift sensitive + non_structural_building_dmg_ds = BuildingNonStructDamage(client) + non_structural_building_dmg_ds.load_remote_input_dataset("buildings", building_dataset_id) + non_structural_building_dmg_ds.set_input_dataset('dfr3_mapping_set', mapping_set) + non_structural_building_dmg_ds.set_parameter("result_name", "non_structural_building_dmg_result_w_hazard_obj_ds") + non_structural_building_dmg_ds.set_input_hazard("hazard", earthquake) + non_structural_building_dmg_ds.set_parameter("num_cpu", 4) + non_structural_building_dmg_ds.set_parameter("fragility_key", BuildingNonStructUtil.DEFAULT_FRAGILITY_KEY_DS) + non_structural_building_dmg_ds.set_parameter("use_liquefaction", use_liquefaction) + non_structural_building_dmg_ds.set_parameter("liq_geology_dataset_id", liq_geology_dataset_id) + non_structural_building_dmg_ds.run_analysis() + + +if __name__ == '__main__': + run_with_base_class() diff --git a/tests/pyincore/analyses/buildingnonstructdamage/test_flood_buildingnonstructuraldamage.py b/tests/pyincore/analyses/buildingnonstructdamage/test_flood_buildingnonstructuraldamage.py new file mode 100644 index 000000000..74df76ce1 --- /dev/null +++ b/tests/pyincore/analyses/buildingnonstructdamage/test_flood_buildingnonstructuraldamage.py @@ -0,0 +1,33 @@ +from pyincore import IncoreClient, FragilityService, MappingSet, Flood, HazardService +from pyincore.analyses.buildingnonstructuraldamage import BuildingNonStructDamage +import pyincore.globals as pyglobals + + +def run_with_base_class(): + client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + hazardsvc = HazardService(client) + + # lumberton flood + flood = Flood.from_hazard_service("5f4d02e99f43ee0dde768406", hazardsvc) + + # lumberton building inventory v7 + # bldg_dataset_id = "603010f7b1db9c28aef53214" # 40 building subset + bldg_dataset_id = "603010a4b1db9c28aef5319f" # 21k full building + + # lumberton building mapping (with equation) + mapping_id = "602f3cf981bd2c09ad8f4f9d" + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + + non_structural_building_dmg_flood = BuildingNonStructDamage(client) + non_structural_building_dmg_flood.load_remote_input_dataset("buildings", bldg_dataset_id) + non_structural_building_dmg_flood.set_input_dataset('dfr3_mapping_set', mapping_set) + non_structural_building_dmg_flood.set_parameter("result_name", "non_structural_building_dmg_result_flood") + non_structural_building_dmg_flood.set_input_hazard("hazard", flood) + non_structural_building_dmg_flood.set_parameter("num_cpu", 4) + non_structural_building_dmg_flood.set_parameter("fragility_key", "Lumberton Flood Building Fragility ID Code") + non_structural_building_dmg_flood.run_analysis() + + +if __name__ == '__main__': + 
run_with_base_class() From 88d3b150f755e3e0e2a455b8182eb1e7ad7ab487 Mon Sep 17 00:00:00 2001 From: Chen Wang Date: Thu, 6 Jun 2024 16:25:55 -0500 Subject: [PATCH 15/17] 557 rename monte carlo failure probability to monte carlo limit state probability (#576) * update MCS * update requirements * Update CHANGELOG.md * module rst * resolve merge conflict --- CHANGELOG.md | 27 +- docs/source/modules.rst | 49 +-- .../montecarlofailureprobability.py | 336 +---------------- .../__init__.py | 9 + .../montecarlolimitstateprobability.py | 339 ++++++++++++++++++ .../test_montecarlolimitstateprobability.py | 30 ++ 6 files changed, 418 insertions(+), 372 deletions(-) create mode 100644 pyincore/analyses/montecarlolimitstateprobability/__init__.py create mode 100644 pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py create mode 100644 tests/pyincore/analyses/montecarlolimitstateprobability/test_montecarlolimitstateprobability.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 30820f42d..6db3e6b4e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,9 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Update Non-structural Building Damage to support flood [#562](https://github.com/IN-CORE/pyincore/issues/562) - Update flood input to non-structural building damage for combined wind-wave-surge building [#566](https://github.com/IN-CORE/pyincore/issues/566) - Rename transportation recovery analysis to traffic flow recovery analysis [#558](https://github.com/IN-CORE/pyincore/issues/558) +- Rename Monte Carlo Failure Probability to Monte Carlo Limit State Probability [#557](https://github.com/IN-CORE/pyincore/issues/557) +- Rename Housing Recovery to Housing Valuation Recovery Analysis [#560](https://github.com/IN-CORE/pyincore/issues/560) +- Rename Building Portfolio Analysis to Building Cluster Recovery Analysis [#559](https://github.com/IN-CORE/pyincore/issues/559) - Rename nonstructural building damage [#537](https://github.com/IN-CORE/pyincore/issues/537) - Rename building damage to building structural damage [#561](https://github.com/IN-CORE/pyincore/issues/561) @@ -20,28 +23,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Gas Facility Damage Analysis [#568](https://github.com/IN-CORE/pyincore/issues/568) - Copyrights to transportation recovery analysis [#579](https://github.com/IN-CORE/pyincore/issues/579) - Buyout Model Analyses [#539](https://github.com/IN-CORE/pyincore/issues/539) +- Google Analytics to the documentation site [#547](https://github.com/IN-CORE/pyincore/issues/547) ### Fixed - Permission error in clearing cache process [#563](https://github.com/IN-CORE/pyincore/issues/563) - - -## [Unreleased] - -### Changed -- Rename Housing Recovery to Housing Valuation Recovery Analysis [#560](https://github.com/IN-CORE/pyincore/issues/560) - - -## [Unreleased] - -### Changed -- Rename Building Portfolio Analysis to Building Cluster Recovery Analysis [#559](https://github.com/IN-CORE/pyincore/issues/559) - - -## [Unrelased] - -### Fixed - Out of index error in dfr3 service's property conversion when the rule is not found [#555](https://github.com/IN-CORE/pyincore/issues/555) + ## [1.18.1] - 2024-04-30 ### Changed @@ -56,11 +44,6 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
- Unnecessary dependency in setup.py [#519](https://github.com/IN-CORE/pyincore/issues/519) -## [Unreleased] - -### Added -- Google Analytics to the documentation site [#547](https://github.com/IN-CORE/pyincore/issues/547) - ## [1.18.0] - 2024-04-03 ### Added diff --git a/docs/source/modules.rst b/docs/source/modules.rst index afe113a84..41c356929 100644 --- a/docs/source/modules.rst +++ b/docs/source/modules.rst @@ -68,7 +68,7 @@ analyses/combinedwindwavesurgebuildingdamage :members: analyses/combinedwindwavesurgebuildingloss -============================================ +========================================== .. autoclass:: pyincore.analyses.combinedwindwavesurgebuildingloss.CombinedWindWaveSurgeBuildingLoss :members: @@ -78,7 +78,7 @@ analyses/commercialbuildingrecovery :members: analyses/core_cge_ml -=================================== +==================== .. autoclass:: core_cge_ml.corecgeml.CoreCGEML :members: @@ -100,7 +100,7 @@ analyses/epfdamage :members: analyses/epfrepaircost -================== +====================== .. autoclass:: epfrepaircost.epfrepaircost.EpfRepairCost :members: @@ -117,7 +117,7 @@ analyses/example :members: analyses/galvestoncge -================== +===================== .. autoclass:: galvestoncge.galvestoncge.GalvestonCGEModel :members: .. autoclass:: galvestoncge.equationlib.VarContainer @@ -140,7 +140,7 @@ analyses/galvestoncge :members: analyses/gasfacilitydamage -============================ +========================== .. autoclass:: gasfacilitydamage.gasfacilitydamage.GasFacilityDamage :members: .. autoclass:: gasfacilitydamage.gfutil.GfUtil @@ -162,14 +162,14 @@ analyses/housingunitallocation :members: analyses/housingvaluationrecovery -======================== +================================= .. autoclass:: housingvaluationrecovery.housingvaluationrecovery.HousingValuationRecovery :members: .. autoclass:: housingvaluationrecovery.housingvaluationrecoveryutil.HousingValuationRecoveryUtil :members: analyses/indp -============================== +============= .. autoclass:: indp.indp.INDP :members: .. autoclass:: indp.dislocationutils.DislocationUtil @@ -215,7 +215,7 @@ analyses/joplincge :members: analyses/joplinempiricalbuildingrestoration -=================================== +=========================================== .. autoclass:: joplinempiricalbuildingrestoration.joplinempiricalbuildingrestoration.JoplinEmpiricalBuildingRestoration :members: .. autoclass:: joplinempiricalbuildingrestoration.joplinempirrestor_util.JoplinEmpirRestorUtil @@ -227,13 +227,18 @@ analyses/meandamage :members: analyses/mlenabledcgeslc -=================== +======================== .. autoclass:: mlenabledcgeslc.mlcgeslc.MlEnabledCgeSlc :members: analyses/montecarlofailureprobability ===================================== -.. autoclass:: montecarlofailureprobability.montecarlofailureprobability.MonteCarloFailureProbability +.. deprecated:: 1.19.0 + This class will be deprecated soon. Use :class:`montecarlolimitstateprobability.MonteCarloLimitStateProbability` instead. + +analyses/montecarlolimitstateprobability +======================================== +.. autoclass:: montecarlolimitstateprobability.montecarlolimitstateprobability.MonteCarloLimitStateProbability :members: analyses/multiobjectiveretrofitoptimization @@ -268,12 +273,12 @@ analyses/pipelinedamagerepairrate :members: analyses/pipelinefunctionality -========================= +============================== .. 
autoclass:: pipelinefunctionality.pipelinefunctionality.PipelineFunctionality :members: analyses/pipelinerepaircost -============================ +=========================== .. autoclass:: pipelinerepaircost.pipelinerepaircost.PipelineRepairCost :members: @@ -315,7 +320,7 @@ analyses/seasidecge :members: analyses/saltlakecge -================== +==================== .. autoclass:: saltlakecge.saltlakecge.SaltLakeCGEModel :members: .. autoclass:: saltlakecge.equationlib.VarContainer @@ -345,7 +350,7 @@ analyses/socialvulnerability This class will be deprecated soon. Use :class:`socialvulnerabilityscore.SocialVulnerabilityScore` instead. analyses/socialvulnerabilityscore -============================ +================================= .. autoclass:: socialvulnerabilityscore.socialvulnerabilityscore.SocialVulnerabilityScore :members: @@ -378,7 +383,7 @@ analyses/transportationrecovery This class will be deprecated soon. Use :class:`trafficflowrecovery.TrafficFlowRecovery` instead. analyses/trafficflowrecovery -=============================== +============================ .. autoclass:: trafficflowrecovery.trafficflowrecovery.TrafficFlowRecovery :members: .. autoclass:: trafficflowrecovery.trafficflowrecoveryutil.TrafficFlowRecoveryUtil @@ -404,7 +409,7 @@ analyses/waterfacilitydamage :members: analyses/waterfacilityrepaircost -================================= +================================ .. autoclass:: waterfacilityrepaircost.waterfacilityrepaircost.WaterFacilityRepairCost :members: @@ -414,7 +419,7 @@ analyses/waterfacilityrestoration :members: analyses/wfnfunctionality -================================= +========================= .. autoclass:: wfnfunctionality.wfnfunctionality.WfnFunctionality :members: @@ -422,7 +427,7 @@ models ^^^^^^ models/hazard/earthquake -=================== +======================== .. autoclass:: models.earthquake.Earthquake :members: @@ -432,7 +437,7 @@ models/hazard/flood :members: models/hazard/hazard -=================== +==================== .. autoclass:: models.hazard.Hazard :members: @@ -487,7 +492,7 @@ models/mappingset :members: models/networkdataset -================= +===================== .. autoclass:: models.networkdataset.NetworkDataset :members: @@ -502,7 +507,7 @@ models/restorationcurveset :members: models/units -========================== +============ .. autoclass:: models.units.Units :members: @@ -515,7 +520,7 @@ utils/analysisutil :members: utils/cge_ml_file_util -================== +====================== .. autoclass:: utils.cge_ml_file_util.CGEMLFileUtil :members: diff --git a/pyincore/analyses/montecarlofailureprobability/montecarlofailureprobability.py b/pyincore/analyses/montecarlofailureprobability/montecarlofailureprobability.py index 2ae4235a5..2467a56da 100644 --- a/pyincore/analyses/montecarlofailureprobability/montecarlofailureprobability.py +++ b/pyincore/analyses/montecarlofailureprobability/montecarlofailureprobability.py @@ -1,339 +1,19 @@ -# Copyright (c) 2019 University of Illinois and others. All rights reserved. 
-# # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -import collections -import concurrent.futures -import numpy as np +from deprecated.sphinx import deprecated -from typing import List -from pyincore import BaseAnalysis, AnalysisUtil +from pyincore.analyses.montecarlolimitstateprobability import MonteCarloLimitStateProbability -class MonteCarloFailureProbability(BaseAnalysis): - """ - Args: - incore_client (IncoreClient): Service authentication. - - """ - +@deprecated(version='1.19.0', reason="This class will be deprecated soon. Use MonteCarloLimitStateProbability instead.") +class MonteCarloFailureProbability(): def __init__(self, incore_client): - super(MonteCarloFailureProbability, self).__init__(incore_client) - - def get_spec(self): - """Get specifications of the monte carlo failure probability analysis. - - Returns: - obj: A JSON object of specifications of the monte carlo failure probability analysis. - - """ - return { - 'name': 'monte-carlo-failure-probability', - 'description': 'calculate the probability of failure in monte-carlo simulation', - 'input_parameters': [ - { - 'id': 'result_name', - 'required': True, - 'description': 'basename of the result datasets. This analysis will create two outputs: failure ' - 'proability and failure state with the basename in the filename. ' - 'For example: "result_name = joplin_mcs_building" will result in ' - '"joplin_mcs_building_failure_state.csv" and ' - '"joplin_mcs_building_failure_probability.csv"', - 'type': str - }, - { - 'id': 'num_cpu', - 'required': False, - 'description': 'If using parallel execution, the number of cpus to request', - 'type': int - }, - { - 'id': 'num_samples', - 'required': True, - 'description': 'Number of MC samples', - 'type': int - }, - { - 'id': 'damage_interval_keys', - 'required': True, - 'description': 'Column name of the damage interval', - 'type': List[str] - }, - { - 'id': 'failure_state_keys', - 'required': True, - 'description': 'Column name of the damage interval that considered as damaged', - 'type': List[str] - }, - { - 'id': 'seed', - 'required': False, - 'description': 'Initial seed for the probabilistic model', - 'type': int - }, - ], - 'input_datasets': [ - { - 'id': 'damage', - 'required': True, - 'description': 'damage result that has damage intervals in it', - 'type': ['ergo:buildingDamageVer4', - 'ergo:buildingDamageVer5', - 'ergo:buildingDamageVer6', - 'ergo:nsBuildingInventoryDamage', - 'ergo:nsBuildingInventoryDamageVer2', - 'ergo:nsBuildingInventoryDamageVer3', - 'ergo:bridgeDamage', - 'ergo:bridgeDamageVer2', - 'ergo:bridgeDamageVer3', - 'ergo:waterFacilityDamageVer4', - 'ergo:waterFacilityDamageVer5', - 'ergo:waterFacilityDamageVer6', - 'ergo:roadDamage', - 'ergo:roadDamageVer2', - 'ergo:roadDamageVer3', - 'incore:epfDamage', - 'incore:epfDamageVer2', - 'incore:epfDamageVer3', - 'incore:pipelineDamage', - 'incore:pipelineDamageVer2', - 'incore:pipelineDamageVer3'] - }, - - ], - 'output_datasets': [ - { - 'id': 'failure_probability', - 'description': 'CSV file of failure probability', - 'type': 'incore:failureProbability' - }, - { - 'id': 'sample_failure_state', - 'description': 'CSV file of failure state for each sample', - 'type': 'incore:sampleFailureState' - }, - { - 'id': 'sample_damage_states', - 'description': 'CSV file of simulated damage states for each sample', - 'type': 'incore:sampleDamageState' - } - ] - } 
- - def run(self): - """Executes mc failure probability analysis.""" - - # read in file and parameters - damage = self.get_input_dataset("damage").get_csv_reader() - damage_result = AnalysisUtil.get_csv_table_rows(damage, ignore_first_row=False) - - # setting number of cpus to use - user_defined_cpu = 1 - if not self.get_parameter("num_cpu") is None and self.get_parameter( - "num_cpu") > 0: - user_defined_cpu = self.get_parameter("num_cpu") - - num_workers = AnalysisUtil.determine_parallelism_locally(self, - len( - damage_result), - user_defined_cpu) - - avg_bulk_input_size = int(len(damage_result) / num_workers) - inventory_args = [] - count = 0 - inventory_list = damage_result - - seed = self.get_parameter("seed") - seed_list = [] - if seed is not None: - while count < len(inventory_list): - inventory_args.append( - inventory_list[count:count + avg_bulk_input_size]) - seed_list.append([seed + i for i in range(count - 1, count + avg_bulk_input_size - 1)]) - count += avg_bulk_input_size - else: - while count < len(inventory_list): - inventory_args.append( - inventory_list[count:count + avg_bulk_input_size]) - seed_list.append([None for i in range(count - 1, count + avg_bulk_input_size - 1)]) - count += avg_bulk_input_size - - fs_results, fp_results, samples_results = self.monte_carlo_failure_probability_concurrent_future( - self.monte_carlo_failure_probability_bulk_input, num_workers, - inventory_args, seed_list) - self.set_result_csv_data("sample_failure_state", - fs_results, name=self.get_parameter("result_name") + "_failure_state") - self.set_result_csv_data("failure_probability", - fp_results, name=self.get_parameter("result_name") + "_failure_probability") - self.set_result_csv_data("sample_damage_states", - samples_results, name=self.get_parameter("result_name") + "_sample_damage_states") - return True - - def monte_carlo_failure_probability_concurrent_future(self, function_name, - parallelism, *args): - """Utilizes concurrent.future module. - - Args: - function_name (function): The function to be parallelized. - parallelism (int): Number of workers in parallelization. - *args: All the arguments in order to pass into parameter function_name. - - Returns: - list: A list of dictionary with id/guid and failure state for N samples. - list: A list dictionary with failure probability and other data/metadata. + self._delegate = MonteCarloLimitStateProbability(incore_client) + def __getattr__(self, name): """ - fs_output = [] - fp_output = [] - samples_output = [] - with concurrent.futures.ProcessPoolExecutor( - max_workers=parallelism) as executor: - for fs_ret, fp_ret, samples_ret in executor.map(function_name, *args): - fs_output.extend(fs_ret) - fp_output.extend(fp_ret) - samples_output.extend(samples_ret) - - return fs_output, fp_output, samples_output - - def monte_carlo_failure_probability_bulk_input(self, damage, seed_list): - """Run analysis for monte carlo failure probability calculation - - Args: - damage (obj): An output of building/bridge/waterfacility/epn damage that has damage interval. - seed_list (list): Random number generator seed per building for reproducibility. - - Returns: - fs_results (list): A list of dictionary with id/guid and failure state for N samples - fp_results (list): A list dictionary with failure probability and other data/metadata. 
- - """ - damage_interval_keys = self.get_parameter("damage_interval_keys") - failure_state_keys = self.get_parameter("failure_state_keys") - num_samples = self.get_parameter("num_samples") - - fs_result = [] - fp_result = [] - samples_output = [] - - i = 0 - for dmg in damage: - fs, fp, samples_result = self.monte_carlo_failure_probability(dmg, damage_interval_keys, failure_state_keys, - num_samples, seed_list[i]) - fs_result.append(fs) - fp_result.append(fp) - samples_output.append(samples_result) - i += 1 - - return fs_result, fp_result, samples_output - - def monte_carlo_failure_probability(self, dmg, damage_interval_keys, - failure_state_keys, num_samples, seed): - """Calculates building damage results for a single building. - - Args: - dmg (obj): Damage analysis output for a single entry. - damage_interval_keys (list): A list of the name of the damage intervals. - failure_state_keys (list): A list of the name of the damage state that is considered as failed. - num_samples (int): Number of samples for mc simulation. - seed (int): Random number generator seed for reproducibility. - - Returns: - dict: A dictionary with id/guid and failure state for N samples - dict: A dictionary with failure probability and other data/metadata. - dict: A dictionary with id/guid and damage states for N samples - - """ - # failure state - fs_result = collections.OrderedDict() - - # sample damage states - samples_result = collections.OrderedDict() - - # copying guid/id column to the sample damage failure table - if 'guid' in dmg.keys(): - fs_result['guid'] = dmg['guid'] - samples_result['guid'] = dmg['guid'] - - elif 'id' in dmg.keys(): - fs_result['id'] = dmg['id'] - samples_result['id'] = dmg['id'] - else: - fs_result['id'] = 'NA' - samples_result['id'] = 'NA' - - # failure probability - fp_result = collections.OrderedDict() - fp_result['guid'] = dmg['guid'] - - ds_sample = self.sample_damage_interval(dmg, damage_interval_keys, - num_samples, seed) - func, fp = self.calc_probability_failure_value(ds_sample, failure_state_keys) - - fs_result['failure'] = ",".join(func.values()) - fp_result['failure_probability'] = fp - samples_result['sample_damage_states'] = ','.join(ds_sample.values()) - - return fs_result, fp_result, samples_result - - def sample_damage_interval(self, dmg, damage_interval_keys, num_samples, seed): - """ - Dylan Sanderson code to calculate the Monte Carlo simulations of damage state. - - Args: - dmg (dict): Damage results that contains dmg interval values. - damage_interval_keys (list): Keys of the damage states. - num_samples (int): Number of simulation. - seed (int): Random number generator seed for reproducibility. - - Returns: - dict: A dictionary of damage states. - - """ - ds = {} - random_generator = np.random.RandomState(seed) - for i in range(num_samples): - # each sample should have a unique seed - rnd_num = random_generator.uniform(0, 1) - prob_val = 0 - flag = True - for ds_name in damage_interval_keys: - - if rnd_num < prob_val + AnalysisUtil.float_to_decimal(dmg[ds_name]): - ds['sample_{}'.format(i)] = ds_name - flag = False - break - else: - prob_val += AnalysisUtil.float_to_decimal(dmg[ds_name]) - if flag: - print("cannot determine MC damage state!") - break - - return ds - - def calc_probability_failure_value(self, ds_sample, failure_state_keys): - """ - Lisa Wang's approach to calculate a single value of failure probability. - - Args: - ds_sample (dict): A dictionary of damage states. - failure_state_keys (list): Damage state keys that considered as failure. 
- - Returns: - float: Failure state on each sample 0 (failed), 1 (not failed). - float: Failure probability (0 - 1). - + Delegate attribute access to the MonteCarloLimitStateProbability instance. """ - count = 0 - func = {} - for sample, state in ds_sample.items(): - if state in failure_state_keys: - func[sample] = "0" - count += 1 - else: - func[sample] = "1" - if len(ds_sample): - return func, count / len(ds_sample) - else: - return func, np.nan + return getattr(self._delegate, name) diff --git a/pyincore/analyses/montecarlolimitstateprobability/__init__.py b/pyincore/analyses/montecarlolimitstateprobability/__init__.py new file mode 100644 index 000000000..a358ba14c --- /dev/null +++ b/pyincore/analyses/montecarlolimitstateprobability/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2019 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +from pyincore.analyses.montecarlolimitstateprobability.montecarlolimitstateprobability import \ + MonteCarloLimitStateProbability diff --git a/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py b/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py new file mode 100644 index 000000000..9bc219544 --- /dev/null +++ b/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py @@ -0,0 +1,339 @@ +# Copyright (c) 2019 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +import collections +import concurrent.futures +import numpy as np + +from typing import List +from pyincore import BaseAnalysis, AnalysisUtil + + +class MonteCarloLimitStateProbability(BaseAnalysis): + """ + Args: + incore_client (IncoreClient): Service authentication. + + """ + + def __init__(self, incore_client): + super(MonteCarloLimitStateProbability, self).__init__(incore_client) + + def get_spec(self): + """Get specifications of the monte carlo limit state probability analysis. + + Returns: + obj: A JSON object of specifications of the monte carlo limit state probability analysis. + + """ + return { + 'name': 'monte-carlo-limit-state-probability', + 'description': 'calculate the probability of limit state in monte-carlo simulation', + 'input_parameters': [ + { + 'id': 'result_name', + 'required': True, + 'description': 'basename of the result datasets. This analysis will create two outputs: failure ' + 'proability and failure state with the basename in the filename. 
diff --git a/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py b/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py
new file mode 100644
index 000000000..9bc219544
--- /dev/null
+++ b/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py
@@ -0,0 +1,339 @@
+# Copyright (c) 2019 University of Illinois and others. All rights reserved.
+#
+# This program and the accompanying materials are made available under the
+# terms of the Mozilla Public License v2.0 which accompanies this distribution,
+# and is available at https://www.mozilla.org/en-US/MPL/2.0/
+
+import collections
+import concurrent.futures
+import numpy as np
+
+from typing import List
+from pyincore import BaseAnalysis, AnalysisUtil
+
+
+class MonteCarloLimitStateProbability(BaseAnalysis):
+    """
+    Args:
+        incore_client (IncoreClient): Service authentication.
+
+    """
+
+    def __init__(self, incore_client):
+        super(MonteCarloLimitStateProbability, self).__init__(incore_client)
+
+    def get_spec(self):
+        """Get specifications of the monte carlo limit state probability analysis.
+
+        Returns:
+            obj: A JSON object of specifications of the monte carlo limit state probability analysis.
+
+        """
+        return {
+            'name': 'monte-carlo-limit-state-probability',
+            'description': 'calculate the probability of limit state in monte-carlo simulation',
+            'input_parameters': [
+                {
+                    'id': 'result_name',
+                    'required': True,
+                    'description': 'basename of the result datasets. This analysis will create two outputs: failure '
+                                   'probability and failure state with the basename in the filename. '
+                                   'For example: "result_name = joplin_mcs_building" will result in '
+                                   '"joplin_mcs_building_failure_state.csv" and '
+                                   '"joplin_mcs_building_failure_probability.csv"',
+                    'type': str
+                },
+                {
+                    'id': 'num_cpu',
+                    'required': False,
+                    'description': 'If using parallel execution, the number of cpus to request',
+                    'type': int
+                },
+                {
+                    'id': 'num_samples',
+                    'required': True,
+                    'description': 'Number of MC samples',
+                    'type': int
+                },
+                {
+                    'id': 'damage_interval_keys',
+                    'required': True,
+                    'description': 'Column name of the damage interval',
+                    'type': List[str]
+                },
+                {
+                    'id': 'failure_state_keys',
+                    'required': True,
+                    'description': 'Column name of the damage interval that is considered as damaged',
+                    'type': List[str]
+                },
+                {
+                    'id': 'seed',
+                    'required': False,
+                    'description': 'Initial seed for the probabilistic model',
+                    'type': int
+                },
+            ],
+            'input_datasets': [
+                {
+                    'id': 'damage',
+                    'required': True,
+                    'description': 'damage result that has damage intervals in it',
+                    'type': ['ergo:buildingDamageVer4',
+                             'ergo:buildingDamageVer5',
+                             'ergo:buildingDamageVer6',
+                             'ergo:nsBuildingInventoryDamage',
+                             'ergo:nsBuildingInventoryDamageVer2',
+                             'ergo:nsBuildingInventoryDamageVer3',
+                             'ergo:bridgeDamage',
+                             'ergo:bridgeDamageVer2',
+                             'ergo:bridgeDamageVer3',
+                             'ergo:waterFacilityDamageVer4',
+                             'ergo:waterFacilityDamageVer5',
+                             'ergo:waterFacilityDamageVer6',
+                             'ergo:roadDamage',
+                             'ergo:roadDamageVer2',
+                             'ergo:roadDamageVer3',
+                             'incore:epfDamage',
+                             'incore:epfDamageVer2',
+                             'incore:epfDamageVer3',
+                             'incore:pipelineDamage',
+                             'incore:pipelineDamageVer2',
+                             'incore:pipelineDamageVer3']
+                },
+
+            ],
+            'output_datasets': [
+                {
+                    'id': 'failure_probability',
+                    'description': 'CSV file of failure probability',
+                    'type': 'incore:failureProbability'
+                },
+                {
+                    'id': 'sample_failure_state',
+                    'description': 'CSV file of failure state for each sample',
+                    'type': 'incore:sampleFailureState'
+                },
+                {
+                    'id': 'sample_damage_states',
+                    'description': 'CSV file of simulated damage states for each sample',
+                    'type': 'incore:sampleDamageState'
+                }
+            ]
+        }
+
+    def run(self):
+        """Executes mc limit state probability analysis."""
+
+        # read in file and parameters
+        damage = self.get_input_dataset("damage").get_csv_reader()
+        damage_result = AnalysisUtil.get_csv_table_rows(damage, ignore_first_row=False)
+
+        # setting number of cpus to use
+        user_defined_cpu = 1
+        if not self.get_parameter("num_cpu") is None and self.get_parameter(
+                "num_cpu") > 0:
+            user_defined_cpu = self.get_parameter("num_cpu")
+
+        num_workers = AnalysisUtil.determine_parallelism_locally(self,
+                                                                 len(
+                                                                     damage_result),
+                                                                 user_defined_cpu)
+
+        avg_bulk_input_size = int(len(damage_result) / num_workers)
+        inventory_args = []
+        count = 0
+        inventory_list = damage_result
+
+        seed = self.get_parameter("seed")
+        seed_list = []
+        if seed is not None:
+            while count < len(inventory_list):
+                inventory_args.append(
+                    inventory_list[count:count + avg_bulk_input_size])
+                seed_list.append([seed + i for i in range(count - 1, count + avg_bulk_input_size - 1)])
+                count += avg_bulk_input_size
+        else:
+            while count < len(inventory_list):
+                inventory_args.append(
+                    inventory_list[count:count + avg_bulk_input_size])
+                seed_list.append([None for i in range(count - 1, count + avg_bulk_input_size - 1)])
+                count += avg_bulk_input_size
+
+        fs_results, fp_results, samples_results = self.monte_carlo_failure_probability_concurrent_future(
+            self.monte_carlo_failure_probability_bulk_input, num_workers,
+            inventory_args, seed_list)
+
+        self.set_result_csv_data("sample_failure_state",
+                                 fs_results, name=self.get_parameter("result_name") + "_failure_state")
+        self.set_result_csv_data("failure_probability",
+                                 fp_results, name=self.get_parameter("result_name") + "_failure_probability")
+        self.set_result_csv_data("sample_damage_states",
+                                 samples_results, name=self.get_parameter("result_name") + "_sample_damage_states")
+        return True
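`run()` above splits the damage rows into per-worker chunks and builds a parallel `seed_list` so that every row gets its own deterministic seed when the optional `seed` parameter is set. Below is a simplified, standalone sketch of that chunk-plus-seed bookkeeping; the chunk size and seed arithmetic here are illustrative, not copied from pyincore.

```python
# Simplified sketch of chunking rows and pairing each row with a deterministic seed.
# Function name and seed arithmetic are assumptions made for illustration only.
def chunk_with_seeds(rows, num_workers, seed=None):
    chunk_size = max(1, len(rows) // num_workers)
    chunks, seed_lists = [], []
    for start in range(0, len(rows), chunk_size):
        chunk = rows[start:start + chunk_size]
        chunks.append(chunk)
        if seed is None:
            seed_lists.append([None] * len(chunk))
        else:
            # one seed per row keeps the per-row sampling reproducible
            seed_lists.append([seed + start + offset for offset in range(len(chunk))])
    return chunks, seed_lists


chunks, seeds = chunk_with_seeds(list(range(10)), num_workers=3, seed=1000)
print(chunks)  # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
print(seeds)   # [[1000, 1001, 1002], [1003, 1004, 1005], [1006, 1007, 1008], [1009]]
```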
+
+    def monte_carlo_failure_probability_concurrent_future(self, function_name,
+                                                          parallelism, *args):
+        """Utilizes the concurrent.futures module.
+
+        Args:
+            function_name (function): The function to be parallelized.
+            parallelism (int): Number of workers in parallelization.
+            *args: All the arguments in order to pass into parameter function_name.
+
+        Returns:
+            list: A list of dictionaries with id/guid and failure state for N samples.
+            list: A list of dictionaries with failure probability and other data/metadata.
+
+        """
+        fs_output = []
+        fp_output = []
+        samples_output = []
+        with concurrent.futures.ProcessPoolExecutor(
+                max_workers=parallelism) as executor:
+            for fs_ret, fp_ret, samples_ret in executor.map(function_name, *args):
+                fs_output.extend(fs_ret)
+                fp_output.extend(fp_ret)
+                samples_output.extend(samples_ret)
+
+        return fs_output, fp_output, samples_output
+
+    def monte_carlo_failure_probability_bulk_input(self, damage, seed_list):
+        """Run analysis for Monte Carlo failure probability calculation.
+
+        Args:
+            damage (obj): An output of building/bridge/waterfacility/epn damage that has damage interval.
+            seed_list (list): Random number generator seed per building for reproducibility.
+
+        Returns:
+            fs_results (list): A list of dictionaries with id/guid and failure state for N samples.
+            fp_results (list): A list of dictionaries with failure probability and other data/metadata.
+
+        """
+        damage_interval_keys = self.get_parameter("damage_interval_keys")
+        failure_state_keys = self.get_parameter("failure_state_keys")
+        num_samples = self.get_parameter("num_samples")
+
+        fs_result = []
+        fp_result = []
+        samples_output = []
+
+        i = 0
+        for dmg in damage:
+            fs, fp, samples_result = self.monte_carlo_failure_probability(dmg, damage_interval_keys, failure_state_keys,
+                                                                          num_samples, seed_list[i])
+            fs_result.append(fs)
+            fp_result.append(fp)
+            samples_output.append(samples_result)
+            i += 1
+
+        return fs_result, fp_result, samples_output
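`monte_carlo_failure_probability_concurrent_future()` fans the bulk function out over the argument chunks with `ProcessPoolExecutor.map` and flattens the per-chunk results. Below is a standalone sketch of the same pattern with a toy bulk function; the names `bulk_square` and `run_concurrent` are hypothetical and only illustrate the mechanics.

```python
# Standalone sketch of the executor.map pattern used above, with a toy bulk function.
import concurrent.futures


def bulk_square(numbers, offsets):
    # processes one chunk and returns one list per output "dataset"
    return [n * n for n in numbers], [n + o for n, o in zip(numbers, offsets)]


def run_concurrent(parallelism, *args):
    squares, shifted = [], []
    with concurrent.futures.ProcessPoolExecutor(max_workers=parallelism) as executor:
        # each call to bulk_square receives one chunk from every argument list
        for sq_ret, sh_ret in executor.map(bulk_square, *args):
            squares.extend(sq_ret)
            shifted.extend(sh_ret)
    return squares, shifted


if __name__ == "__main__":
    chunks = [[1, 2], [3, 4]]
    offset_chunks = [[10, 10], [20, 20]]
    print(run_concurrent(2, chunks, offset_chunks))  # ([1, 4, 9, 16], [11, 12, 23, 24])
```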
+
+    def monte_carlo_failure_probability(self, dmg, damage_interval_keys,
+                                        failure_state_keys, num_samples, seed):
+        """Calculates Monte Carlo failure state and failure probability for a single entry.
+
+        Args:
+            dmg (obj): Damage analysis output for a single entry.
+            damage_interval_keys (list): A list of the name of the damage intervals.
+            failure_state_keys (list): A list of the name of the damage state that is considered as failed.
+            num_samples (int): Number of samples for mc simulation.
+            seed (int): Random number generator seed for reproducibility.
+
+        Returns:
+            dict: A dictionary with id/guid and failure state for N samples
+            dict: A dictionary with failure probability and other data/metadata.
+            dict: A dictionary with id/guid and damage states for N samples
+
+        """
+        # failure state
+        fs_result = collections.OrderedDict()
+
+        # sample damage states
+        samples_result = collections.OrderedDict()
+
+        # copying guid/id column to the sample damage failure table
+        if 'guid' in dmg.keys():
+            fs_result['guid'] = dmg['guid']
+            samples_result['guid'] = dmg['guid']
+
+        elif 'id' in dmg.keys():
+            fs_result['id'] = dmg['id']
+            samples_result['id'] = dmg['id']
+        else:
+            fs_result['id'] = 'NA'
+            samples_result['id'] = 'NA'
+
+        # failure probability
+        fp_result = collections.OrderedDict()
+        fp_result['guid'] = dmg['guid']
+
+        ds_sample = self.sample_damage_interval(dmg, damage_interval_keys,
+                                                num_samples, seed)
+        func, fp = self.calc_probability_failure_value(ds_sample, failure_state_keys)
+
+        fs_result['failure'] = ",".join(func.values())
+        fp_result['failure_probability'] = fp
+        samples_result['sample_damage_states'] = ','.join(ds_sample.values())
+
+        return fs_result, fp_result, samples_result
+
+    def sample_damage_interval(self, dmg, damage_interval_keys, num_samples, seed):
+        """
+        Dylan Sanderson code to calculate the Monte Carlo simulations of damage state.
+
+        Args:
+            dmg (dict): Damage results that contains dmg interval values.
+            damage_interval_keys (list): Keys of the damage states.
+            num_samples (int): Number of simulations.
+            seed (int): Random number generator seed for reproducibility.
+
+        Returns:
+            dict: A dictionary of damage states.
+
+        """
+        ds = {}
+        random_generator = np.random.RandomState(seed)
+        for i in range(num_samples):
+            # each sample should have a unique seed
+            rnd_num = random_generator.uniform(0, 1)
+            prob_val = 0
+            flag = True
+            for ds_name in damage_interval_keys:
+
+                if rnd_num < prob_val + AnalysisUtil.float_to_decimal(dmg[ds_name]):
+                    ds['sample_{}'.format(i)] = ds_name
+                    flag = False
+                    break
+                else:
+                    prob_val += AnalysisUtil.float_to_decimal(dmg[ds_name])
+            if flag:
+                print("cannot determine MC damage state!")
+                break
+
+        return ds
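`sample_damage_interval()` draws one uniform number per sample and walks the damage-interval probabilities until the cumulative sum exceeds the draw, which is inverse-CDF sampling over a discrete distribution. The following is a worked sketch with made-up interval values; it uses plain floats, whereas the analysis converts values with `AnalysisUtil.float_to_decimal` to avoid rounding issues.

```python
# Worked sketch of the interval sampling logic with made-up damage probabilities.
import numpy as np

dmg = {"DS_0": 0.5, "DS_1": 0.3, "DS_2": 0.15, "DS_3": 0.05}
damage_interval_keys = ["DS_0", "DS_1", "DS_2", "DS_3"]

rng = np.random.RandomState(seed=42)
samples = {}
for i in range(5):
    rnd_num = rng.uniform(0, 1)
    prob_val = 0.0
    for ds_name in damage_interval_keys:
        # pick the first state whose cumulative probability exceeds the draw
        if rnd_num < prob_val + dmg[ds_name]:
            samples[f"sample_{i}"] = ds_name
            break
        prob_val += dmg[ds_name]

print(samples)  # e.g. {'sample_0': 'DS_0', 'sample_1': 'DS_3', ...}
```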
+
+    def calc_probability_failure_value(self, ds_sample, failure_state_keys):
+        """
+        Lisa Wang's approach to calculate a single value of failure probability.
+
+        Args:
+            ds_sample (dict): A dictionary of damage states.
+            failure_state_keys (list): Damage state keys that are considered as failure.
+
+        Returns:
+            dict: Failure state of each sample, "0" (failed) or "1" (not failed).
+            float: Failure probability (0 - 1).
+
+        """
+        count = 0
+        func = {}
+        for sample, state in ds_sample.items():
+            if state in failure_state_keys:
+                func[sample] = "0"
+                count += 1
+            else:
+                func[sample] = "1"
+        if len(ds_sample):
+            return func, count / len(ds_sample)
+        else:
+            return func, np.nan
diff --git a/tests/pyincore/analyses/montecarlolimitstateprobability/test_montecarlolimitstateprobability.py b/tests/pyincore/analyses/montecarlolimitstateprobability/test_montecarlolimitstateprobability.py
new file mode 100644
index 000000000..9828ce8c3
--- /dev/null
+++ b/tests/pyincore/analyses/montecarlolimitstateprobability/test_montecarlolimitstateprobability.py
@@ -0,0 +1,30 @@
+from pyincore.client import IncoreClient
+from pyincore.analyses.montecarlolimitstateprobability import \
+    MonteCarloLimitStateProbability
+import pyincore.globals as pyglobals
+
+
+def run_with_base_class():
+    client = IncoreClient(pyglobals.INCORE_API_DEV_URL)
+    mc = MonteCarloLimitStateProbability(client)
+
+    # # example of using local dataset
+    # damage_dataset = Dataset.from_file("memphis_bldg_dmg_result.csv",
+    #                                    "ergo:buildingDamageVer4")
+    # mc.set_input_dataset("damage", damage_dataset)
+    mc.load_remote_input_dataset("damage", "602d96e4b1db9c28aeeebdce")
+    mc.set_parameter("result_name", "building_damage")
+    mc.set_parameter("num_cpu", 8)
+    mc.set_parameter("num_samples", 10)
+    mc.set_parameter("damage_interval_keys",
+                     ["DS_0", "DS_1", "DS_2", "DS_3"])
+    mc.set_parameter("failure_state_keys", ["DS_1", "DS_2", "DS_3"])
+
+    # optional parameter
+    mc.set_parameter("seed", 2)
+
+    mc.run_analysis()
+
+
+if __name__ == '__main__':
+    run_with_base_class()
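In `calc_probability_failure_value()` a sample is marked "0" when its sampled damage state is one of the `failure_state_keys`, and the reported failure probability is simply the fraction of failed samples. Below is a small sketch with toy values showing how the `failure` string written to the sample_failure_state CSV relates to the number written to the failure_probability CSV; the specific values are hypothetical.

```python
# Sketch relating the per-sample "failure" string and failure_probability, with toy values.
failure_state_keys = ["DS_1", "DS_2", "DS_3"]
ds_sample = {"sample_0": "DS_0", "sample_1": "DS_3", "sample_2": "DS_1", "sample_3": "DS_0"}

func = {s: "0" if state in failure_state_keys else "1" for s, state in ds_sample.items()}
failure_probability = list(func.values()).count("0") / len(func)

print(",".join(func.values()))  # "1,0,0,1" -> the sample_failure_state entry
print(failure_probability)      # 0.5      -> the failure_probability entry
```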
From 35261dfc70ecd35c67f1ed4d6232353fce101d89 Mon Sep 17 00:00:00 2001
From: Chen Wang
Date: Thu, 6 Jun 2024 16:28:45 -0500
Subject: [PATCH 16/17] release 1.19.0

---
 CHANGELOG.md        | 2 +-
 docs/source/conf.py | 6 +++---
 pyincore/globals.py | 2 +-
 setup.py            | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6db3e6b4e..e48d19cd0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](http://keepachangelog.com/)
 and this project adheres to [Semantic Versioning](http://semver.org/).
 
-## [Unreleased]
+## [1.19.0] - 2024-06-12
 
 ### Changed
 - Rename Social Vulnerability Analysis to Social Vulnerability Index Analysis [#556](https://github.com/IN-CORE/pyincore/issues/556)
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 5cd8c1f43..579ad834a 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -33,9 +33,9 @@
 author = ''
 
 # The short X.Y version
-version = '1.18'
+version = '1.19'
 # The full version, including alpha/beta/rc tags
-release = '1.18.1'
+release = '1.19.0'
 
 
 # -- General configuration ---------------------------------------------------
@@ -237,4 +237,4 @@
 # -- Options for intersphinx extension ---------------------------------------
 
 # Example configuration for inter sphinx: refer to the Python standard library.
-# intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
\ No newline at end of file
+# intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
diff --git a/pyincore/globals.py b/pyincore/globals.py
index 6375b8810..4836f757a 100644
--- a/pyincore/globals.py
+++ b/pyincore/globals.py
@@ -10,7 +10,7 @@
 import os
 import shutil
 
-PACKAGE_VERSION = "1.18.1"
+PACKAGE_VERSION = "1.19.0"
 
 INCORE_API_PROD_URL = "https://incore.ncsa.illinois.edu"
 INCORE_API_DEV_URL = "https://incore-dev.ncsa.illinois.edu"
diff --git a/setup.py b/setup.py
index 3ea2d73df..9d490aa50 100644
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
 from setuptools import setup, find_packages
 
 # version number of pyincore
-version = '1.18.1'
+version = '1.19.0'
 
 with open("README.rst", encoding="utf-8") as f:
     readme = f.read()
From a41de0a356843110293f0241212effb2b966a517 Mon Sep 17 00:00:00 2001
From: Chen Wang
Date: Tue, 11 Jun 2024 11:37:59 -0500
Subject: [PATCH 17/17] add missing types (#586)

* add missing types

* Update joplinempiricalbuildingrestoration.py

removing trailing ,

* Update meandamage.py

---
 .../joplinempiricalbuildingrestoration.py  | 5 ++++-
 pyincore/analyses/meandamage/meandamage.py | 4 +++-
 .../montecarlolimitstateprobability.py     | 1 +
 3 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/pyincore/analyses/joplinempiricalbuildingrestoration/joplinempiricalbuildingrestoration.py b/pyincore/analyses/joplinempiricalbuildingrestoration/joplinempiricalbuildingrestoration.py
index 6e9299c9c..fd835eac5 100755
--- a/pyincore/analyses/joplinempiricalbuildingrestoration/joplinempiricalbuildingrestoration.py
+++ b/pyincore/analyses/joplinempiricalbuildingrestoration/joplinempiricalbuildingrestoration.py
@@ -170,7 +170,10 @@ def get_spec(self):
                          "ergo:buildingDamageVer6",
                          "ergo:buildingInventory",
                          "ergo:nsBuildingInventoryDamage",
-                         "ergo:nsBuildingInventoryDamageVer2"]
+                         "ergo:nsBuildingInventoryDamageVer2",
+                         'ergo:nsBuildingInventoryDamageVer3',
+                         'ergo:nsBuildingInventoryDamageVer4'
+                         ]
             },
             {
                 "id": "building_functionality_level",
diff --git a/pyincore/analyses/meandamage/meandamage.py b/pyincore/analyses/meandamage/meandamage.py
index db46bfe7e..483011d88 100644
--- a/pyincore/analyses/meandamage/meandamage.py
+++ b/pyincore/analyses/meandamage/meandamage.py
@@ -61,12 +61,14 @@ def get_spec(self):
                          'ergo:nsBuildingInventoryDamage',
                          'ergo:nsBuildingInventoryDamageVer2',
                          'ergo:nsBuildingInventoryDamageVer3',
+                         'ergo:nsBuildingInventoryDamageVer4',
                          'ergo:bridgeDamage',
                          'ergo:bridgeDamageVer2',
                          'ergo:bridgeDamageVer3',
                          'ergo:roadDamage',
                          'ergo:roadDamageVer2',
-                         'ergo:roadDamageVer3']
+                         'ergo:roadDamageVer3'
+                         ]
             },
             {
                 'id': 'dmg_ratios',
diff --git a/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py b/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py
index 9bc219544..1352b3a66 100644
--- a/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py
+++ b/pyincore/analyses/montecarlolimitstateprobability/montecarlolimitstateprobability.py
@@ -85,6 +85,7 @@ def get_spec(self):
                          'ergo:nsBuildingInventoryDamage',
                          'ergo:nsBuildingInventoryDamageVer2',
                          'ergo:nsBuildingInventoryDamageVer3',
+                         'ergo:nsBuildingInventoryDamageVer4',
                          'ergo:bridgeDamage',
                          'ergo:bridgeDamageVer2',
                          'ergo:bridgeDamageVer3',