From e084440c28bdbf0db6e8def5f0e8877e3cc6cf8f Mon Sep 17 00:00:00 2001 From: Lukas Jansen Date: Thu, 19 Dec 2024 16:36:25 +0100 Subject: [PATCH 1/5] add wiki table to complement number of vehicles; refactoring transport data script --- scripts/prepare_transport_data_input.py | 183 ++++++++++++------------ 1 file changed, 95 insertions(+), 88 deletions(-) diff --git a/scripts/prepare_transport_data_input.py b/scripts/prepare_transport_data_input.py index d932d1174..2ff1f5ed9 100644 --- a/scripts/prepare_transport_data_input.py +++ b/scripts/prepare_transport_data_input.py @@ -12,57 +12,76 @@ import pandas as pd from _helpers import BASE_DIR -# logger = logging.getLogger(__name__) +logger = logging.getLogger(__name__) +def _add_iso2_code_per_country_and_clean_data(df): + + cc = coco.CountryConverter() + df["country"] = cc.pandas_convert( + series=pd.Series(df["Country"]), to="ISO2", not_found="not found") + + df = df[df.country != "not found"] + + # Drop region names where country column contains list of countries + df = df[df.country.apply(lambda x: isinstance(x, str))] + + df = df.drop_duplicates(subset=["country"]) + + return df def download_number_of_vehicles(): """ - Downloads the Number of registered vehicles as .csv File. - - The following csv file was downloaded from the webpage - https://apps.who.int/gho/data/node.main.A995 - as a .csv file. + Downloads and returns the number of registered vehicles + as tabular data from WHO and Wikipedia. + + The csv data from the WHO website is imported + from 'https://apps.who.int/gho/data/node.main.A995'. + A few countries are missing in the WHO list (e.g. South Africa, Algeria). + Therefore, the number of vehicles per country table from Wikipedia + is also imported for completion (prio 2): + 'https://en.wikipedia.org/wiki/List_of_countries_and_territories_by_motor_vehicles_per_capita'. 
""" - fn = "https://apps.who.int/gho/athena/data/GHO/RS_194?filter=COUNTRY:*&ead=&x-sideaxis=COUNTRY;YEAR;DATASOURCE&x-topaxis=GHO&profile=crosstable&format=csv" - storage_options = {"User-Agent": "Mozilla/5.0"} - - # Read the 'Data' sheet directly from the csv file at the provided URL + + def _download_vehicles_data_from_gho(): + url = ("https://apps.who.int/gho/athena/data/GHO/RS_194?filter=COUNTRY:*&ead=&x-sideaxis=COUNTRY;YEAR;DATASOURCE&x-topaxis=GHO&profile=crosstable&format=csv") + storage_options = {"User-Agent": "Mozilla/5.0"} + df = pd.read_csv(url, storage_options=storage_options, encoding="utf8") + + df.rename(columns={"Countries, territories and areas": "Country", + "Number of registered vehicles": "number cars"}, + inplace=True) + + df["number cars"] = df["number cars"].str.replace(" ", "").replace("", np.nan) + + df = df.dropna(subset=["number cars"]) + + return df[["Country", "number cars"]] + + def _download_vehicles_data_from_wiki(): + url = ("https://en.wikipedia.org/wiki/List_of_countries_and_territories_by_motor_vehicles_per_capita") + df = pd.read_html(url)[0] + + df.rename(columns={"Location": "Country", "Vehicles": "number cars"}, + inplace=True) + + return df[["Country", "number cars"]] + try: - Nbr_vehicles_csv = pd.read_csv( - fn, storage_options=storage_options, encoding="utf8" - ) - print("File read successfully.") + nbr_vehicles = pd.concat([_download_vehicles_data_from_gho(), + _download_vehicles_data_from_wiki()], + ignore_index=True) + + nbr_vehicles["number cars"] = nbr_vehicles["number cars"].astype(int) + + nbr_vehicles = _add_iso2_code_per_country_and_clean_data(nbr_vehicles) + + return nbr_vehicles + except Exception as e: - print("Failed to read the file:", e) + logger.warning("Failed to read the file:", e, + "\nReturning an empty df and falling back on the hard-coded data.") return pd.DataFrame() - Nbr_vehicles_csv = Nbr_vehicles_csv.rename( - columns={ - "Countries, territories and areas": "Country", - "Number of registered vehicles": "number cars", - } - ) - - # Add ISO2 country code for each country - cc = coco.CountryConverter() - Country = pd.Series(Nbr_vehicles_csv["Country"]) - Nbr_vehicles_csv["country"] = cc.pandas_convert( - series=Country, to="ISO2", not_found="not found" - ) - - # # Remove spaces, Replace empty values with NaN - Nbr_vehicles_csv["number cars"] = ( - Nbr_vehicles_csv["number cars"].str.replace(" ", "").replace("", np.nan) - ) - - # Drop rows with NaN values in 'number cars' - Nbr_vehicles_csv = Nbr_vehicles_csv.dropna(subset=["number cars"]) - - # convert the 'number cars' to integer - Nbr_vehicles_csv["number cars"] = Nbr_vehicles_csv["number cars"].astype(int) - - return Nbr_vehicles_csv - def download_CO2_emissions(): """ @@ -71,40 +90,41 @@ def download_CO2_emissions(): The dataset is downloaded from the following link: https://data.worldbank.org/indicator/EN.CO2.TRAN.ZS?view=map It is until the year 2014. # TODO: Maybe search for more recent years. 
""" - url = ( - "https://api.worldbank.org/v2/en/indicator/EN.CO2.TRAN.ZS?downloadformat=excel" - ) - - # Read the 'Data' sheet directly from the Excel file at the provided URL + try: + url = ( + "https://api.worldbank.org/v2/en/indicator/EN.CO2.TRAN.ZS?downloadformat=excel" + ) + CO2_emissions = pd.read_excel(url, sheet_name="Data", skiprows=[0, 1, 2]) - print("File read successfully.") - except Exception as e: - print("Failed to read the file:", e) - return pd.DataFrame() + + CO2_emissions = CO2_emissions[ + ["Country Name", "Country Code", "Indicator Name", "2014"] + ] - CO2_emissions = CO2_emissions[ - ["Country Name", "Country Code", "Indicator Name", "2014"] - ] + # Calculate efficiency based on CO2 emissions from transport (% of total fuel combustion) + CO2_emissions["average fuel efficiency"] = (100 - CO2_emissions["2014"]) / 100 + + CO2_emissions = CO2_emissions.dropna(subset=["average fuel efficiency"]) - # Calculate efficiency based on CO2 emissions from transport (% of total fuel combustion) - CO2_emissions["average fuel efficiency"] = (100 - CO2_emissions["2014"]) / 100 + CO2_emissions = CO2_emissions.rename(columns={"Country Name": "Country"}) - # Add ISO2 country code for each country - CO2_emissions = CO2_emissions.rename(columns={"Country Name": "Country"}) - cc = coco.CountryConverter() - CO2_emissions.loc[:, "country"] = cc.pandas_convert( - series=CO2_emissions["Country"], to="ISO2", not_found="not found" - ) + CO2_emissions = _add_iso2_code_per_country_and_clean_data(CO2_emissions) + + CO2_emissions["average fuel efficiency"] = CO2_emissions[ + "average fuel efficiency" + ].astype(float) - # Drop region names that have no ISO2: - CO2_emissions = CO2_emissions[CO2_emissions.country != "not found"] + CO2_emissions.loc[:, "average fuel efficiency"] = CO2_emissions[ + "average fuel efficiency" + ].round(3) + + return CO2_emissions[['country', 'average fuel efficiency']] + + except Exception as e: + logger.warning("Failed to read the file:", e) + return pd.DataFrame() - # Drop region names where country column contains list of countries - CO2_emissions = CO2_emissions[ - CO2_emissions.country.apply(lambda x: isinstance(x, str)) - ] - return CO2_emissions if __name__ == "__main__": @@ -120,35 +140,22 @@ def download_CO2_emissions(): # store_path_data = Path.joinpath(Path().cwd(), "data") # country_list = country_list_to_geofk(snakemake.config["countries"])' - # Downloaded and prepare vehicles_csv: - vehicles_csv = download_number_of_vehicles().copy() + # Downloaded and prepare vehicles data: + vehicles_df = download_number_of_vehicles().copy() - # Downloaded and prepare CO2_emissions_csv: - CO2_emissions_csv = download_CO2_emissions().copy() + # Downloaded and prepare CO2_emissions data: + CO2_emissions_df = download_CO2_emissions().copy() - if vehicles_csv.empty or CO2_emissions_csv.empty: + if vehicles_df.empty or CO2_emissions_df.empty: # In case one of the urls is not working, we can use the hard-coded data src = BASE_DIR + "/data/temp_hard_coded/transport_data.csv" dest = snakemake.output.transport_data_input shutil.copy(src, dest) else: # Join the DataFrames by the 'country' column - merged_df = pd.merge(vehicles_csv, CO2_emissions_csv, on="country") + merged_df = pd.merge(vehicles_df, CO2_emissions_df, on="country") merged_df = merged_df[["country", "number cars", "average fuel efficiency"]] - # Drop rows with NaN values in 'average fuel efficiency' - merged_df = merged_df.dropna(subset=["average fuel efficiency"]) - - # Convert the 'average fuel efficiency' to float 
- merged_df["average fuel efficiency"] = merged_df[ - "average fuel efficiency" - ].astype(float) - - # Round the 'average fuel efficiency' to three decimal places - merged_df.loc[:, "average fuel efficiency"] = merged_df[ - "average fuel efficiency" - ].round(3) - # Save the merged DataFrame to a CSV file merged_df.to_csv( snakemake.output.transport_data_input, From 74d6732f1fce464311555f7eebea2403ba11c8bc Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 19 Dec 2024 16:33:57 +0000 Subject: [PATCH 2/5] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- scripts/prepare_transport_data_input.py | 101 +++++++++++++----------- 1 file changed, 55 insertions(+), 46 deletions(-) diff --git a/scripts/prepare_transport_data_input.py b/scripts/prepare_transport_data_input.py index 2ff1f5ed9..ae48f3acd 100644 --- a/scripts/prepare_transport_data_input.py +++ b/scripts/prepare_transport_data_input.py @@ -14,72 +14,84 @@ logger = logging.getLogger(__name__) + def _add_iso2_code_per_country_and_clean_data(df): - + cc = coco.CountryConverter() df["country"] = cc.pandas_convert( - series=pd.Series(df["Country"]), to="ISO2", not_found="not found") - + series=pd.Series(df["Country"]), to="ISO2", not_found="not found" + ) + df = df[df.country != "not found"] - + # Drop region names where country column contains list of countries df = df[df.country.apply(lambda x: isinstance(x, str))] - + df = df.drop_duplicates(subset=["country"]) - + return df + def download_number_of_vehicles(): """ - Downloads and returns the number of registered vehicles + Downloads and returns the number of registered vehicles as tabular data from WHO and Wikipedia. - - The csv data from the WHO website is imported - from 'https://apps.who.int/gho/data/node.main.A995'. - A few countries are missing in the WHO list (e.g. South Africa, Algeria). - Therefore, the number of vehicles per country table from Wikipedia + + The csv data from the WHO website is imported + from 'https://apps.who.int/gho/data/node.main.A995'. + A few countries are missing in the WHO list (e.g. South Africa, Algeria). + Therefore, the number of vehicles per country table from Wikipedia is also imported for completion (prio 2): 'https://en.wikipedia.org/wiki/List_of_countries_and_territories_by_motor_vehicles_per_capita'. 
""" - + def _download_vehicles_data_from_gho(): - url = ("https://apps.who.int/gho/athena/data/GHO/RS_194?filter=COUNTRY:*&ead=&x-sideaxis=COUNTRY;YEAR;DATASOURCE&x-topaxis=GHO&profile=crosstable&format=csv") + url = "https://apps.who.int/gho/athena/data/GHO/RS_194?filter=COUNTRY:*&ead=&x-sideaxis=COUNTRY;YEAR;DATASOURCE&x-topaxis=GHO&profile=crosstable&format=csv" storage_options = {"User-Agent": "Mozilla/5.0"} df = pd.read_csv(url, storage_options=storage_options, encoding="utf8") - - df.rename(columns={"Countries, territories and areas": "Country", - "Number of registered vehicles": "number cars"}, - inplace=True) - + + df.rename( + columns={ + "Countries, territories and areas": "Country", + "Number of registered vehicles": "number cars", + }, + inplace=True, + ) + df["number cars"] = df["number cars"].str.replace(" ", "").replace("", np.nan) - + df = df.dropna(subset=["number cars"]) - + return df[["Country", "number cars"]] def _download_vehicles_data_from_wiki(): - url = ("https://en.wikipedia.org/wiki/List_of_countries_and_territories_by_motor_vehicles_per_capita") + url = "https://en.wikipedia.org/wiki/List_of_countries_and_territories_by_motor_vehicles_per_capita" df = pd.read_html(url)[0] - - df.rename(columns={"Location": "Country", "Vehicles": "number cars"}, - inplace=True) - + + df.rename( + columns={"Location": "Country", "Vehicles": "number cars"}, inplace=True + ) + return df[["Country", "number cars"]] - + try: - nbr_vehicles = pd.concat([_download_vehicles_data_from_gho(), - _download_vehicles_data_from_wiki()], - ignore_index=True) - + nbr_vehicles = pd.concat( + [_download_vehicles_data_from_gho(), _download_vehicles_data_from_wiki()], + ignore_index=True, + ) + nbr_vehicles["number cars"] = nbr_vehicles["number cars"].astype(int) - + nbr_vehicles = _add_iso2_code_per_country_and_clean_data(nbr_vehicles) return nbr_vehicles - + except Exception as e: - logger.warning("Failed to read the file:", e, - "\nReturning an empty df and falling back on the hard-coded data.") + logger.warning( + "Failed to read the file:", + e, + "\nReturning an empty df and falling back on the hard-coded data.", + ) return pd.DataFrame() @@ -90,27 +102,25 @@ def download_CO2_emissions(): The dataset is downloaded from the following link: https://data.worldbank.org/indicator/EN.CO2.TRAN.ZS?view=map It is until the year 2014. # TODO: Maybe search for more recent years. 
""" - + try: - url = ( - "https://api.worldbank.org/v2/en/indicator/EN.CO2.TRAN.ZS?downloadformat=excel" - ) - + url = "https://api.worldbank.org/v2/en/indicator/EN.CO2.TRAN.ZS?downloadformat=excel" + CO2_emissions = pd.read_excel(url, sheet_name="Data", skiprows=[0, 1, 2]) - + CO2_emissions = CO2_emissions[ ["Country Name", "Country Code", "Indicator Name", "2014"] ] # Calculate efficiency based on CO2 emissions from transport (% of total fuel combustion) CO2_emissions["average fuel efficiency"] = (100 - CO2_emissions["2014"]) / 100 - + CO2_emissions = CO2_emissions.dropna(subset=["average fuel efficiency"]) CO2_emissions = CO2_emissions.rename(columns={"Country Name": "Country"}) CO2_emissions = _add_iso2_code_per_country_and_clean_data(CO2_emissions) - + CO2_emissions["average fuel efficiency"] = CO2_emissions[ "average fuel efficiency" ].astype(float) @@ -119,14 +129,13 @@ def download_CO2_emissions(): "average fuel efficiency" ].round(3) - return CO2_emissions[['country', 'average fuel efficiency']] - + return CO2_emissions[["country", "average fuel efficiency"]] + except Exception as e: logger.warning("Failed to read the file:", e) return pd.DataFrame() - if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake From b8746c0efe343abfeb616be2c3fb0252b1323f08 Mon Sep 17 00:00:00 2001 From: Lukas Jansen Date: Fri, 20 Dec 2024 13:32:32 +0100 Subject: [PATCH 3/5] add minor doc strings; move drop_duplicates and exception --- scripts/prepare_transport_data_input.py | 75 +++++++++++++------------ 1 file changed, 40 insertions(+), 35 deletions(-) diff --git a/scripts/prepare_transport_data_input.py b/scripts/prepare_transport_data_input.py index 2ff1f5ed9..073a2a146 100644 --- a/scripts/prepare_transport_data_input.py +++ b/scripts/prepare_transport_data_input.py @@ -15,6 +15,10 @@ logger = logging.getLogger(__name__) def _add_iso2_code_per_country_and_clean_data(df): + """ + Converts 'Country' names to ISO2 codes in a new 'country' column. + Cleans DataFrame by removing rows with invalid 'country' values. + """ cc = coco.CountryConverter() df["country"] = cc.pandas_convert( @@ -25,8 +29,6 @@ def _add_iso2_code_per_country_and_clean_data(df): # Drop region names where country column contains list of countries df = df[df.country.apply(lambda x: isinstance(x, str))] - df = df.drop_duplicates(subset=["country"]) - return df def download_number_of_vehicles(): @@ -70,18 +72,20 @@ def _download_vehicles_data_from_wiki(): nbr_vehicles = pd.concat([_download_vehicles_data_from_gho(), _download_vehicles_data_from_wiki()], ignore_index=True) - - nbr_vehicles["number cars"] = nbr_vehicles["number cars"].astype(int) - - nbr_vehicles = _add_iso2_code_per_country_and_clean_data(nbr_vehicles) - - return nbr_vehicles - except Exception as e: logger.warning("Failed to read the file:", e, "\nReturning an empty df and falling back on the hard-coded data.") return pd.DataFrame() + + nbr_vehicles["number cars"] = nbr_vehicles["number cars"].astype(int) + + nbr_vehicles = _add_iso2_code_per_country_and_clean_data(nbr_vehicles) + + # Drops duplicates and keeps WHO-GHO data in case of duplicates + nbr_vehicles = nbr_vehicles.drop_duplicates(subset=["country"],keep="first") + return nbr_vehicles + def download_CO2_emissions(): """ @@ -91,39 +95,40 @@ def download_CO2_emissions(): It is until the year 2014. # TODO: Maybe search for more recent years. 
""" + url = ( + "https://api.worldbank.org/v2/en/indicator/EN.CO2.TRAN.ZS?downloadformat=excel" + ) + + # Read the 'Data' sheet directly from the Excel file at the provided URL try: - url = ( - "https://api.worldbank.org/v2/en/indicator/EN.CO2.TRAN.ZS?downloadformat=excel" - ) - CO2_emissions = pd.read_excel(url, sheet_name="Data", skiprows=[0, 1, 2]) - - CO2_emissions = CO2_emissions[ - ["Country Name", "Country Code", "Indicator Name", "2014"] - ] - - # Calculate efficiency based on CO2 emissions from transport (% of total fuel combustion) - CO2_emissions["average fuel efficiency"] = (100 - CO2_emissions["2014"]) / 100 - - CO2_emissions = CO2_emissions.dropna(subset=["average fuel efficiency"]) + print("File read successfully.") + except Exception as e: + print("Failed to read the file:", e) + return pd.DataFrame() - CO2_emissions = CO2_emissions.rename(columns={"Country Name": "Country"}) + CO2_emissions = CO2_emissions[ + ["Country Name", "Country Code", "Indicator Name", "2014"] + ] - CO2_emissions = _add_iso2_code_per_country_and_clean_data(CO2_emissions) - - CO2_emissions["average fuel efficiency"] = CO2_emissions[ - "average fuel efficiency" - ].astype(float) + # Calculate efficiency based on CO2 emissions from transport (% of total fuel combustion) + CO2_emissions["average fuel efficiency"] = (100 - CO2_emissions["2014"]) / 100 + + CO2_emissions = CO2_emissions.dropna(subset=["average fuel efficiency"]) - CO2_emissions.loc[:, "average fuel efficiency"] = CO2_emissions[ - "average fuel efficiency" - ].round(3) + CO2_emissions = CO2_emissions.rename(columns={"Country Name": "Country"}) - return CO2_emissions[['country', 'average fuel efficiency']] + CO2_emissions = _add_iso2_code_per_country_and_clean_data(CO2_emissions) - except Exception as e: - logger.warning("Failed to read the file:", e) - return pd.DataFrame() + CO2_emissions["average fuel efficiency"] = CO2_emissions[ + "average fuel efficiency" + ].astype(float) + + CO2_emissions.loc[:, "average fuel efficiency"] = CO2_emissions[ + "average fuel efficiency" + ].round(3) + + return CO2_emissions[['country', 'average fuel efficiency']] From 4ef4556f0377d9284d9dff718d8da8b53f62089e Mon Sep 17 00:00:00 2001 From: Lukas Jansen Date: Fri, 20 Dec 2024 13:34:33 +0100 Subject: [PATCH 4/5] add PR #1244 to release_notes as advised --- doc/release_notes.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 9d0ec07c7..85d7056ff 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -13,6 +13,7 @@ This part of documentation collects descriptive release notes to capture the mai **New Features and Major Changes** +* Add wikipedia source to transport_data `PR #1244 `__ **Minor Changes and bug-fixing** From ff419fae99fd41328890ee539ee74d72dc71e931 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 20 Dec 2024 16:45:24 +0000 Subject: [PATCH 5/5] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- scripts/prepare_transport_data_input.py | 34 +++++++++++++------------ 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/scripts/prepare_transport_data_input.py b/scripts/prepare_transport_data_input.py index dd97234fb..de139694e 100644 --- a/scripts/prepare_transport_data_input.py +++ b/scripts/prepare_transport_data_input.py @@ -73,10 +73,7 @@ def _download_vehicles_data_from_wiki(): df = pd.read_html(url)[0] df.rename( - columns={ - "Location": "Country", 
- "Vehicles": "number cars"}, - inplace=True + columns={"Location": "Country", "Vehicles": "number cars"}, inplace=True ) return df[["Country", "number cars"]] @@ -84,21 +81,25 @@ def _download_vehicles_data_from_wiki(): try: nbr_vehicles = pd.concat( [_download_vehicles_data_from_gho(), _download_vehicles_data_from_wiki()], - ignore_index=True) + ignore_index=True, + ) except Exception as e: - logger.warning("Failed to read the file:", e, - "\nReturning an empty df and falling back on the hard-coded data.") + logger.warning( + "Failed to read the file:", + e, + "\nReturning an empty df and falling back on the hard-coded data.", + ) return pd.DataFrame() - + nbr_vehicles["number cars"] = nbr_vehicles["number cars"].astype(int) nbr_vehicles = _add_iso2_code_per_country_and_clean_data(nbr_vehicles) # Drops duplicates and keeps WHO-GHO data in case of duplicates - nbr_vehicles = nbr_vehicles.drop_duplicates(subset=["country"],keep="first") + nbr_vehicles = nbr_vehicles.drop_duplicates(subset=["country"], keep="first") return nbr_vehicles - + def download_CO2_emissions(): """ @@ -107,8 +108,10 @@ def download_CO2_emissions(): The dataset is downloaded from the following link: https://data.worldbank.org/indicator/EN.CO2.TRAN.ZS?view=map It is until the year 2014. # TODO: Maybe search for more recent years. """ - - url = "https://api.worldbank.org/v2/en/indicator/EN.CO2.TRAN.ZS?downloadformat=excel" + + url = ( + "https://api.worldbank.org/v2/en/indicator/EN.CO2.TRAN.ZS?downloadformat=excel" + ) # Read the 'Data' sheet directly from the Excel file at the provided URL try: @@ -124,13 +127,13 @@ def download_CO2_emissions(): # Calculate efficiency based on CO2 emissions from transport (% of total fuel combustion) CO2_emissions["average fuel efficiency"] = (100 - CO2_emissions["2014"]) / 100 - + CO2_emissions = CO2_emissions.dropna(subset=["average fuel efficiency"]) CO2_emissions = CO2_emissions.rename(columns={"Country Name": "Country"}) CO2_emissions = _add_iso2_code_per_country_and_clean_data(CO2_emissions) - + CO2_emissions["average fuel efficiency"] = CO2_emissions[ "average fuel efficiency" ].astype(float) @@ -139,8 +142,7 @@ def download_CO2_emissions(): "average fuel efficiency" ].round(3) - return CO2_emissions[['country', 'average fuel efficiency']] - + return CO2_emissions[["country", "average fuel efficiency"]] if __name__ == "__main__":