diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index a243a304b..8ab61e1e7 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -31,8 +31,8 @@ jobs:
             env_file: envs/linux-pinned.yaml
           - os: macos
             env_file: envs/macos-pinned.yaml
-          # - os: windows
-          #   env_file: envs/windows-pinned.yaml
+          - os: windows
+            env_file: envs/windows-pinned.yaml
     defaults:
       run:
diff --git a/README.md b/README.md
index c1f9e0ef3..d9325b303 100644
--- a/README.md
+++ b/README.md
@@ -121,10 +121,10 @@ There are multiple ways to get involved and learn more about our work:

 ## Running the model in previous versions

-The model can be run in previous versions by checking out the respective tag. For instance, to run the model in version 0.4.1, which is the last version before the repo `pypsa-earth-sec` was merged, the following command can be used:
+The model can be run in previous versions by checking out the respective tag. For instance, to run the model in version 0.6.0, which is the last version before the repo `pypsa-earth-sec` was merged, the following command can be used:

 ```bash
-git checkout v0.4.1
+git checkout v0.6.0
 ```

 After checking out the tag, the model can be run as usual. Please make sure to install the required packages for the respective version.
diff --git a/Snakefile b/Snakefile
index 814a5d4b1..b54a82e7a 100644
--- a/Snakefile
+++ b/Snakefile
@@ -553,6 +553,7 @@ rule add_electricity:

 rule simplify_network:
     params:
+        aggregation_strategies=config["cluster_options"]["aggregation_strategies"],
        renewable=config["renewable"],
        geo_crs=config["crs"]["geo_crs"],
        cluster_options=config["cluster_options"],
@@ -595,6 +596,7 @@ if config["augmented_line_connection"].get("add_to_snakefile", False) == True:

     rule cluster_network:
         params:
+            aggregation_strategies=config["cluster_options"]["aggregation_strategies"],
            build_shape_options=config["build_shape_options"],
            electricity=config["electricity"],
            costs=config["costs"],
@@ -680,6 +682,7 @@ if config["augmented_line_connection"].get("add_to_snakefile", False) == False:

     rule cluster_network:
         params:
+            aggregation_strategies=config["cluster_options"]["aggregation_strategies"],
            build_shape_options=config["build_shape_options"],
            electricity=config["electricity"],
            costs=config["costs"],
diff --git a/config.default.yaml b/config.default.yaml
index e41820df9..56e03a56e 100644
--- a/config.default.yaml
+++ b/config.default.yaml
@@ -98,6 +98,7 @@ cluster_options:
       p_nom_max: sum
       p_nom_min: sum
       p_min_pu: mean
+      p_max_pu: weighted_average
       marginal_cost: mean
       committable: any
       ramp_limit_up: max
diff --git a/doc/release_notes.rst b/doc/release_notes.rst
index 4843b1dfd..b28b545b5 100644
--- a/doc/release_notes.rst
+++ b/doc/release_notes.rst
@@ -19,6 +19,10 @@ This part of documentation collects descriptive release notes to capture the mai
 * Add a function to calculate length-based efficiencies and apply it to the H2 pipelines. `PR #1192 `__

+* Support of Linopy for Power and Sector-Coupled Modelling and latest PyPSA version `PR #1172 `__
+
+* Update workflow to geopandas >= 1.0 `PR #1276 `__
+
 **Minor Changes and bug-fixing**

 * Prevent computation of powerplantmatching if replace option is selected for custom_powerplants `PR #1281 `__
@@ -156,6 +160,7 @@ PyPSA-Earth 0.4.0
 * Add an option to use csv format for custom demand imports. `PR #995 `__
+
 **Minor Changes and bug-fixing**

 * Minor bug-fixing to run the cluster wildcard min `PR #1019 `__
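Editor's note: the Snakefile hunks above thread `cluster_options.aggregation_strategies` into the rules as a dedicated param, and config.default.yaml gains the new `p_max_pu: weighted_average` strategy. A minimal sketch (not part of the PR) of the dictionary shape that `snakemake.params.aggregation_strategies` now carries, mirroring the config keys shown above:

```python
# Hypothetical illustration of the params payload built from config.default.yaml.
# Strategy names stay plain strings; PyPSA >= 0.25 resolves them itself.
aggregation_strategies = {
    "generators": {
        "p_nom_max": "sum",
        "p_nom_min": "sum",
        "p_min_pu": "mean",
        "p_max_pu": "weighted_average",  # new in this PR
        "marginal_cost": "mean",
        "committable": "any",
        "ramp_limit_up": "max",
    },
}
```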
diff --git a/envs/environment.yaml b/envs/environment.yaml
index 4c172b36e..c336868b1 100644
--- a/envs/environment.yaml
+++ b/envs/environment.yaml
@@ -12,7 +12,8 @@ dependencies:
 - pip
 - mamba # esp for windows build
-- pypsa>=0.24, <0.25
+- pypsa>=0.25
+# - atlite>=0.2.4 # until https://github.com/PyPSA/atlite/issues/244 is resolved
 - dask
 # currently the packages are being installed with pip
 # need to move back to conda once the issues will be resolved
@@ -28,13 +29,14 @@ dependencies:
 - memory_profiler
 - ruamel.yaml<=0.17.26
 - pytables
+- pyscipopt # added to comply with the quadratic objective requirement of the clustering script
 - lxml
 - numpy
 # starting from 1.3.5 numpoly requires numpy>2.0 which leads to issues
 - numpoly<=1.3.4
 - pandas
-- geopandas>=0.11.0, <=0.14.3
-- fiona!=1.8.22, <1.10.0
+- geopandas>=1
+- fiona>=1.10
 - xarray>=2023.11.0, <2023.12.0
 - netcdf4
 - networkx
@@ -49,6 +51,7 @@ dependencies:
 - pyogrio
 - numba
 - py7zr
+- tsam>=1.1.0

 # Keep in conda environment when calling ipython
 - ipython
@@ -80,11 +83,9 @@ dependencies:

 # Default solver for tests (required for CI)
 - glpk
-- ipopt
 - gurobi

 - pip:
   - git+https://github.com/davide-f/google-drive-downloader@master # google drive with fix for virus scan
-  - tsam>=1.1.0
   - chaospy # lastest version only available on pip
   - fake_useragent
diff --git a/envs/linux-pinned.yaml b/envs/linux-pinned.yaml
index 53b2b8f48..d35941c07 100644
--- a/envs/linux-pinned.yaml
+++ b/envs/linux-pinned.yaml
@@ -92,6 +92,7 @@ dependencies:
 - contourpy=1.3.1
 - country_converter=1.2
 - cpp-expected=1.1.0
+- cppad=20240000.7
 - cycler=0.12.1
 - cyrus-sasl=2.1.27
 - cytoolz=1.0.1
@@ -119,7 +120,7 @@ dependencies:
 - executing=2.1.0
 - expat=2.6.4
 - filelock=3.16.1
-- fiona=1.9.6
+- fiona=1.10.1
 - fmt=11.0.2
 - folium=0.19.2
 - font-ttf-dejavu-sans-mono=2.37
@@ -140,8 +141,8 @@ dependencies:
 - gdk-pixbuf=2.42.12
 - geographiclib=2.0
 - geojson-rewind=1.1.0
-- geopandas=0.14.3
-- geopandas-base=0.14.3
+- geopandas=1.0.1
+- geopandas-base=1.0.1
 - geopy=2.4.1
 - geos=3.13.0
 - geotiff=1.7.3
@@ -350,6 +351,7 @@ dependencies:
 - metis=5.1.0
 - minizip=4.0.7
 - mistune=3.0.2
+- mpfr=4.2.1
 - mpg123=1.32.9
 - msgpack-python=1.1.0
 - multipledispatch=0.6.0
@@ -432,13 +434,14 @@ dependencies:
 - pydoe2=1.3.0
 - pygments=2.18.0
 - pyogrio=0.10.0
-- pyomo=6.8.2
+- pyomo=6.6.1
 - pyparsing=3.2.0
 - pyppmd=1.1.0
 - pyproj=3.7.0
-- pypsa=0.24.0
+- pypsa=0.28.0
 - pyqt=5.15.9
 - pyqt5-sip=12.12.2
+- pyscipopt=5.2.1
 - pyshp=2.3.1
 - pysocks=1.7.1
 - pytables=3.10.1
@@ -469,11 +472,11 @@ dependencies:
 - rfc3986-validator=0.1.1
 - rioxarray=0.17.0
 - rpds-py=0.22.3
-- rtree=1.3.0
 - ruamel.yaml=0.17.26
 - ruamel.yaml.clib=0.2.8
 - s2n=1.5.10
 - scikit-learn=1.6.0
+- scip=9.2.0
 - scipy=1.14.1
 - seaborn=0.13.2
 - seaborn-base=0.13.2
@@ -497,6 +500,7 @@ dependencies:
 - statsmodels=0.14.4
 - stopit=1.1.2
 - tabulate=0.9.0
+- tbb=2022.0.0
 - tblib=3.0.0
 - terminado=0.18.1
 - texttable=1.7.0
@@ -512,6 +516,7 @@ dependencies:
 - tornado=6.4.2
 - tqdm=4.67.1
 - traitlets=5.14.3
+- tsam=2.3.6
 - types-python-dateutil=2.9.0.20241206
 - typing-extensions=4.12.2
 - typing_extensions=4.12.2
diff --git a/envs/macos-pinned.yaml b/envs/macos-pinned.yaml
index 38c73a2fc..c6d1be082 100644
--- a/envs/macos-pinned.yaml
+++ b/envs/macos-pinned.yaml
@@ -89,6 +89,7 @@ dependencies:
 - contourpy=1.3.1
 - country_converter=1.2
 - cpp-expected=1.1.0
+- cppad=20240000.7
 - cycler=0.12.1
 - cyrus-sasl=2.1.27
 - cytoolz=1.0.1
@@ -114,7 +115,7 @@ dependencies:
 - exceptiongroup=1.2.2
 - executing=2.1.0
 - filelock=3.16.1
-- fiona=1.9.6
+- fiona=1.10.1
 - fmt=11.0.2
 - folium=0.19.2
 - font-ttf-dejavu-sans-mono=2.37
@@ -135,8 +136,8 @@ dependencies:
 - gdk-pixbuf=2.42.12
 - geographiclib=2.0
 - geojson-rewind=1.1.0
-- geopandas=0.14.3
-- geopandas-base=0.14.3
+- geopandas=1.0.1
+- geopandas-base=1.0.1
 - geopy=2.4.1
 - geos=3.13.0
 - geotiff=1.7.3
@@ -243,6 +244,7 @@ dependencies:
 - libgoogle-cloud=2.32.0
 - libgoogle-cloud-storage=2.32.0
 - libgrpc=1.67.1
+- libhwloc=2.11.2
 - libiconv=1.17
 - libintl=0.22.5
 - libjpeg-turbo=3.0.0
@@ -303,6 +305,7 @@ dependencies:
 - metis=5.1.0
 - minizip=4.0.7
 - mistune=3.0.2
+- mpfr=4.2.1
 - msgpack-python=1.1.0
 - multipledispatch=0.6.0
 - multiurl=0.3.3
@@ -382,11 +385,12 @@ dependencies:
 - pyobjc-core=10.3.2
 - pyobjc-framework-cocoa=10.3.2
 - pyogrio=0.10.0
-- pyomo=6.8.2
+- pyomo=6.6.1
 - pyparsing=3.2.0
 - pyppmd=1.1.0
 - pyproj=3.7.0
-- pypsa=0.24.0
+- pypsa=0.28.0
+- pyscipopt=5.2.1
 - pyshp=2.3.1
 - pysocks=1.7.1
 - pytables=3.10.1
@@ -416,10 +420,10 @@ dependencies:
 - rfc3986-validator=0.1.1
 - rioxarray=0.17.0
 - rpds-py=0.22.3
-- rtree=1.3.0
 - ruamel.yaml=0.17.26
 - ruamel.yaml.clib=0.2.8
 - scikit-learn=1.6.0
+- scip=9.2.0
 - scipy=1.14.1
 - seaborn=0.13.2
 - seaborn-base=0.13.2
@@ -442,6 +446,7 @@ dependencies:
 - statsmodels=0.14.4
 - stopit=1.1.2
 - tabulate=0.9.0
+- tbb=2022.0.0
 - tblib=3.0.0
 - terminado=0.18.1
 - texttable=1.7.0
@@ -456,6 +461,7 @@ dependencies:
 - tornado=6.4.2
 - tqdm=4.67.1
 - traitlets=5.14.3
+- tsam=2.3.6
 - types-python-dateutil=2.9.0.20241206
 - typing-extensions=4.12.2
 - typing_extensions=4.12.2
@@ -480,7 +486,6 @@ dependencies:
 - xarray=2023.11.0
 - xerces-c=3.2.5
 - xlrd=2.0.1
-- xorg-libxau=1.0.12
 - xorg-libxdmcp=1.1.5
 - xyzservices=2024.9.0
 - yaml=0.2.5
diff --git a/envs/windows-pinned.yaml b/envs/windows-pinned.yaml
index 4ac607945..c43b70e60 100644
--- a/envs/windows-pinned.yaml
+++ b/envs/windows-pinned.yaml
@@ -82,6 +82,7 @@ dependencies:
 - contourpy=1.3.1
 - country_converter=1.2
 - cpp-expected=1.1.0
+- cppad=20240000.7
 - cpython=3.10.16
 - cycler=0.12.1
 - cytoolz=1.0.1
@@ -107,7 +108,7 @@ dependencies:
 - exceptiongroup=1.2.2
 - executing=2.1.0
 - filelock=3.16.1
-- fiona=1.9.6
+- fiona=1.10.1
 - fmt=11.0.2
 - folium=0.19.2
 - font-ttf-dejavu-sans-mono=2.37
@@ -127,8 +128,8 @@ dependencies:
 - gdal=3.9.3
 - geographiclib=2.0
 - geojson-rewind=1.1.0
-- geopandas=0.14.3
-- geopandas-base=0.14.3
+- geopandas=1.0.1
+- geopandas-base=1.0.1
 - geopy=2.4.1
 - geos=3.13.0
 - geotiff=1.7.3
@@ -140,6 +141,7 @@ dependencies:
 - glib=2.82.2
 - glib-tools=2.82.2
 - glpk=5.0
+- gmp=6.3.0
 - graphite2=1.3.13
 - graphviz=12.0.0
 - gst-plugins-base=1.24.7
@@ -200,6 +202,7 @@ dependencies:
 - libarrow-dataset=18.1.0
 - libarrow-substrait=18.1.0
 - libblas=3.9.0
+- libboost=1.86.0
 - libbrotlicommon=1.1.0
 - libbrotlidec=1.1.0
 - libbrotlienc=1.1.0
@@ -233,6 +236,7 @@ dependencies:
 - libgoogle-cloud=2.32.0
 - libgoogle-cloud-storage=2.32.0
 - libgrpc=1.67.1
+- libhwloc=2.11.2
 - libiconv=1.17
 - libintl=0.22.5
 - libintl-devel=0.22.5
@@ -290,6 +294,7 @@ dependencies:
 - mercantile=1.2.1
 - minizip=4.0.7
 - mistune=3.0.2
+- mpfr=4.2.1
 - msgpack-python=1.1.0
 - multipledispatch=0.6.0
 - multiurl=0.3.3
@@ -361,14 +366,15 @@ dependencies:
 - pydoe2=1.3.0
 - pygments=2.18.0
 - pyogrio=0.10.0
-- pyomo=6.8.2
+- pyomo=6.6.1
 - pyparsing=3.2.0
 - pyppmd=1.1.0
 - pyproj=3.7.0
-- pypsa=0.24.0
+- pypsa=0.28.0
 - pyqt=5.15.9
 - pyqt5-sip=12.12.2
 - pyreadline3=3.5.4
+- pyscipopt=5.2.1
 - pyshp=2.3.1
 - pysocks=1.7.1
 - pytables=3.10.1
@@ -400,10 +406,10 @@ dependencies:
 - rfc3986-validator=0.1.1
 - rioxarray=0.17.0
 - rpds-py=0.22.3
-- rtree=1.3.0
 - ruamel.yaml=0.17.26
 - ruamel.yaml.clib=0.2.8
 - scikit-learn=1.6.0
+- scip=9.2.0
 - scipy=1.14.1
 - seaborn=0.13.2
 - seaborn-base=0.13.2
@@ -427,6 +433,7 @@ dependencies:
 - statsmodels=0.14.4
 - stopit=1.1.2
 - tabulate=0.9.0
+- tbb=2022.0.0
 - tblib=3.0.0
 - terminado=0.18.1
 - texttable=1.7.0
@@ -442,6 +449,7 @@ dependencies:
 - tornado=6.4.2
 - tqdm=4.67.1
 - traitlets=5.14.3
+- tsam=2.3.6
 - types-python-dateutil=2.9.0.20241206
 - typing-extensions=4.12.2
 - typing_extensions=4.12.2
@@ -497,5 +505,4 @@ dependencies:
   - googledrivedownloader==0.4
   - highspy==1.9.0
   - polars==1.17.1
-  - tsam==2.3.6
 prefix: C:\Miniconda\envs\pypsa-earth
diff --git a/scripts/_helpers.py b/scripts/_helpers.py
index 869547b07..33d5c7d78 100644
--- a/scripts/_helpers.py
+++ b/scripts/_helpers.py
@@ -60,7 +60,8 @@ def check_config_version(config, fp_config=CONFIG_DEFAULT_PATH):
             f"The current version of 'config.yaml' doesn't match to the code version:\n\r"
             f" {current_config_version} provided, {actual_config_version} expected.\n\r"
             f"That can lead to weird errors during execution of the workflow.\n\r"
-            f"Please update 'config.yaml' according to 'config.default.yaml' ."
+            f"Please update 'config.yaml' according to 'config.default.yaml'.\n\r"
+            "If issues persist, consider updating the environment to the latest version."
         )
@@ -921,6 +922,16 @@ def get_last_commit_message(path):
     return last_commit_message


+def update_config_dictionary(
+    config_dict,
+    parameter_key_to_fill="lines",
+    dict_to_use={"geometry": "first", "bounds": "first"},
+):
+    config_dict.setdefault(parameter_key_to_fill, {})
+    config_dict[parameter_key_to_fill].update(dict_to_use)
+    return config_dict
+
+
 # PYPSA-EARTH-SEC
 def annuity(n, r):
     """
diff --git a/scripts/base_network.py b/scripts/base_network.py
index 65d640d44..e11ff83c6 100644
--- a/scripts/base_network.py
+++ b/scripts/base_network.py
@@ -523,20 +523,6 @@ def base_network(
         result_type="reduce",
     )
     n.import_components_from_dataframe(lines_ac, "Line")
-    # The columns which names starts with "bus" are mixed up with the third-bus specification
-    # when executing additional_linkports()
-    lines_dc.drop(
-        labels=[
-            "bus0_lon",
-            "bus0_lat",
-            "bus1_lon",
-            "bus1_lat",
-            "bus_0_coors",
-            "bus_1_coors",
-        ],
-        axis=1,
-        inplace=True,
-    )
     n.import_components_from_dataframe(lines_dc, "Link")

     n.import_components_from_dataframe(transformers, "Transformer")
diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py
index e5b3cf3ca..f9f4775fb 100644
--- a/scripts/build_bus_regions.py
+++ b/scripts/build_bus_regions.py
@@ -141,7 +141,7 @@ def get_gadm_shape(
         # when duplicates, keep only the first entry
         join_geos = join_geos[~join_geos.index.duplicated()]

-        gadm_sel = gadm_shapes.loc[join_geos["index_right"].values]
+        gadm_sel = gadm_shapes.loc[join_geos[gadm_shapes.index.name].values]

     return gadm_sel.geometry.values, gadm_sel.index.values
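Editor's note: the `build_bus_regions.py` change above (and the `build_osm_network.py` change below) reflect geopandas >= 1.0 behaviour: `sjoin()` lost the deprecated `op=` keyword in favour of `predicate=`, and the column holding the right-hand index is named after that index's name, falling back to `"index_right"` only for unnamed indexes. A minimal, self-contained sketch with hypothetical frames:

```python
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point

points = gpd.GeoDataFrame(geometry=[Point(0, 0)], crs="EPSG:4326")
shapes = gpd.GeoDataFrame(
    {"name": ["a"]},
    geometry=[Point(0, 0).buffer(1)],
    index=pd.Index(["s1"], name="GADM_ID"),  # named index, as for the GADM shapes
    crs="EPSG:4326",
)

joined = gpd.sjoin(points, shapes, how="inner", predicate="intersects")
# robust lookup of the right-hand index column:
right_col = shapes.index.name if shapes.index.name is not None else "index_right"
print(joined[right_col])  # -> "s1"
```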
diff --git a/scripts/build_osm_network.py b/scripts/build_osm_network.py
index b4e7e6f4c..de88be8e5 100644
--- a/scripts/build_osm_network.py
+++ b/scripts/build_osm_network.py
@@ -26,6 +26,27 @@
 logger = create_logger(__name__)


+# Keep only a predefined set of columns, as otherwise conflicts are possible,
+# e.g. the columns whose names start with "bus" get mixed up with
+# the third-bus specification when executing additional_linkports()
+LINES_COLUMNS = [
+    "line_id",
+    "circuits",
+    "tag_type",
+    "voltage",
+    "bus0",
+    "bus1",
+    "length",
+    "underground",
+    "under_construction",
+    "tag_frequency",
+    "dc",
+    "country",
+    "geometry",
+    "bounds",
+]
+
+
 def line_endings_to_bus_conversion(lines):
     # Assign to every line a start and end point
@@ -554,21 +575,17 @@ def fix_overpassing_lines(lines, buses, distance_crs, tol=1):
     buffer_df = df_p.buffer(tol).to_frame()

     # Spatial join to find lines intersecting point buffers
-    joined = gpd.sjoin(df_l, buffer_df, how="inner", op="intersects")
+    joined = gpd.sjoin(df_l, buffer_df, how="inner", predicate="intersects")

     # group lines by their index
     group_lines = joined.groupby(level=0)

     # iterate over the groups, TODO: change to apply
     for i, group in group_lines:
-        line_id = df_l.loc[i, line_id_str]
         # pick the line id that represents the group
         line_geom = df_l.loc[i, "geometry"]

-        # number of points that intersect with the line
-        num_points = len(group)
-
         # get the indices of the points that intersect with the line
-        points_indexes = group["index_right"].tolist()
+        points_indexes = group[buffer_df.index.name].tolist()

         # get the geometries of the points that intersect with the line
         all_points = df_p.loc[points_indexes, "geometry"]
@@ -720,6 +737,7 @@ def built_network(
     countries_config,
     geo_crs,
     distance_crs,
+    lines_cols_standard,
     force_ac=False,
 ):
     logger.info("Stage 1/5: Read input data")
@@ -784,6 +802,8 @@ def built_network(
     if not os.path.exists(outputs["lines"]):
         os.makedirs(os.path.dirname(outputs["lines"]), exist_ok=True)

+    lines = lines[lines_cols_standard]
+
     to_csv_nafix(lines, outputs["lines"])  # Generate CSV
     to_csv_nafix(converters, outputs["converters"])  # Generate CSV
     to_csv_nafix(transformers, outputs["transformers"])  # Generate CSV
@@ -819,5 +839,6 @@
         countries,
         geo_crs,
         distance_crs,
+        lines_cols_standard=LINES_COLUMNS,
         force_ac=force_ac,
     )
diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py
index 3e0e8bd5a..a4247e64b 100644
--- a/scripts/build_shapes.py
+++ b/scripts/build_shapes.py
@@ -22,6 +22,7 @@
     BASE_DIR,
     configure_logging,
     create_logger,
+    save_to_geojson,
     three_2_two_digits_country,
     two_2_three_digits_country,
     two_digits_2_name_country,
@@ -305,23 +306,6 @@ def country_cover(country_shapes, eez_shapes=None, out_logging=False, distance=0
     return africa_shape


-def save_to_geojson(df, fn):
-    if os.path.exists(fn):
-        os.unlink(fn)  # remove file if it exists
-    if not isinstance(df, gpd.GeoDataFrame):
-        df = gpd.GeoDataFrame(dict(geometry=df))
-
-    # save file if the GeoDataFrame is non-empty
-    if df.shape[0] > 0:
-        df = df.reset_index()
-        schema = {**gpd.io.file.infer_schema(df), "geometry": "Unknown"}
-        df.to_file(fn, driver="GeoJSON", schema=schema)
-    else:
-        # create empty file to avoid issues with snakemake
-        with open(fn, "w") as fp:
-            pass
-
-
 def load_EEZ(countries_codes, geo_crs, EEZ_gpkg="./data/eez/eez_v11.gpkg"):
     """
     Function to load the database of the Exclusive Economic Zones.
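Editor's note: the `cluster_network.py` changes below lean on `update_config_dictionary()` (added to `_helpers.py` above). A self-contained sketch of its semantics, worth noting because `dict.update()` means the supplied defaults overwrite same-named user entries, not just fill gaps:

```python
def update_config_dictionary(
    config_dict,
    parameter_key_to_fill="lines",
    dict_to_use={"geometry": "first", "bounds": "first"},
):
    # copied from the _helpers.py hunk above: create the sub-dict if missing,
    # then force the given strategies onto it (in place)
    config_dict.setdefault(parameter_key_to_fill, {})
    config_dict[parameter_key_to_fill].update(dict_to_use)
    return config_dict


strategies = {"buses": {"v_nom": "max"}}  # hypothetical user setting
update_config_dictionary(
    config_dict=strategies,
    parameter_key_to_fill="buses",
    dict_to_use={"v_nom": "first", "country": "first"},
)
print(strategies)  # {'buses': {'v_nom': 'first', 'country': 'first'}}
```

Note that the user's `"v_nom": "max"` is replaced, which matches the "aggregation strategies must be set for all columns" intent of the callers below.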
diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py
index d98c69221..562b9cfa7 100644
--- a/scripts/cluster_network.py
+++ b/scripts/cluster_network.py
@@ -134,6 +134,7 @@
     configure_logging,
     create_logger,
     get_aggregation_strategies,
+    update_config_dictionary,
     update_p_nom_max,
 )
 from add_electricity import load_costs
@@ -575,9 +576,10 @@ def clustering_for_n_clusters(
     extended_link_costs=0,
     focus_weights=None,
 ):
-    bus_strategies, generator_strategies = get_aggregation_strategies(
-        aggregation_strategies
-    )
+    line_strategies = aggregation_strategies.get("lines", dict())
+    bus_strategies = aggregation_strategies.get("buses", dict())
+    generator_strategies = aggregation_strategies.get("generators", dict())
+    one_port_strategies = aggregation_strategies.get("one_ports", dict())

     if not isinstance(custom_busmap, pd.Series):
         if alternative_clustering:
@@ -603,12 +605,14 @@ def clustering_for_n_clusters(
     clustering = get_clustering_from_busmap(
         n,
         busmap,
-        bus_strategies=bus_strategies,
         aggregate_generators_weighted=True,
         aggregate_generators_carriers=aggregate_carriers,
         aggregate_one_ports=["Load", "StorageUnit"],
         line_length_factor=line_length_factor,
+        line_strategies=line_strategies,
+        bus_strategies=bus_strategies,
         generator_strategies=generator_strategies,
+        one_port_strategies=one_port_strategies,
         scale_link_capital_costs=False,
     )
@@ -630,14 +634,6 @@ def clustering_for_n_clusters(
     return clustering


-def save_to_geojson(s, fn):
-    if os.path.exists(fn):
-        os.unlink(fn)
-    df = s.reset_index()
-    schema = {**gpd.io.file.infer_schema(df), "geometry": "Unknown"}
-    df.to_file(fn, driver="GeoJSON", schema=schema)
-
-
 def cluster_regions(busmaps, inputs, output):
     busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0])
@@ -703,9 +699,7 @@ def cluster_regions(busmaps, inputs, output):
         # Fast-path if no clustering is necessary
         busmap = n.buses.index.to_series()
         linemap = n.lines.index.to_series()
-        clustering = pypsa.clustering.spatial.Clustering(
-            n, busmap, linemap, linemap, pd.Series(dtype="O")
-        )
+        clustering = pypsa.clustering.spatial.Clustering(n, busmap, linemap)
     elif len(n.buses) < n_clusters:
         logger.error(
             f"Desired number of clusters ({n_clusters}) higher than the number of buses ({len(n.buses)})"
@@ -727,14 +721,27 @@ def consense(x):
            ).all() or x.isnull().all(), "The `potential` configuration option must agree for all renewable carriers, for now!"
            return v

-        aggregation_strategies = snakemake.params.cluster_options.get(
-            "aggregation_strategies", {}
+        aggregation_strategies = snakemake.params.aggregation_strategies
+
+        # Aggregation strategies must be set for all columns
+        update_config_dictionary(
+            config_dict=aggregation_strategies,
+            parameter_key_to_fill="lines",
+            dict_to_use={"v_nom": "first", "geometry": "first", "bounds": "first"},
         )
-        # translate str entries of aggregation_strategies to pd.Series functions:
-        aggregation_strategies = {
-            p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()}
-            for p in aggregation_strategies.keys()
-        }
+        update_config_dictionary(
+            config_dict=aggregation_strategies,
+            parameter_key_to_fill="buses",
+            dict_to_use={
+                "v_nom": "first",
+                "lat": "mean",
+                "lon": "mean",
+                "tag_substation": "first",
+                "tag_area": "first",
+                "country": "first",
+            },
+        )
+
         custom_busmap = False  # snakemake.params.custom_busmap; custom busmap is deprecated, see https://github.com/pypsa-meets-earth/pypsa-earth/pull/694
         if custom_busmap:
             busmap = pd.read_csv(
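Editor's note: the hunk above also explains why the old `getattr(pd.Series, v)` translation block could be deleted. That block would have failed on any strategy name that is not a `pd.Series` method; passing the strings through unchanged lets PyPSA resolve named strategies itself, which is what makes the new `p_max_pu: weighted_average` option possible (assuming the pinned PyPSA 0.28 resolves it, as this PR relies on). A trivial sketch of the difference:

```python
import pandas as pd

# what the deleted translation produced (a pd.Series method object):
old_style = {"p_min_pu": getattr(pd.Series, "mean")}

# what is now handed straight to PyPSA's clustering routines:
new_style = {"p_min_pu": "mean", "p_max_pu": "weighted_average"}
```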
diff --git a/scripts/simplify_network.py b/scripts/simplify_network.py
index 92c3dd340..502cf1b9d 100644
--- a/scripts/simplify_network.py
+++ b/scripts/simplify_network.py
@@ -96,13 +96,12 @@
 from _helpers import (
     configure_logging,
     create_logger,
-    get_aggregation_strategies,
+    update_config_dictionary,
     update_p_nom_max,
 )
 from add_electricity import load_costs
 from cluster_network import cluster_regions, clustering_for_n_clusters
 from pypsa.clustering.spatial import (
-    aggregategenerators,
     aggregateoneport,
     busmap_by_stubs,
     get_clustering_from_busmap,
@@ -276,11 +275,15 @@ def replace_components(n, c, df, pnl):

     _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, output)

-    _, generator_strategies = get_aggregation_strategies(aggregation_strategies)
+    generator_strategies = aggregation_strategies["generators"]

     carriers = set(n.generators.carrier) - set(exclude_carriers)
-    generators, generators_pnl = aggregategenerators(
-        n, busmap, carriers=carriers, custom_strategies=generator_strategies
+    generators, generators_pnl = aggregateoneport(
+        n,
+        busmap,
+        "Generator",
+        carriers=carriers,
+        custom_strategies=generator_strategies,
     )

     replace_components(n, "Generator", generators, generators_pnl)
@@ -588,19 +591,22 @@ def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None):
     if not dist.empty:
         busmap.loc[buses_i] = dist.idxmin(1)

-    bus_strategies, generator_strategies = get_aggregation_strategies(
-        aggregation_strategies
-    )
+    line_strategies = aggregation_strategies.get("lines", dict())
+    bus_strategies = aggregation_strategies.get("buses", dict())
+    generator_strategies = aggregation_strategies.get("generators", dict())
+    one_port_strategies = aggregation_strategies.get("one_ports", dict())

     clustering = get_clustering_from_busmap(
         n,
         busmap,
-        bus_strategies=bus_strategies,
         aggregate_generators_weighted=True,
         aggregate_generators_carriers=None,
         aggregate_one_ports=["Load", "StorageUnit"],
         line_length_factor=1.0,
+        line_strategies=line_strategies,
+        bus_strategies=bus_strategies,
         generator_strategies=generator_strategies,
+        one_port_strategies=one_port_strategies,
         scale_link_capital_costs=False,
     )
     return clustering.network, busmap
@@ -848,19 +854,22 @@ def merge_into_network(n, threshold, aggregation_strategies=dict()):
     if (busmap.index == busmap).all():
         return n, n.buses.index.to_series()

-    bus_strategies, generator_strategies = get_aggregation_strategies(
-        aggregation_strategies
-    )
+    line_strategies = aggregation_strategies.get("lines", dict())
+    bus_strategies = aggregation_strategies.get("buses", dict())
+    generator_strategies = aggregation_strategies.get("generators", dict())
+    one_port_strategies = aggregation_strategies.get("one_ports", dict())

     clustering = get_clustering_from_busmap(
         n,
         busmap,
-        bus_strategies=bus_strategies,
         aggregate_generators_weighted=True,
         aggregate_generators_carriers=None,
         aggregate_one_ports=["Load", "StorageUnit"],
         line_length_factor=1.0,
+        line_strategies=line_strategies,
+        bus_strategies=bus_strategies,
         generator_strategies=generator_strategies,
+        one_port_strategies=one_port_strategies,
         scale_link_capital_costs=False,
     )

@@ -934,19 +943,22 @@ def merge_isolated_nodes(n, threshold, aggregation_strategies=dict()):
     if (busmap.index == busmap).all():
         return n, n.buses.index.to_series()

-    bus_strategies, generator_strategies = get_aggregation_strategies(
-        aggregation_strategies
-    )
+    line_strategies = aggregation_strategies.get("lines", dict())
+    bus_strategies = aggregation_strategies.get("buses", dict())
+    generator_strategies = aggregation_strategies.get("generators", dict())
+    one_port_strategies = aggregation_strategies.get("one_ports", dict())

     clustering = get_clustering_from_busmap(
         n,
         busmap,
-        bus_strategies=bus_strategies,
         aggregate_generators_weighted=True,
         aggregate_generators_carriers=None,
         aggregate_one_ports=["Load", "StorageUnit"],
         line_length_factor=1.0,
+        line_strategies=line_strategies,
+        bus_strategies=bus_strategies,
         generator_strategies=generator_strategies,
+        one_port_strategies=one_port_strategies,
         scale_link_capital_costs=False,
     )

@@ -976,14 +988,27 @@ def merge_isolated_nodes(n, threshold, aggregation_strategies=dict()):
         "exclude_carriers", []
     )
     hvdc_as_lines = snakemake.params.electricity["hvdc_as_lines"]
-    aggregation_strategies = snakemake.params.cluster_options.get(
-        "aggregation_strategies", {}
+    aggregation_strategies = snakemake.params.aggregation_strategies
+
+    # Aggregation strategies must be set for all columns
+    update_config_dictionary(
+        config_dict=aggregation_strategies,
+        parameter_key_to_fill="lines",
+        dict_to_use={"v_nom": "first", "geometry": "first", "bounds": "first"},
     )
-    # translate str entries of aggregation_strategies to pd.Series functions:
-    aggregation_strategies = {
-        p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()}
-        for p in aggregation_strategies.keys()
-    }
+    update_config_dictionary(
+        config_dict=aggregation_strategies,
+        parameter_key_to_fill="buses",
+        dict_to_use={
+            "v_nom": "first",
+            "lat": "mean",
+            "lon": "mean",
+            "tag_substation": "first",
+            "tag_area": "first",
+            "country": "first",
+        },
+    )
+
     n, trafo_map = simplify_network_to_base_voltage(n, linetype, base_voltage)

     Nyears = n.snapshot_weightings.objective.sum() / 8760
@@ -1088,7 +1113,7 @@ def merge_isolated_nodes(n, threshold, aggregation_strategies=dict()):
             solver_name,
             cluster_config.get("algorithm", "hac"),
             cluster_config.get("feature", None),
-            aggregation_strategies,
+            aggregation_strategies=aggregation_strategies,
         )
         busmaps.append(cluster_map)
diff --git a/scripts/solve_network.py b/scripts/solve_network.py
index cad5c8485..2541ae9b2 100755
--- a/scripts/solve_network.py
+++ b/scripts/solve_network.py
@@ -52,15 +52,15 @@
 linear optimal power flow (plus investment planning)
 is provided in the `documentation of PyPSA `_.

-The optimization is based on the ``pyomo=False`` setting in the :func:`network.lopf` and :func:`pypsa.linopf.ilopf` function.
-Additionally, some extra constraints specified in :mod:`prepare_network` are added.
+The optimization is based on the :func:`network.optimize` function.
+Additionally, some extra constraints specified in :mod:`prepare_network` and :mod:`solve_network` are added.

 Solving the network in multiple iterations is motivated through the dependence of transmission line capacities
 and impedances on values of corresponding flows. As lines are expanded their electrical parameters change,
 which renders the optimisation bilinear even if the power flow
 equations are linearized. To retain the computational advantage of continuous linear programming, a sequential linear programming technique
 is used, where in between iterations the line impedances are updated.
-Details (and errors made through this heuristic) are discussed in the paper
+Details (and errors introduced through this heuristic) are discussed in the paper

 - Fabian Neumann and Tom Brown. `Heuristics for Transmission Expansion Planning in Low-Carbon Energy System Models `_), *16th International Conference on the European Energy Market*, 2019. `arXiv:1907.10548 `_.
@@ -85,24 +85,18 @@
 import numpy as np
 import pandas as pd
 import pypsa
+import xarray as xr
 from _helpers import configure_logging, create_logger, override_component_attrs
+from linopy import merge
 from pypsa.descriptors import get_switchable_as_dense as get_as_dense
-from pypsa.linopf import (
-    define_constraints,
-    define_variables,
-    get_var,
-    ilopf,
-    join_exprs,
-    linexpr,
-    network_lopf,
-)
-from pypsa.linopt import define_constraints, get_var, join_exprs, linexpr
+from pypsa.optimization.abstract import optimize_transmission_expansion_iteratively
+from pypsa.optimization.optimize import optimize

 logger = create_logger(__name__)
 pypsa.pf.logger.setLevel(logging.WARNING)


-def prepare_network(n, solve_opts):
+def prepare_network(n, solve_opts, config):
     if "clip_p_max_pu" in solve_opts:
         for df in (
             n.generators_t.p_max_pu,
@@ -159,6 +153,25 @@ def prepare_network(n, solve_opts):


 def add_CCL_constraints(n, config):
+    """
+    Add CCL (country & carrier limit) constraint to the network.
+
+    Add minimum and maximum levels of generator nominal capacity per carrier
+    for individual countries. Opts and path for agg_p_nom_minmax.csv must be defined
+    in config.yaml. Default file is available at data/agg_p_nom_minmax.csv.
+
+    Parameters
+    ----------
+    n : pypsa.Network
+    config : dict
+
+    Example
+    -------
+    scenario:
+        opts: [Co2L-CCL-24H]
+    electricity:
+        agg_p_nom_limits: data/agg_p_nom_minmax.csv
+    """
     agg_p_nom_limits = config["electricity"].get("agg_p_nom_limits")

     try:
@@ -174,32 +187,57 @@ def add_CCL_constraints(n, config):
     )
     gen_country = n.generators.bus.map(n.buses.country)
-    # cc means country and carrier
-    p_nom_per_cc = (
-        pd.DataFrame(
-            {
-                "p_nom": linexpr((1, get_var(n, "Generator", "p_nom"))),
-                "country": gen_country,
-                "carrier": n.generators.carrier,
-            }
-        )
-        .dropna(subset=["p_nom"])
-        .groupby(["country", "carrier"])
-        .p_nom.apply(join_exprs)
-    )
-    minimum = agg_p_nom_minmax["min"].dropna()
-    if not minimum.empty:
-        minconstraint = define_constraints(
-            n, p_nom_per_cc[minimum.index], ">=", minimum, "agg_p_nom", "min"
-        )
-    maximum = agg_p_nom_minmax["max"].dropna()
-    if not maximum.empty:
-        maxconstraint = define_constraints(
-            n, p_nom_per_cc[maximum.index], "<=", maximum, "agg_p_nom", "max"
-        )
+    capacity_variable = n.model["Generator-p_nom"]
+
+    lhs = []
+    ext_carriers = n.generators.query("p_nom_extendable").carrier.unique()
+    for c in ext_carriers:
+        ext_carrier = n.generators.query("p_nom_extendable and carrier == @c")
+        country_grouper = (
+            ext_carrier.bus.map(n.buses.country)
+            .rename_axis("Generator-ext")
+            .rename("country")
+        )
+        ext_carrier_per_country = capacity_variable.loc[
+            country_grouper.index
+        ].groupby_sum(country_grouper)
+        lhs.append(ext_carrier_per_country)
+    lhs = merge(lhs, dim=pd.Index(ext_carriers, name="carrier"))
+
+    min_matrix = agg_p_nom_minmax["min"].to_xarray().unstack().reindex_like(lhs)
+    max_matrix = agg_p_nom_minmax["max"].to_xarray().unstack().reindex_like(lhs)
+
+    n.model.add_constraints(
+        lhs >= min_matrix, name="agg_p_nom_min", mask=min_matrix.notnull()
+    )
+    n.model.add_constraints(
+        lhs <= max_matrix, name="agg_p_nom_max", mask=max_matrix.notnull()
+    )
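Editor's note: the rewritten `add_CCL_constraints()` above is the template for the linopy-style constraints in the rest of this file: group optimisation variables with a labelled grouper, then use `mask=` so constraints are only created where a bound exists. A standalone sketch on a hypothetical model (linopy's grouped-aggregation spelling has shifted between releases; the hunk above itself mixes `groupby_sum` and `groupby(...).sum()`, and this sketch follows the newer spelling):

```python
import linopy
import pandas as pd
import xarray as xr

m = linopy.Model()
gens = pd.Index(["g1", "g2", "g3"], name="Generator-ext")
p_nom = m.add_variables(lower=0, coords=[gens], name="Generator-p_nom")

# label each generator with its country, then aggregate per country
country = xr.DataArray(pd.Series(["MA", "MA", "NG"], index=gens, name="country"))
per_country = p_nom.groupby(country).sum()

# bounds with gaps: NaN entries are skipped via the mask
min_cap = pd.Series({"MA": 100.0, "NG": float("nan")}).rename_axis("country").to_xarray()
m.add_constraints(per_country >= min_cap, name="agg_p_nom_min", mask=min_cap.notnull())
```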
+ """ float_regex = "[0-9]*\.?[0-9]+" level = float(re.findall(float_regex, o)[0]) if o[-1] == "c": @@ -220,99 +258,149 @@ def add_EQ_constraints(n, o, scaling=1e-1): ) inflow = inflow.reindex(load.index).fillna(0.0) rhs = scaling * (level * load - inflow) + dispatch_variable = n.model["Generator-p"] lhs_gen = ( - linexpr( - (n.snapshot_weightings.generators * scaling, get_var(n, "Generator", "p").T) - ) - .T.groupby(ggrouper, axis=1) - .apply(join_exprs) + (dispatch_variable * (n.snapshot_weightings.generators * scaling)) + .groupby(ggrouper.to_xarray()) + .sum() + .sum("snapshot") ) - lhs_spill = ( - linexpr( - ( - -n.snapshot_weightings.stores * scaling, - get_var(n, "StorageUnit", "spill").T, - ) + # the current formulation implies that the available hydro power is (inflow - spillage) + # it implies efficiency_dispatch is 1 which is not quite general + # see https://github.com/pypsa-meets-earth/pypsa-earth/issues/1245 for possible improvements + if not n.storage_units_t.inflow.empty: + spillage_variable = n.model["StorageUnit-spill"] + lhs_spill = ( + (spillage_variable * (-n.snapshot_weightings.stores * scaling)) + .groupby_sum(sgrouper) + .groupby(sgrouper.to_xarray()) + .sum() + .sum("snapshot") ) - .T.groupby(sgrouper, axis=1) - .apply(join_exprs) - ) - lhs_spill = lhs_spill.reindex(lhs_gen.index).fillna("") - lhs = lhs_gen + lhs_spill - define_constraints(n, lhs, ">=", rhs, "equity", "min") + lhs = lhs_gen + lhs_spill + else: + lhs = lhs_gen + n.model.add_constraints(lhs >= rhs, name="equity_min") def add_BAU_constraints(n, config): - ext_c = n.generators.query("p_nom_extendable").carrier.unique() - mincaps = pd.Series( - config["electricity"].get("BAU_mincapacities", {key: 0 for key in ext_c}) - ) - lhs = ( - linexpr((1, get_var(n, "Generator", "p_nom"))) - .groupby(n.generators.carrier) - .apply(join_exprs) - ) - define_constraints(n, lhs, ">=", mincaps[lhs.index], "Carrier", "bau_mincaps") - - maxcaps = pd.Series( - config["electricity"].get("BAU_maxcapacities", {key: np.inf for key in ext_c}) - ) - lhs = ( - linexpr((1, get_var(n, "Generator", "p_nom"))) - .groupby(n.generators.carrier) - .apply(join_exprs) - ) - define_constraints(n, lhs, "<=", maxcaps[lhs.index], "Carrier", "bau_maxcaps") + """ + Add a per-carrier minimal overall capacity. + + BAU_mincapacities and opts must be adjusted in the config.yaml. + + Parameters + ---------- + n : pypsa.Network + config : dict + + Example + ------- + scenario: + opts: [Co2L-BAU-24h] + electricity: + BAU_mincapacities: + solar: 0 + onwind: 0 + OCGT: 100000 + offwind-ac: 0 + offwind-dc: 0 + Which sets minimum expansion across all nodes e.g. in Europe to 100GW. + OCGT bus 1 + OCGT bus 2 + ... > 100000 + """ + mincaps = pd.Series(config["electricity"]["BAU_mincapacities"]) + p_nom = n.model["Generator-p_nom"] + ext_i = n.generators.query("p_nom_extendable") + ext_carrier_i = xr.DataArray(ext_i.carrier.rename_axis("Generator-ext")) + lhs = p_nom.groupby(ext_carrier_i).sum() + rhs = mincaps[lhs.indexes["carrier"]].rename_axis("carrier") + n.model.add_constraints(lhs >= rhs, name="bau_mincaps") def add_SAFE_constraints(n, config): - peakdemand = ( - 1.0 + config["electricity"]["SAFE_reservemargin"] - ) * n.loads_t.p_set.sum(axis=1).max() - conv_techs = config["plotting"]["conv_techs"] + """ + Add a capacity reserve margin of a certain fraction above the peak demand. + Renewable generators and storage do not contribute. Ignores network. 

 def add_SAFE_constraints(n, config):
-    peakdemand = (
-        1.0 + config["electricity"]["SAFE_reservemargin"]
-    ) * n.loads_t.p_set.sum(axis=1).max()
-    conv_techs = config["plotting"]["conv_techs"]
+    """
+    Add a capacity reserve margin of a certain fraction above the peak demand.
+    Renewable generators and storage do not contribute. Ignores network.
+
+    Parameters
+    ----------
+    n : pypsa.Network
+    config : dict
+
+    Example
+    -------
+    config.yaml requires opts to be specified:
+
+    scenario:
+        opts: [Co2L-SAFE-24h]
+    electricity:
+        SAFE_reservemargin: 0.1
+    Which sets a reserve margin of 10% above the peak demand.
+    """
+    peakdemand = n.loads_t.p_set.sum(axis=1).max()
+    margin = 1.0 + config["electricity"]["SAFE_reservemargin"]
+    reserve_margin = peakdemand * margin
+    conventional_carriers = config["electricity"]["conventional_carriers"]
+    ext_gens_i = n.generators.query(
+        "carrier in @conventional_carriers & p_nom_extendable"
+    ).index
+    capacity_variable = n.model["Generator-p_nom"]
+    p_nom = n.model["Generator-p_nom"].loc[ext_gens_i]
+    lhs = p_nom.sum()
     exist_conv_caps = n.generators.query(
-        "~p_nom_extendable & carrier in @conv_techs"
+        "~p_nom_extendable & carrier in @conventional_carriers"
     ).p_nom.sum()
-    ext_gens_i = n.generators.query("carrier in @conv_techs & p_nom_extendable").index
-    lhs = linexpr((1, get_var(n, "Generator", "p_nom")[ext_gens_i])).sum()
-    rhs = peakdemand - exist_conv_caps
-    define_constraints(n, lhs, ">=", rhs, "Safe", "mintotalcap")
+    rhs = reserve_margin - exist_conv_caps
+    n.model.add_constraints(lhs >= rhs, name="safe_mintotalcap")


-def add_operational_reserve_margin_constraint(n, config):
+def add_operational_reserve_margin_constraint(n, sns, config):
+    """
+    Build reserve margin constraints based on the formulation
+    as suggested in GenX,
+    https://energy.mit.edu/wp-content/uploads/2017/10/Enhanced-Decision-Support-for-a-Changing-Electricity-Landscape.pdf
+    It implies that the reserve margin also accounts for optimal
+    dispatch of distributed energy resources (DERs) and demand response,
+    which is a novel feature of GenX.
+    """
     reserve_config = config["electricity"]["operational_reserve"]
     EPSILON_LOAD = reserve_config["epsilon_load"]
     EPSILON_VRES = reserve_config["epsilon_vres"]
     CONTINGENCY = reserve_config["contingency"]

     # Reserve Variables
-    reserve = get_var(n, "Generator", "r")
-    lhs = linexpr((1, reserve)).sum(1)
+    n.model.add_variables(
+        0, np.inf, coords=[sns, n.generators.index], name="Generator-r"
+    )
+    reserve = n.model["Generator-r"]
+    summed_reserve = reserve.sum("Generator")
+    lhs = summed_reserve

     # Share of extendable renewable capacities
     ext_i = n.generators.query("p_nom_extendable").index
     vres_i = n.generators_t.p_max_pu.columns
     if not ext_i.empty and not vres_i.empty:
         capacity_factor = n.generators_t.p_max_pu[vres_i.intersection(ext_i)]
-        renewable_capacity_variables = get_var(n, "Generator", "p_nom")[
-            vres_i.intersection(ext_i)
-        ]
-        lhs += linexpr(
-            (-EPSILON_VRES * capacity_factor, renewable_capacity_variables)
-        ).sum(1)
+        p_nom_vres = (
+            n.model["Generator-p_nom"]
+            .loc[vres_i.intersection(ext_i)]
+            .rename({"Generator-ext": "Generator"})
+        )
+        lhs = summed_reserve + (
+            p_nom_vres * (-EPSILON_VRES * xr.DataArray(capacity_factor))
+        ).sum("Generator")

-    # Total demand at t
-    demand = n.loads_t.p.sum(1)
+    # Total demand per t
+    demand = get_as_dense(n, "Load", "p_set").sum(axis=1)

     # VRES potential of non extendable generators
     capacity_factor = n.generators_t.p_max_pu[vres_i.difference(ext_i)]
     renewable_capacity = n.generators.p_nom[vres_i.difference(ext_i)]
-    potential = (capacity_factor * renewable_capacity).sum(1)
+    potential = (capacity_factor * renewable_capacity).sum(axis=1)

     # Right-hand-side
     rhs = EPSILON_LOAD * demand + EPSILON_VRES * potential + CONTINGENCY

-    define_constraints(n, lhs, ">=", rhs, "Reserve margin")
+    n.model.add_constraints(lhs >= rhs, name="reserve_margin")
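Editor's note: the reserve formulation above first creates a new decision variable on the model (`Generator-r`) and then ties it to existing variables. A self-contained toy sketch of that pattern (hypothetical model and data, not the PR's code):

```python
import linopy
import pandas as pd
import xarray as xr

m = linopy.Model()
sns = pd.date_range("2030-01-01", periods=4, freq="h", name="snapshot")
gens = pd.Index(["g1", "g2"], name="Generator")

p = m.add_variables(lower=0, coords=[sns, gens], name="Generator-p")
r = m.add_variables(lower=0, coords=[sns, gens], name="Generator-r")

# dispatch plus held-back reserve must fit under the available capacity
p_avail = xr.DataArray(pd.DataFrame(80.0, index=sns, columns=gens))
m.add_constraints(p + r <= p_avail, name="toy_capacity_limit")

# total reserve per snapshot must cover a fixed contingency
m.add_constraints(r.sum("Generator") >= 50.0, name="toy_reserve_margin")
```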

 def update_capacity_constraint(n):
@@ -320,171 +408,153 @@ def update_capacity_constraint(n):
     ext_i = n.generators.query("p_nom_extendable").index
     fix_i = n.generators.query("not p_nom_extendable").index

-    dispatch = get_var(n, "Generator", "p")
-    reserve = get_var(n, "Generator", "r")
+    dispatch = n.model["Generator-p"]
+    reserve = n.model["Generator-r"]

     capacity_fixed = n.generators.p_nom[fix_i]
     p_max_pu = get_as_dense(n, "Generator", "p_max_pu")

-    lhs = linexpr((1, dispatch), (1, reserve))
+    lhs = dispatch + reserve

+    # TODO check whether `p_max_pu[ext_i]` is safe for an empty `ext_i` and, if so, drop the if clause
     if not ext_i.empty:
-        capacity_variable = get_var(n, "Generator", "p_nom")
-        lhs += linexpr((-p_max_pu[ext_i], capacity_variable)).reindex(
-            columns=gen_i, fill_value=""
+        capacity_variable = n.model["Generator-p_nom"].rename(
+            {"Generator-ext": "Generator"}
         )
+        lhs = dispatch + reserve - capacity_variable * xr.DataArray(p_max_pu[ext_i])

     rhs = (p_max_pu[fix_i] * capacity_fixed).reindex(columns=gen_i, fill_value=0)

-    define_constraints(n, lhs, "<=", rhs, "Generators", "updated_capacity_constraint")
+    n.model.add_constraints(lhs <= rhs, name="gen_updated_capacity_constraint")


 def add_operational_reserve_margin(n, sns, config):
     """
-    Build reserve margin constraints based on the formulation given in
-    https://genxproject.github.io/GenX/dev/core/#Reserves.
+    Parameters
+    ----------
+    n : pypsa.Network
+    sns: pd.DatetimeIndex
+    config : dict
+
+    Example
+    -------
+    config.yaml requires operational_reserve to be specified:
+    operational_reserve: # like https://genxproject.github.io/GenX/dev/core/#Reserves
+        activate: true
+        epsilon_load: 0.02 # percentage of load at each snapshot
+        epsilon_vres: 0.02 # percentage of VRES at each snapshot
+        contingency: 400000 # MW
     """
-    define_variables(n, 0, np.inf, "Generator", "r", axes=[sns, n.generators.index])
-
-    add_operational_reserve_margin_constraint(n, config)
+    add_operational_reserve_margin_constraint(n, sns, config)

     update_capacity_constraint(n)
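Editor's note: `update_capacity_constraint()` above uses a recurring linopy idiom in this file: extendable-capacity variables live on the dimension `"Generator-ext"`, while dispatch variables live on `"Generator"`, so the capacity variable must be renamed before the two can be mixed in one expression. A short sketch, assuming `n` is a pypsa.Network whose linopy model has been built:

```python
dispatch = n.model["Generator-p"]    # dims: ("snapshot", "Generator")
p_nom = n.model["Generator-p_nom"]   # dim: ("Generator-ext",) - extendables only

# align the capacity variable with the dispatch dimension before combining
p_nom_aligned = p_nom.rename({"Generator-ext": "Generator"})
expr = dispatch - p_nom_aligned  # now broadcasts over the shared labels
```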

 def add_battery_constraints(n):
-    nodes = n.buses.index[n.buses.carrier == "battery"]
-    if nodes.empty or ("Link", "p_nom") not in n.variables.index:
+    """
+    Add constraint ensuring that charger = discharger, i.e.
+    1 * charger_size - efficiency * discharger_size = 0
+    """
+    if not n.links.p_nom_extendable.any():
         return
-    link_p_nom = get_var(n, "Link", "p_nom")
-    lhs = linexpr(
-        (1, link_p_nom[nodes + " charger"]),
-        (
-            -n.links.loc[nodes + " discharger", "efficiency"].values,
-            link_p_nom[nodes + " discharger"].values,
-        ),
-    )
-    define_constraints(n, lhs, "=", 0, "Link", "charger_ratio")
+
+    discharger_bool = n.links.index.str.contains("battery discharger")
+    charger_bool = n.links.index.str.contains("battery charger")
+
+    dischargers_ext = n.links[discharger_bool].query("p_nom_extendable").index
+    chargers_ext = n.links[charger_bool].query("p_nom_extendable").index
+
+    eff = n.links.efficiency[dischargers_ext].values
+    lhs = (
+        n.model["Link-p_nom"].loc[chargers_ext]
+        - n.model["Link-p_nom"].loc[dischargers_ext] * eff
+    )
+
+    n.model.add_constraints(lhs == 0, name="Link-charger_ratio")


-def add_RES_constraints(n, res_share):
-    lgrouper = n.loads.bus.map(n.buses.country)
-    ggrouper = n.generators.bus.map(n.buses.country)
-    sgrouper = n.storage_units.bus.map(n.buses.country)
-    cgrouper = n.links.bus0.map(n.buses.country)
+def add_RES_constraints(n, res_share, config):
+    """
+    The constraint ensures that a predefined share of power is generated
+    by renewable sources.
+
+    Parameters
+    ----------
+    n : pypsa.Network
+    res_share: float
+    config : dict
+    """

     logger.warning(
-        "The add_RES_constraints functionality is still work in progress. "
+        "add_RES_constraints() is still a work in progress. "
         "Unexpected results might be incurred, particularly if "
         "temporal clustering is applied or if an unexpected change of technologies "
-        "is subject to the obtimisation."
+        "is subject to the optimisation."
     )

+    renew_techs = config["electricity"]["renewable_carriers"]
+
+    charger = ["H2 electrolysis", "battery charger"]
+    discharger = ["H2 fuel cell", "battery discharger"]
+
+    ren_gen = n.generators.query("carrier in @renew_techs")
+    ren_stores = n.storage_units.query("carrier in @renew_techs")
+    ren_charger = n.links.query("carrier in @charger")
+    ren_discharger = n.links.query("carrier in @discharger")
+
+    gens_i = ren_gen.index
+    stores_i = ren_stores.index
+    charger_i = ren_charger.index
+    discharger_i = ren_discharger.index
+
+    stores_t_weights = n.snapshot_weightings.stores
+
+    lgrouper = n.loads.bus.map(n.buses.country)
+    ggrouper = ren_gen.bus.map(n.buses.country)
+    sgrouper = ren_stores.bus.map(n.buses.country)
+    cgrouper = ren_charger.bus0.map(n.buses.country)
+    dgrouper = ren_discharger.bus0.map(n.buses.country)

     load = (
         n.snapshot_weightings.generators
         @ n.loads_t.p_set.groupby(lgrouper, axis=1).sum()
     )
     rhs = res_share * load

-    res_techs = [
-        "solar",
-        "onwind",
-        "offwind-dc",
-        "offwind-ac",
-        "battery",
-        "hydro",
-        "ror",
-    ]
-    charger = ["H2 electrolysis", "battery charger"]
-    discharger = ["H2 fuel cell", "battery discharger"]
-
-    gens_i = n.generators.query("carrier in @res_techs").index
-    stores_i = n.storage_units.query("carrier in @res_techs").index
-    charger_i = n.links.query("carrier in @charger").index
-    discharger_i = n.links.query("carrier in @discharger").index
-
     # Generators
     lhs_gen = (
-        linexpr(
-            (n.snapshot_weightings.generators, get_var(n, "Generator", "p")[gens_i].T)
-        )
-        .T.groupby(ggrouper, axis=1)
-        .apply(join_exprs)
+        (n.model["Generator-p"].loc[:, gens_i] * n.snapshot_weightings.generators)
+        .groupby(ggrouper.to_xarray())
+        .sum()
     )

     # StorageUnits
-    lhs_dispatch = (
-        (
-            linexpr(
-                (
-                    n.snapshot_weightings.stores,
-                    get_var(n, "StorageUnit", "p_dispatch")[stores_i].T,
-                )
-            )
-            .T.groupby(sgrouper, axis=1)
-            .apply(join_exprs)
-        )
-        .reindex(lhs_gen.index)
-        .fillna("")
-    )
-
-    lhs_store = (
-        (
-            linexpr(
-                (
-                    -n.snapshot_weightings.stores,
-                    get_var(n, "StorageUnit", "p_store")[stores_i].T,
-                )
-            )
-            .T.groupby(sgrouper, axis=1)
-            .apply(join_exprs)
-        )
-        .reindex(lhs_gen.index)
-        .fillna("")
-    )
+    store_disp_expr = (
+        n.model["StorageUnit-p_dispatch"].loc[:, stores_i] * stores_t_weights
+    )
+    store_expr = n.model["StorageUnit-p_store"].loc[:, stores_i] * stores_t_weights
+    charge_expr = n.model["Link-p"].loc[:, charger_i] * stores_t_weights.apply(
+        lambda r: r * n.links.loc[charger_i].efficiency
+    )
+    discharge_expr = n.model["Link-p"].loc[:, discharger_i] * stores_t_weights.apply(
+        lambda r: r * n.links.loc[discharger_i].efficiency
+    )
+
+    lhs_dispatch = store_disp_expr.groupby(sgrouper).sum()
+    lhs_store = store_expr.groupby(sgrouper).sum()

     # Stores (or their resp. Link components)
     # Note that the variables "p0" and "p1" currently do not exist.
     # Thus, p0 and p1 must be derived from "p" (which exists), taking into account the link efficiency.
-    lhs_charge = (
-        (
-            linexpr(
-                (
-                    -n.snapshot_weightings.stores,
-                    get_var(n, "Link", "p")[charger_i].T,
-                )
-            )
-            .T.groupby(cgrouper, axis=1)
-            .apply(join_exprs)
-        )
-        .reindex(lhs_gen.index)
-        .fillna("")
-    )
-
-    lhs_discharge = (
-        (
-            linexpr(
-                (
-                    n.snapshot_weightings.stores.apply(
-                        lambda r: r * n.links.loc[discharger_i].efficiency
-                    ),
-                    get_var(n, "Link", "p")[discharger_i],
-                )
-            )
-            .groupby(cgrouper, axis=1)
-            .apply(join_exprs)
-        )
-        .reindex(lhs_gen.index)
-        .fillna("")
-    )
+    lhs_charge = charge_expr.groupby(cgrouper).sum()
+    lhs_discharge = discharge_expr.groupby(cgrouper).sum()

-    # signs of resp. terms are coded in the linexpr.
-    # todo: for links (lhs_charge and lhs_discharge), account for snapshot weightings
-    lhs = lhs_gen + lhs_dispatch + lhs_store + lhs_charge + lhs_discharge
+    lhs = lhs_gen + lhs_dispatch - lhs_store - lhs_charge + lhs_discharge

-    define_constraints(n, lhs, "=", rhs, "RES share")
+    n.model.add_constraints(lhs == rhs, name="res_share")
n.model["Generator-p"] + + # single line sum + res = (weightings * capacity_variable.loc[res_index]).sum() load_ind = n.loads[n.loads.carrier == "AC"].index.intersection( n.loads_t.p_set.columns @@ -608,7 +684,7 @@ def H2_export_yearly_constraint(n): else: rhs = h2_export * (1 / 0.7) - con = define_constraints(n, lhs, ">=", rhs, "H2ExportConstraint", "RESproduction") + n.model.add_constraints(lhs >= rhs, name="H2ExportConstraint-RESproduction") def monthly_constraints(n, n_ref): @@ -631,15 +707,17 @@ def monthly_constraints(n, n_ref): index=n.snapshots, columns=res_index, ) + capacity_variable = n.model["Generator-p"] - res = linexpr((weightings, get_var(n, "Generator", "p")[res_index])).sum( - axis=1 - ) # single line sum + # single line sum + res = (weightings * capacity_variable[res_index]).sum(axis=1) res = res.groupby(res.index.month).sum() - electrolysis = get_var(n, "Link", "p")[ + link_p = n.model["Link-p"] + electrolysis = link_p.loc[ n.links.index[n.links.index.str.contains("H2 Electrolysis")] ] + weightings_electrolysis = pd.DataFrame( np.outer( n.snapshot_weightings["generators"], [1.0] * len(electrolysis.columns) @@ -648,7 +726,7 @@ def monthly_constraints(n, n_ref): columns=electrolysis.columns, ) - elec_input = linexpr((-allowed_excess * weightings_electrolysis, electrolysis)).sum( + elec_input = ((-allowed_excess * weightings_electrolysis) * electrolysis).sum( axis=1 ) @@ -671,16 +749,16 @@ def monthly_constraints(n, n_ref): for i in range(len(res.index)): lhs = res.iloc[i] + "\n" + elec_input.iloc[i] rhs = res_ref.iloc[i] + elec_input_ref.iloc[i] - con = define_constraints( - n, lhs, ">=", rhs, f"RESconstraints_{i}", f"REStarget_{i}" + n.model.add_constraints( + lhs >= rhs, name=f"RESconstraints_{i}-REStarget_{i}" ) else: for i in range(len(res.index)): lhs = res.iloc[i] + "\n" + elec_input.iloc[i] - con = define_constraints( - n, lhs, ">=", 0.0, f"RESconstraints_{i}", f"REStarget_{i}" + n.model.add_constraints( + lhs >= 0.0, name=f"RESconstraints_{i}-REStarget_{i}" ) # else: # logger.info("ignoring H2 export constraint as wildcard is set to 0") @@ -701,84 +779,72 @@ def add_chp_constraints(n): electric = n.links.index[electric_bool] heat = n.links.index[heat_bool] - electric_ext = n.links.index[electric_bool & n.links.p_nom_extendable] - heat_ext = n.links.index[heat_bool & n.links.p_nom_extendable] + electric_ext = n.links[electric_bool].query("p_nom_extendable").index + heat_ext = n.links[heat_bool].query("p_nom_extendable").index - electric_fix = n.links.index[electric_bool & ~n.links.p_nom_extendable] - heat_fix = n.links.index[heat_bool & ~n.links.p_nom_extendable] + electric_fix = n.links[electric_bool].query("~p_nom_extendable").index + heat_fix = n.links[heat_bool].query("~p_nom_extendable").index - link_p = get_var(n, "Link", "p") + p = n.model["Link-p"] # dimension: [time, link] + # output ratio between heat and electricity and top_iso_fuel_line for extendable if not electric_ext.empty: - link_p_nom = get_var(n, "Link", "p_nom") - - # ratio of output heat to electricity set by p_nom_ratio - lhs = linexpr( - ( - n.links.loc[electric_ext, "efficiency"] - * n.links.loc[electric_ext, "p_nom_ratio"], - link_p_nom[electric_ext], - ), - (-n.links.loc[heat_ext, "efficiency"].values, link_p_nom[heat_ext].values), - ) + p_nom = n.model["Link-p_nom"] - define_constraints(n, lhs, "=", 0, "chplink", "fix_p_nom_ratio") - - # top_iso_fuel_line for extendable - lhs = linexpr( - (1, link_p[heat_ext]), - (1, link_p[electric_ext].values), - (-1, link_p_nom[electric_ext].values), + 

@@ -701,84 +779,72 @@ def add_chp_constraints(n):
     electric = n.links.index[electric_bool]
     heat = n.links.index[heat_bool]

-    electric_ext = n.links.index[electric_bool & n.links.p_nom_extendable]
-    heat_ext = n.links.index[heat_bool & n.links.p_nom_extendable]
+    electric_ext = n.links[electric_bool].query("p_nom_extendable").index
+    heat_ext = n.links[heat_bool].query("p_nom_extendable").index

-    electric_fix = n.links.index[electric_bool & ~n.links.p_nom_extendable]
-    heat_fix = n.links.index[heat_bool & ~n.links.p_nom_extendable]
+    electric_fix = n.links[electric_bool].query("~p_nom_extendable").index
+    heat_fix = n.links[heat_bool].query("~p_nom_extendable").index

-    link_p = get_var(n, "Link", "p")
+    p = n.model["Link-p"]  # dimension: [time, link]

+    # output ratio between heat and electricity and top_iso_fuel_line for extendable
     if not electric_ext.empty:
-        link_p_nom = get_var(n, "Link", "p_nom")
-
-        # ratio of output heat to electricity set by p_nom_ratio
-        lhs = linexpr(
-            (
-                n.links.loc[electric_ext, "efficiency"]
-                * n.links.loc[electric_ext, "p_nom_ratio"],
-                link_p_nom[electric_ext],
-            ),
-            (-n.links.loc[heat_ext, "efficiency"].values, link_p_nom[heat_ext].values),
-        )
-
-        define_constraints(n, lhs, "=", 0, "chplink", "fix_p_nom_ratio")
-
-        # top_iso_fuel_line for extendable
-        lhs = linexpr(
-            (1, link_p[heat_ext]),
-            (1, link_p[electric_ext].values),
-            (-1, link_p_nom[electric_ext].values),
-        )
-
-        define_constraints(n, lhs, "<=", 0, "chplink", "top_iso_fuel_line_ext")
+        p_nom = n.model["Link-p_nom"]

+        lhs = (
+            p_nom.loc[electric_ext]
+            * (n.links.p_nom_ratio * n.links.efficiency)[electric_ext].values
+            - p_nom.loc[heat_ext] * n.links.efficiency[heat_ext].values
+        )
+        n.model.add_constraints(lhs == 0, name="chplink-fix_p_nom_ratio")
+
+        rename = {"Link-ext": "Link"}
+        lhs = (
+            p.loc[:, electric_ext]
+            + p.loc[:, heat_ext]
+            - p_nom.rename(rename).loc[electric_ext]
+        )
+        n.model.add_constraints(lhs <= 0, name="chplink-top_iso_fuel_line_ext")

+    # top_iso_fuel_line for fixed
     if not electric_fix.empty:
-        # top_iso_fuel_line for fixed
-        lhs = linexpr((1, link_p[heat_fix]), (1, link_p[electric_fix].values))
-
-        rhs = n.links.loc[electric_fix, "p_nom"].values
-
-        define_constraints(n, lhs, "<=", rhs, "chplink", "top_iso_fuel_line_fix")
+        lhs = p.loc[:, electric_fix] + p.loc[:, heat_fix]
+        rhs = n.links.p_nom[electric_fix]
+        n.model.add_constraints(lhs <= rhs, name="chplink-top_iso_fuel_line_fix")

+    # back-pressure
     if not electric.empty:
-        # backpressure
-        lhs = linexpr(
-            (
-                n.links.loc[electric, "c_b"].values * n.links.loc[heat, "efficiency"],
-                link_p[heat],
-            ),
-            (-n.links.loc[electric, "efficiency"].values, link_p[electric].values),
-        )
-
-        define_constraints(n, lhs, "<=", 0, "chplink", "backpressure")
+        lhs = (
+            p.loc[:, heat] * (n.links.efficiency[heat] * n.links.c_b[electric].values)
+            - p.loc[:, electric] * n.links.efficiency[electric]
+        )
+        n.model.add_constraints(lhs <= 0, name="chplink-backpressure")


 def add_co2_sequestration_limit(n, sns):
     co2_stores = n.stores.loc[n.stores.carrier == "co2 stored"].index

-    if co2_stores.empty or ("Store", "e") not in n.variables.index:
+    if co2_stores.empty:
         return

-    vars_final_co2_stored = get_var(n, "Store", "e").loc[sns[-1], co2_stores]
+    vars_final_co2_stored = n.model["Store-e"].loc[sns[-1], co2_stores]

-    lhs = linexpr((1, vars_final_co2_stored)).sum()
+    lhs = (1 * vars_final_co2_stored).sum()
+
     rhs = (
         n.config["sector"].get("co2_sequestration_potential", 5) * 1e6
     )  # TODO change 200 limit (Europe)

     name = "co2_sequestration_limit"
-    define_constraints(
-        n, lhs, "<=", rhs, "GlobalConstraint", "mu", axes=pd.Index([name]), spec=name
-    )
+
+    n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")


 def set_h2_colors(n):
-    blue_h2 = get_var(n, "Link", "p")[
+    blue_h2 = n.model["Link-p"].loc[
         n.links.index[n.links.index.str.contains("blue H2")]
     ]

-    pink_h2 = get_var(n, "Link", "p")[
+    pink_h2 = n.model["Link-p"].loc[
         n.links.index[n.links.index.str.contains("pink H2")]
     ]
@@ -810,16 +876,16 @@ def set_h2_colors(n):
         columns=pink_h2.columns,
     )

-    total_blue = linexpr((weightings_blue, blue_h2)).sum().sum()
+    total_blue = (weightings_blue * blue_h2).sum().sum()

-    total_pink = linexpr((weightings_pink, pink_h2)).sum().sum()
+    total_pink = (weightings_pink * pink_h2).sum().sum()

     rhs_blue = load_h2 * snakemake.config["sector"]["hydrogen"]["blue_share"]
     rhs_pink = load_h2 * snakemake.config["sector"]["hydrogen"]["pink_share"]

-    define_constraints(n, total_blue, "=", rhs_blue, "blue_h2_share")
+    n.model.add_constraints(total_blue == rhs_blue, name="blue_h2_share")

-    define_constraints(n, total_pink, "=", rhs_pink, "pink_h2_share")
+    n.model.add_constraints(total_pink == rhs_pink, name="pink_h2_share")


 def add_existing(n):
@@ -867,9 +933,9 @@ def add_lossy_bidirectional_link_constraints(n: pypsa.components.Network) -> Non
     carriers = n.links.loc[n.links.reversed, "carrier"].unique()  # noqa: F841

     # get the indices of all forward links (non-reversed), that have a reversed counterpart
-    forward_i = n.links.loc[
-        n.links.carrier.isin(carriers) & ~n.links.reversed & n.links.p_nom_extendable
-    ].index
+    forward_i = n.links.query(
+        "carrier in @carriers and ~reversed and p_nom_extendable"
+    ).index

     # function to get backward (reversed) indices corresponding to forward links
     # this function is required to properly interact with the myopic naming scheme
@@ -889,11 +955,11 @@ def get_backward_i(forward_i):
     backward_i = get_backward_i(forward_i)

-    # get the p_nom optimization variables for the links using the get_var function
-    links_p_nom = get_var(n, "Link", "p_nom")
+    # get the p_nom optimization variables for the links
+    links_p_nom = n.model["Link-p_nom"]

     # only consider forward and backward links that are present in the optimization variables
-    subset_forward = forward_i.intersection(links_p_nom.index)
-    subset_backward = backward_i.intersection(links_p_nom.index)
+    subset_forward = forward_i.intersection(links_p_nom.indexes["Link-ext"])
+    subset_backward = backward_i.intersection(links_p_nom.indexes["Link-ext"])

     # ensure we have a matching number of forward and backward links
     if len(subset_forward) != len(subset_backward):
@@ -901,13 +967,10 @@ def get_backward_i(forward_i):

     # define the lefthand side of the constraint p_nom (forward) - p_nom (backward) = 0
     # this ensures that the forward links always have the same maximum nominal power as their backward counterpart
-    lhs = linexpr(
-        (1, get_var(n, "Link", "p_nom")[backward_i].to_numpy()),
-        (-1, get_var(n, "Link", "p_nom")[forward_i].to_numpy()),
-    )
+    lhs = links_p_nom.loc[backward_i] - links_p_nom.loc[forward_i]

-    # add the constraint to the PySPA model
-    define_constraints(n, lhs, "=", 0, "Link-bidirectional_sync")
+    # add the constraint to the PyPSA model
+    n.model.add_constraints(lhs == 0, name="Link-bidirectional_sync")


 def extra_functionality(n, snapshots):
@@ -932,13 +995,18 @@ def extra_functionality(n, snapshots):
     for o in opts:
         if "RES" in o:
             res_share = float(re.findall("[0-9]*\.?[0-9]+$", o)[0])
-            add_RES_constraints(n, res_share)
+            add_RES_constraints(n, res_share, config)
     for o in opts:
         if "EQ" in o:
             add_EQ_constraints(n, o)
+
     add_battery_constraints(n)
     add_lossy_bidirectional_link_constraints(n)

+    if snakemake.config["sector"]["chp"]:
+        logger.info("setting CHP constraints")
+        add_chp_constraints(n)
+
     if (
         snakemake.config["policy_config"]["hydrogen"]["temporal_matching"]
         == "h2_yearly_matching"
@@ -984,40 +1052,45 @@ def extra_functionality(n, snapshots):
         add_co2_sequestration_limit(n, snapshots)

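Editor's note: with the move to linopy, the solver settings are assembled as keyword arguments and handed to `n.optimize` (or its iterative wrapper) in `solve_network()` below, instead of being passed to `network_lopf`/`ilopf`. A short sketch of how the pieces fit together, with a hypothetical solver choice:

```python
kwargs = {
    "solver_name": "glpk",  # hypothetical choice for illustration
    "solver_options": {},
    "extra_functionality": extra_functionality,  # called as f(n, snapshots)
}
status, condition = n.optimize(**kwargs)
# with expandable transmission lines, the iterative wrapper is used instead:
# status, condition = n.optimize.optimize_transmission_expansion_iteratively(**kwargs)
```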
Skipping iterative solving.") # add to network for extra_functionality n.config = config n.opts = opts - if cf_solving.get("skip_iterations", False): - network_lopf( - n, - solver_name=solver_name, - solver_options=solver_options, - extra_functionality=extra_functionality, - **kwargs, - ) + if skip_iterations: + status, condition = n.optimize(**kwargs) else: - ilopf( - n, - solver_name=solver_name, - solver_options=solver_options, - track_iterations=track_iterations, - min_iterations=min_iterations, - max_iterations=max_iterations, - extra_functionality=extra_functionality, - **kwargs, + kwargs["track_iterations"] = (cf_solving.get("track_iterations", False),) + kwargs["min_iterations"] = (cf_solving.get("min_iterations", 4),) + kwargs["max_iterations"] = (cf_solving.get("max_iterations", 6),) + status, condition = n.optimize.optimize_transmission_expansion_iteratively( + **kwargs ) + + if status != "ok": # and not rolling_horizon: + logger.warning( + f"Solving status '{status}' with termination condition '{condition}'" + ) + if "infeasible" in condition: + labels = n.model.compute_infeasibilities() + logger.info(f"Labels:\n{labels}") + n.model.print_infeasibilities() + raise RuntimeError("Solving status 'infeasible'") + return n @@ -1026,20 +1099,23 @@ def solve_network(n, config, solving={}, opts="", **kwargs): from _helpers import mock_snakemake snakemake = mock_snakemake( - "solve_network", + "solve_sector_network", simpl="", clusters="4", ll="c1", opts="Co2L-4H", + planning_horizons="2030", + discountrate="0.071", + demand="AB", + sopts="144H", + h2export="120", + configfile="config.tutorial.yaml", ) configure_logging(snakemake) - tmpdir = snakemake.params.solving.get("tmpdir") - if tmpdir is not None: - Path(tmpdir).mkdir(parents=True, exist_ok=True) opts = snakemake.wildcards.opts.split("-") - solving = snakemake.params.solving + solve_opts = snakemake.config["solving"]["options"] is_sector_coupled = "sopts" in snakemake.wildcards.keys() @@ -1070,15 +1146,13 @@ def solve_network(n, config, solving={}, opts="", **kwargs): else: n_ref = None - n = prepare_network(n, solving["options"]) + n = prepare_network(n, solve_opts, config=solve_opts) n = solve_network( n, config=snakemake.config, - solving=solving, - opts=opts, - solver_dir=tmpdir, - solver_logfile=snakemake.log.solver, + solving=snakemake.params.solving, + log_fn=snakemake.log.solver, ) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0])