diff --git a/esmvalcore/cmor/_fixes/cmip6/cesm2.py b/esmvalcore/cmor/_fixes/cmip6/cesm2.py
index 53cd87a52d..4c17d95307 100644
--- a/esmvalcore/cmor/_fixes/cmip6/cesm2.py
+++ b/esmvalcore/cmor/_fixes/cmip6/cesm2.py
@@ -1,4 +1,5 @@
 """Fixes for CESM2 model."""
+
 import ncdata.dataset_like
 import ncdata.iris
 import ncdata.netcdf4
@@ -46,9 +47,10 @@ def fix_file(self, filepath, output_dir, add_unique_suffix=False):
         """
         ncdataset = ncdata.netcdf4.from_nc4(filepath)
         dataset = ncdata.dataset_like.Nc4DatasetLike(ncdataset)
-        dataset.variables['lev'].formula_terms = 'p0: p0 a: a b: b ps: ps'
-        dataset.variables['lev'].standard_name = (
-            'atmosphere_hybrid_sigma_pressure_coordinate')
+        dataset.variables["lev"].formula_terms = "p0: p0 a: a b: b ps: ps"
+        dataset.variables[
+            "lev"
+        ].standard_name = "atmosphere_hybrid_sigma_pressure_coordinate"
         cubes = ncdata.iris.to_iris(ncdataset)
         return cubes
 
@@ -68,14 +70,14 @@ def fix_metadata(self, cubes):
 
         """
         cube = self.get_cube_from_list(cubes)
-        lev_coord = cube.coord(var_name='lev')
-        a_coord = cube.coord(var_name='a')
+        lev_coord = cube.coord(var_name="lev")
+        a_coord = cube.coord(var_name="a")
         a_coord.bounds = a_coord.core_bounds()[::-1, :]
-        b_coord = cube.coord(var_name='b')
+        b_coord = cube.coord(var_name="b")
         b_coord.bounds = b_coord.core_bounds()[::-1, :]
         lev_coord.points = a_coord.core_points() + b_coord.core_points()
         lev_coord.bounds = a_coord.core_bounds() + b_coord.core_bounds()
-        lev_coord.units = '1'
+        lev_coord.units = "1"
 
         return cubes
 
@@ -120,12 +122,13 @@ def fix_metadata(self, cubes):
         iris.cube.CubeList
         """
         for cube in cubes:
-            for coord_name in ['latitude', 'longitude']:
+            for coord_name in ["latitude", "longitude"]:
                 coord = cube.coord(coord_name)
                 if not coord.has_bounds():
                     coord.guess_bounds()
-                coord.bounds = np.round(coord.core_bounds().astype(np.float64),
-                                        4)
+                coord.bounds = np.round(
+                    coord.core_bounds().astype(np.float64), 4
+                )
 
         return cubes
 
@@ -219,9 +222,10 @@ def fix_metadata(self, cubes):
         """
         cube = self.get_cube_from_list(cubes)
         for cube in cubes:
-            if cube.attributes['mipTable'] == 'Omon':
-                cube.coord('time').points = \
-                    np.round(cube.coord('time').points, 1)
+            if cube.attributes["mipTable"] == "Omon":
+                cube.coord("time").points = np.round(
+                    cube.coord("time").points, 1
+                )
 
         return cubes
 
@@ -241,13 +245,13 @@ def fix_metadata(self, cubes):
         iris.cube.CubeList
         """
         for cube in cubes:
-            if cube.coords(axis='Z'):
-                z_coord = cube.coord(axis='Z')
+            if cube.coords(axis="Z"):
+                z_coord = cube.coord(axis="Z")
 
                 # Only points need to be fixed, not bounds
-                if z_coord.units == 'cm':
+                if z_coord.units == "cm":
                     z_coord.points = z_coord.core_points() / 100.0
-                    z_coord.units = 'm'
+                    z_coord.units = "m"
 
                 # Fix depth metadata
                 if z_coord.standard_name is None:
diff --git a/esmvalcore/cmor/_fixes/cmip6/cesm2_waccm.py b/esmvalcore/cmor/_fixes/cmip6/cesm2_waccm.py
index 87bf8f9859..4d2ceae473 100644
--- a/esmvalcore/cmor/_fixes/cmip6/cesm2_waccm.py
+++ b/esmvalcore/cmor/_fixes/cmip6/cesm2_waccm.py
@@ -1,4 +1,5 @@
 """Fixes for CESM2-WACCM model."""
+
 from ..common import SiconcFixScalarCoord
 from .cesm2 import Cl as BaseCl
 from .cesm2 import Fgco2 as BaseFgco2
diff --git a/esmvalcore/preprocessor/_io.py b/esmvalcore/preprocessor/_io.py
index e50ffd163e..cb4d78fde6 100644
--- a/esmvalcore/preprocessor/_io.py
+++ b/esmvalcore/preprocessor/_io.py
@@ -1,4 +1,5 @@
 """Functions for loading and saving cubes."""
+
 from __future__ import annotations
 
 import copy
@@ -25,14 +26,14 @@
 
 logger = logging.getLogger(__name__)
 
-GLOBAL_FILL_VALUE = 1e+20
+GLOBAL_FILL_VALUE = 1e20
 
 DATASET_KEYS = {
-    'mip',
+    "mip",
 }
 VARIABLE_KEYS = {
-    'reference_dataset',
-    'alternative_dataset',
+    "reference_dataset",
+    "alternative_dataset",
 }
 
 iris.FUTURE.save_split_attrs = True
@@ -50,17 +51,18 @@ def _get_attr_from_field_coord(ncfield, coord_name, attr):
 
 def _load_callback(raw_cube, field, _):
     """Use this callback to fix anything Iris tries to break."""
     # Remove attributes that cause issues with merging and concatenation
-    _delete_attributes(raw_cube,
-                       ('creation_date', 'tracking_id', 'history', 'comment'))
+    _delete_attributes(
+        raw_cube, ("creation_date", "tracking_id", "history", "comment")
+    )
     for coord in raw_cube.coords():
         # Iris chooses to change longitude and latitude units to degrees
         # regardless of value in file, so reinstating file value
-        if coord.standard_name in ['longitude', 'latitude']:
-            units = _get_attr_from_field_coord(field, coord.var_name, 'units')
+        if coord.standard_name in ["longitude", "latitude"]:
+            units = _get_attr_from_field_coord(field, coord.var_name, "units")
             if units is not None:
                 coord.units = units
         # CMOR sometimes adds a history to the coordinates.
-        _delete_attributes(coord, ('history', ))
+        _delete_attributes(coord, ("history",))
 
 
 def _delete_attributes(iris_object, atts):
@@ -109,26 +111,32 @@ def load(
         ignore_warnings = list(ignore_warnings)
 
     # Default warnings ignored for every dataset
-    ignore_warnings.append({
-        'message': "Missing CF-netCDF measure variable .*",
-        'category': UserWarning,
-        'module': 'iris',
-    })
-    ignore_warnings.append({
-        'message': "Ignoring netCDF variable '.*' invalid units '.*'",
-        'category': UserWarning,
-        'module': 'iris',
-    })  # iris < 3.8
-    ignore_warnings.append({
-        'message': "Ignoring invalid units .* on netCDF variable .*",
-        'category': UserWarning,
-        'module': 'iris',
-    })  # iris >= 3.8
+    ignore_warnings.append(
+        {
+            "message": "Missing CF-netCDF measure variable .*",
+            "category": UserWarning,
+            "module": "iris",
+        }
+    )
+    ignore_warnings.append(
+        {
+            "message": "Ignoring netCDF variable '.*' invalid units '.*'",
+            "category": UserWarning,
+            "module": "iris",
+        }
+    )  # iris < 3.8
+    ignore_warnings.append(
+        {
+            "message": "Ignoring invalid units .* on netCDF variable .*",
+            "category": UserWarning,
+            "module": "iris",
+        }
+    )  # iris >= 3.8
 
     # Filter warnings
     with catch_warnings():
         for warning_kwargs in ignore_warnings:
-            warning_kwargs.setdefault('action', 'ignore')
+            warning_kwargs.setdefault("action", "ignore")
             filterwarnings(**warning_kwargs)
         # Suppress UDUNITS-2 error messages that cannot be ignored with
         # warnings.filterwarnings
@@ -138,10 +146,10 @@ def load(
     logger.debug("Done with loading %s", file)
 
     if not raw_cubes:
-        raise ValueError(f'Can not load cubes from {file}')
+        raise ValueError(f"Can not load cubes from {file}")
 
     for cube in raw_cubes:
-        cube.attributes['source_file'] = str(file)
+        cube.attributes["source_file"] = str(file)
 
     return raw_cubes
 
@@ -149,18 +157,19 @@ def load(
 
 def _concatenate_cubes(cubes, check_level):
     """Concatenate cubes according to the check_level."""
     kwargs = {
-        'check_aux_coords': True,
-        'check_cell_measures': True,
-        'check_ancils': True,
-        'check_derived_coords': True
+        "check_aux_coords": True,
+        "check_cell_measures": True,
+        "check_ancils": True,
+        "check_derived_coords": True,
     }
 
     if check_level > CheckLevels.DEFAULT:
         kwargs = dict.fromkeys(kwargs, False)
         logger.debug(
-            'Concatenation will be performed without checking '
-            'auxiliary coordinates, cell measures, ancillaries '
-            'and derived coordinates present in the cubes.', )
+            "Concatenation will be performed without checking "
+            "auxiliary coordinates, cell measures, ancillaries "
+            "and derived coordinates present in the cubes.",
+        )
 
     concatenated = iris.cube.CubeList(cubes).concatenate(**kwargs)
 
@@ -168,7 +177,6 @@ def _concatenate_cubes(cubes, check_level):
 
 
 class _TimesHelper:
-
     def __init__(self, time):
         self.times = time.core_points()
         self.units = str(time.units)
@@ -228,7 +236,10 @@ def from_cube(cls, cube):
                 # current cube ends after new one, just forget new cube
                 logger.debug(
                     "Discarding %s because the time range "
-                    "is already covered by %s", new_cube.cube, current_cube.cube)
+                    "is already covered by %s",
+                    new_cube.cube,
+                    current_cube.cube,
+                )
                 continue
             if new_cube.start == current_cube.start:
                 # new cube completely covers current one
@@ -236,20 +247,27 @@ def from_cube(cls, cube):
                 current_cube = new_cube
                 logger.debug(
                     "Discarding %s because the time range is covered by %s",
-                    current_cube.cube, new_cube.cube)
+                    current_cube.cube,
+                    new_cube.cube,
+                )
                 continue
             # new cube ends after current one,
             # use all of new cube, and shorten current cube to
             # eliminate overlap with new cube
-            cut_index = cftime.time2index(
-                new_cube.start,
-                _TimesHelper(current_cube.times),
-                current_cube.times.units.calendar,
-                select="before",
-            ) + 1
-            logger.debug("Using %s shortened to %s due to overlap",
-                         current_cube.cube,
-                         current_cube.times.cell(cut_index).point)
+            cut_index = (
+                cftime.time2index(
+                    new_cube.start,
+                    _TimesHelper(current_cube.times),
+                    current_cube.times.units.calendar,
+                    select="before",
+                )
+                + 1
+            )
+            logger.debug(
+                "Using %s shortened to %s due to overlap",
+                current_cube.cube,
+                current_cube.times.cell(cut_index).point,
+            )
             new_cubes.append(current_cube.cube[:cut_index])
             current_cube = new_cube
 
@@ -261,20 +279,23 @@ def from_cube(cls, cube):
 
 
 def _fix_calendars(cubes):
     """Check and homogenise calendars, if possible."""
-    calendars = [cube.coord('time').units.calendar for cube in cubes]
+    calendars = [cube.coord("time").units.calendar for cube in cubes]
     unique_calendars = np.unique(calendars)
 
     calendar_ocurrences = np.array(
-        [calendars.count(calendar) for calendar in unique_calendars])
+        [calendars.count(calendar) for calendar in unique_calendars]
+    )
     calendar_index = int(
-        np.argwhere(calendar_ocurrences == calendar_ocurrences.max()))
+        np.argwhere(calendar_ocurrences == calendar_ocurrences.max())
+    )
 
     for cube in cubes:
-        time_coord = cube.coord('time')
+        time_coord = cube.coord("time")
         old_calendar = time_coord.units.calendar
         if old_calendar != unique_calendars[calendar_index]:
             new_unit = time_coord.units.change_calendar(
-                unique_calendars[calendar_index])
+                unique_calendars[calendar_index]
+            )
             time_coord.units = new_unit
 
@@ -285,14 +306,14 @@ def _get_concatenation_error(cubes):
         iris.cube.CubeList(cubes).concatenate_cube()
     except iris.exceptions.ConcatenateError as exc:
         msg = str(exc)
-    logger.error('Can not concatenate cubes into a single one: %s', msg)
-    logger.error('Resulting cubes:')
+    logger.error("Can not concatenate cubes into a single one: %s", msg)
+    logger.error("Resulting cubes:")
     for cube in cubes:
         logger.error(cube)
         time = cube.coord("time")
-        logger.error('From %s to %s', time.cell(0), time.cell(-1))
+        logger.error("From %s to %s", time.cell(0), time.cell(-1))
 
-    raise ValueError(f'Can not concatenate cubes: {msg}')
+    raise ValueError(f"Can not concatenate cubes: {msg}")
 
 
 def _sort_cubes_by_time(cubes):
@@ -300,12 +321,15 @@ def _sort_cubes_by_time(cubes):
     try:
         cubes = sorted(cubes, key=lambda c: c.coord("time").cell(0).point)
     except iris.exceptions.CoordinateNotFoundError as exc:
-        msg = "One or more cubes {} are missing".format(cubes) + \
-              " time coordinate: {}".format(str(exc))
+        msg = "One or more cubes {} are missing".format(
+            cubes
+        ) + " time coordinate: {}".format(str(exc))
         raise ValueError(msg)
     except TypeError as error:
-        msg = ("Cubes cannot be sorted "
-               f"due to differing time units: {str(error)}")
+        msg = (
+            "Cubes cannot be sorted "
+            f"due to differing time units: {str(error)}"
+        )
         raise TypeError(msg) from error
     return cubes
 
@@ -349,12 +373,9 @@ def concatenate(cubes, check_level=CheckLevels.DEFAULT):
     return result
 
 
-def save(cubes,
-         filename,
-         optimize_access='',
-         compress=False,
-         alias='',
-         **kwargs):
+def save(
+    cubes, filename, optimize_access="", compress=False, alias="", **kwargs
+):
     """Save iris cubes to file.
 
     Parameters
@@ -394,59 +415,71 @@ def save(cubes,
         raise ValueError(f"Cannot save empty cubes '{cubes}'")
 
     # Rename some arguments
-    kwargs['target'] = filename
-    kwargs['zlib'] = compress
+    kwargs["target"] = filename
+    kwargs["zlib"] = compress
 
     dirname = os.path.dirname(filename)
     if not os.path.exists(dirname):
         os.makedirs(dirname)
 
-    if (os.path.exists(filename)
-            and all(cube.has_lazy_data() for cube in cubes)):
+    if os.path.exists(filename) and all(
+        cube.has_lazy_data() for cube in cubes
+    ):
         logger.debug(
             "Not saving cubes %s to %s to avoid data loss. "
-            "The cube is probably unchanged.", cubes, filename)
+            "The cube is probably unchanged.",
+            cubes,
+            filename,
+        )
         return filename
 
     for cube in cubes:
-        logger.debug("Saving cube:\n%s\nwith %s data to %s", cube,
-                     "lazy" if cube.has_lazy_data() else "realized", filename)
+        logger.debug(
+            "Saving cube:\n%s\nwith %s data to %s",
+            cube,
+            "lazy" if cube.has_lazy_data() else "realized",
+            filename,
+        )
     if optimize_access:
         cube = cubes[0]
-        if optimize_access == 'map':
+        if optimize_access == "map":
             dims = set(
-                cube.coord_dims('latitude') + cube.coord_dims('longitude'))
-        elif optimize_access == 'timeseries':
-            dims = set(cube.coord_dims('time'))
+                cube.coord_dims("latitude") + cube.coord_dims("longitude")
+            )
+        elif optimize_access == "timeseries":
+            dims = set(cube.coord_dims("time"))
         else:
             dims = tuple()
-            for coord_dims in (cube.coord_dims(dimension)
-                               for dimension in optimize_access.split(' ')):
+            for coord_dims in (
+                cube.coord_dims(dimension)
+                for dimension in optimize_access.split(" ")
+            ):
                 dims += coord_dims
             dims = set(dims)
 
-        kwargs['chunksizes'] = tuple(
+        kwargs["chunksizes"] = tuple(
             length if index in dims else 1
-            for index, length in enumerate(cube.shape))
+            for index, length in enumerate(cube.shape)
+        )
 
-    kwargs['fill_value'] = GLOBAL_FILL_VALUE
+    kwargs["fill_value"] = GLOBAL_FILL_VALUE
     if alias:
-
         for cube in cubes:
-            logger.debug('Changing var_name from %s to %s', cube.var_name,
-                         alias)
+            logger.debug(
+                "Changing var_name from %s to %s", cube.var_name, alias
+            )
            cube.var_name = alias
 
     # Ignore some warnings when saving
     with catch_warnings():
         filterwarnings(
-            'ignore',
+            "ignore",
             message=(
                 ".* is being added as CF data variable attribute, but .* "
                 "should only be a CF global attribute"
             ),
             category=UserWarning,
-            module='iris',
+            module="iris",
         )
         iris.save(cubes, **kwargs)
 
@@ -460,7 +493,7 @@ def _get_debug_filename(filename, step):
         num = int(sorted(os.listdir(dirname)).pop()[:2]) + 1
     else:
         num = 0
-    filename = os.path.join(dirname, '{:02}_{}.nc'.format(num, step))
+    filename = os.path.join(dirname, "{:02}_{}.nc".format(num, step))
     return filename
 
 
@@ -469,8 +502,8 @@ def _sort_products(products):
     return sorted(
         products,
         key=lambda p: (
-            p.attributes.get('recipe_dataset_index', 1e6),
-            p.attributes.get('dataset', ''),
+            p.attributes.get("recipe_dataset_index", 1e6),
+            p.attributes.get("dataset", ""),
         ),
     )
 
@@ -478,21 +511,22 @@ def _sort_products(products):
 def write_metadata(products, write_ncl=False):
     """Write product metadata to file."""
     output_files = []
-    for output_dir, prods in groupby(products,
-                                     lambda p: os.path.dirname(p.filename)):
+    for output_dir, prods in groupby(
+        products, lambda p: os.path.dirname(p.filename)
+    ):
         sorted_products = _sort_products(prods)
         metadata = {}
         for product in sorted_products:
-            if isinstance(product.attributes.get('exp'), (list, tuple)):
+            if isinstance(product.attributes.get("exp"), (list, tuple)):
                 product.attributes = dict(product.attributes)
-                product.attributes['exp'] = '-'.join(product.attributes['exp'])
-            if 'original_short_name' in product.attributes:
-                del product.attributes['original_short_name']
+                product.attributes["exp"] = "-".join(product.attributes["exp"])
+            if "original_short_name" in product.attributes:
+                del product.attributes["original_short_name"]
             metadata[product.filename] = product.attributes
 
-        output_filename = os.path.join(output_dir, 'metadata.yml')
+        output_filename = os.path.join(output_dir, "metadata.yml")
         output_files.append(output_filename)
-        with open(output_filename, 'w', encoding='utf-8') as file:
+        with open(output_filename, "w", encoding="utf-8") as file:
             yaml.safe_dump(metadata, file)
         if write_ncl:
             output_files.append(_write_ncl_metadata(output_dir, metadata))
 
@@ -504,28 +538,31 @@ def _write_ncl_metadata(output_dir, metadata):
     """Write NCL metadata files to output_dir."""
     variables = [copy.deepcopy(v) for v in metadata.values()]
 
-    info = {'input_file_info': variables}
+    info = {"input_file_info": variables}
 
     # Split input_file_info into dataset and variable properties
     # dataset keys and keys with non-identical values will be stored
     # in dataset_info, the rest in variable_info
     variable_info = {}
-    info['variable_info'] = [variable_info]
-    info['dataset_info'] = []
+    info["variable_info"] = [variable_info]
+    info["dataset_info"] = []
     for variable in variables:
         dataset_info = {}
-        info['dataset_info'].append(dataset_info)
+        info["dataset_info"].append(dataset_info)
         for key in variable:
-            dataset_specific = any(variable[key] != var.get(key, object())
-                                   for var in variables)
-            if ((dataset_specific or key in DATASET_KEYS)
-                    and key not in VARIABLE_KEYS):
+            dataset_specific = any(
+                variable[key] != var.get(key, object()) for var in variables
+            )
+            if (
+                dataset_specific or key in DATASET_KEYS
+            ) and key not in VARIABLE_KEYS:
                 dataset_info[key] = variable[key]
             else:
                 variable_info[key] = variable[key]
 
-    filename = os.path.join(output_dir,
-                            variable_info['short_name'] + '_info.ncl')
+    filename = os.path.join(
+        output_dir, variable_info["short_name"] + "_info.ncl"
+    )
     write_ncl_settings(info, filename)
 
     return filename
diff --git a/setup.py b/setup.py
index f97763fd6e..6dbebeea0f 100755
--- a/setup.py
+++ b/setup.py
@@ -16,90 +16,90 @@
 from setuptools import Command, setup
 
 PACKAGES = [
-    'esmvalcore',
+    "esmvalcore",
 ]
 
 REQUIREMENTS = {
     # Installation script (this file) dependencies
-    'setup': [
-        'setuptools_scm',
+    "setup": [
+        "setuptools_scm",
     ],
     # Installation dependencies
     # Use with pip install . to install from source
-    'install': [
-        'cartopy',
-        'cf-units',
-        'dask[array,distributed]',
-        'dask-jobqueue',
-        'esgf-pyclient>=0.3.1',
-        'esmf-regrid>=0.10.0',  # iris-esmf-regrid #342
-        'esmpy!=8.1.0',  # not on PyPI
-        'filelock',
-        'fiona',
-        'fire',
-        'geopy',
-        'humanfriendly',
+    "install": [
+        "cartopy",
+        "cf-units",
+        "dask[array,distributed]",
+        "dask-jobqueue",
+        "esgf-pyclient>=0.3.1",
+        "esmf-regrid>=0.10.0",  # iris-esmf-regrid #342
+        "esmpy!=8.1.0",  # not on PyPI
+        "filelock",
+        "fiona",
+        "fire",
+        "geopy",
+        "humanfriendly",
         "importlib_metadata;python_version<'3.10'",
-        'isodate',
-        'jinja2',
-        'nc-time-axis',  # needed by iris.plot
-        'ncdata',
-        'nested-lookup',
-        'netCDF4',
-        'numpy!=1.24.3,<2.0.0',  # avoid pulling 2.0.0rc1
-        'packaging',
-        'pandas!=2.2.0,!=2.2.1,!=2.2.2',  # GH #2305 #2349 etc
-        'pillow',
-        'prov',
-        'psutil',
-        'py-cordex',
-        'pybtex',
-        'pyyaml',
-        'requests',
-        'scipy>=1.6',
-        'scitools-iris>=3.9.0',
-        'shapely>=2.0.0',
-        'stratify>=0.3',
-        'yamale',
+        "isodate",
+        "jinja2",
+        "nc-time-axis",  # needed by iris.plot
+        "ncdata",
+        "nested-lookup",
+        "netCDF4",
+        "numpy!=1.24.3,<2.0.0",  # avoid pulling 2.0.0rc1
+        "packaging",
+        "pandas!=2.2.0,!=2.2.1,!=2.2.2",  # GH #2305 #2349 etc
+        "pillow",
+        "prov",
+        "psutil",
+        "py-cordex",
+        "pybtex",
+        "pyyaml",
+        "requests",
+        "scipy>=1.6",
+        "scitools-iris>=3.9.0",
+        "shapely>=2.0.0",
+        "stratify>=0.3",
+        "yamale",
     ],
     # Test dependencies
-    'test': [
-        'flake8>=7.0.0',  # not to pick up E231
-        'pytest>=3.9,!=6.0.0rc1,!=6.0.0',
-        'pytest-cov>=2.10.1',
-        'pytest-env',
-        'pytest-html!=2.1.0',
-        'pytest-metadata>=1.5.1',
-        'pytest-mypy>=0.10.3',  # gh issue/2314
-        'pytest-mock',
-        'pytest-xdist',
-        'ESMValTool_sample_data==0.0.3',
+    "test": [
+        "flake8>=7.0.0",  # not to pick up E231
+        "pytest>=3.9,!=6.0.0rc1,!=6.0.0",
+        "pytest-cov>=2.10.1",
+        "pytest-env",
+        "pytest-html!=2.1.0",
+        "pytest-metadata>=1.5.1",
+        "pytest-mypy>=0.10.3",  # gh issue/2314
+        "pytest-mock",
+        "pytest-xdist",
+        "ESMValTool_sample_data==0.0.3",
         # MyPy library stubs
-        'mypy>=0.990',
-        'types-requests',
-        'types-PyYAML',
+        "mypy>=0.990",
+        "types-requests",
+        "types-PyYAML",
     ],
     # Documentation dependencies
-    'doc': [
-        'autodocsumm>=0.2.2',
-        'ipython',
-        'nbsphinx',
-        'sphinx>=6.1.3',
-        'pydata_sphinx_theme',
+    "doc": [
+        "autodocsumm>=0.2.2",
+        "ipython",
+        "nbsphinx",
+        "sphinx>=6.1.3",
+        "pydata_sphinx_theme",
    ],
     # Development dependencies
     # Use pip install -e .[develop] to install in development mode
-    'develop': [
-        'codespell',
-        'docformatter',
-        'isort',
-        'flake8>=7',
-        'pre-commit',
-        'pylint',
-        'pydocstyle',
-        'vprof',
-        'yamllint',
-        'yapf',
+    "develop": [
+        "codespell",
+        "docformatter",
+        "isort",
+        "flake8>=7",
+        "pre-commit",
+        "pylint",
+        "pydocstyle",
+        "vprof",
+        "yamllint",
+        "yapf",
     ],
 }
 
@@ -117,8 +117,7 @@ def _ignore(path):
             continue
         for filename in files:
             filename = os.path.join(root, filename)
-            if (filename.lower().endswith('.py')
-                    and not _ignore(filename)):
+            if filename.lower().endswith(".py") and not _ignore(filename):
                 yield filename
 
 
@@ -129,7 +128,8 @@ def install_deps_temp(self):
         """Try to temporarily install packages needed to run the command."""
         if self.distribution.install_requires:
             self.distribution.fetch_build_eggs(
-                self.distribution.install_requires)
+                self.distribution.install_requires
+            )
         if self.distribution.tests_require:
             self.distribution.fetch_build_eggs(self.distribution.tests_require)
 
@@ -148,11 +148,11 @@ def finalize_options(self):
     def run(self):
         """Run prospector and generate a report."""
         check_paths = PACKAGES + [
-            'setup.py',
-            'tests',
+            "setup.py",
+            "tests",
         ]
         ignore = [
-            'doc/',
+            "doc/",
         ]
 
         # try to install missing dependencies and import prospector
@@ -160,7 +160,7 @@ def run(self):
             from prospector.run import main
         except ImportError:
             # try to install and then import
-            self.distribution.fetch_build_eggs(['prospector[with_pyroma]'])
+            self.distribution.fetch_build_eggs(["prospector[with_pyroma]"])
             from prospector.run import main
 
         self.install_deps_temp()
@@ -173,7 +173,7 @@ def run(self):
 
         # write command line
         files = discover_python_files(check_paths, ignore)
-        sys.argv = ['prospector']
+        sys.argv = ["prospector"]
         sys.argv.extend(files)
 
         # run prospector
@@ -184,70 +184,69 @@ def run(self):
 
 def read_authors(filename):
     """Read the list of authors from .zenodo.json file."""
-    with Path(filename).open(encoding='utf-8') as file:
+    with Path(filename).open(encoding="utf-8") as file:
         info = json.load(file)
         authors = []
-        for author in info['creators']:
-            name = ' '.join(author['name'].split(',')[::-1]).strip()
+        for author in info["creators"]:
+            name = " ".join(author["name"].split(",")[::-1]).strip()
             authors.append(name)
-        return ', '.join(authors)
+        return ", ".join(authors)
 
 
 def read_description(filename):
     """Read the description from .zenodo.json file."""
-    with Path(filename).open(encoding='utf-8') as file:
+    with Path(filename).open(encoding="utf-8") as file:
         info = json.load(file)
-        return info['description']
+        return info["description"]
 
 
 setup(
-    name='ESMValCore',
-    author=read_authors('.zenodo.json'),
-    description=read_description('.zenodo.json'),
-    long_description=Path('README.md').read_text(encoding='utf-8'),
-    long_description_content_type='text/markdown',
-    url='https://www.esmvaltool.org',
-    download_url='https://github.com/ESMValGroup/ESMValCore',
-    license='Apache License, Version 2.0',
+    name="ESMValCore",
+    author=read_authors(".zenodo.json"),
+    description=read_description(".zenodo.json"),
+    long_description=Path("README.md").read_text(encoding="utf-8"),
+    long_description_content_type="text/markdown",
+    url="https://www.esmvaltool.org",
+    download_url="https://github.com/ESMValGroup/ESMValCore",
+    license="Apache License, Version 2.0",
     classifiers=[
-        'Development Status :: 5 - Production/Stable',
-        'Environment :: Console',
-        'Intended Audience :: Developers',
-        'Intended Audience :: Science/Research',
-        'License :: OSI Approved :: Apache Software License',
-        'Natural Language :: English',
-        'Operating System :: POSIX :: Linux',
-        'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.9',
-        'Programming Language :: Python :: 3.10',
-        'Programming Language :: Python :: 3.11',
-        'Topic :: Scientific/Engineering',
-        'Topic :: Scientific/Engineering :: Atmospheric Science',
-        'Topic :: Scientific/Engineering :: GIS',
-        'Topic :: Scientific/Engineering :: Hydrology',
-        'Topic :: Scientific/Engineering :: Physics',
+        "Development Status :: 5 - Production/Stable",
+        "Environment :: Console",
+        "Intended Audience :: Developers",
+        "Intended Audience :: Science/Research",
+        "License :: OSI Approved :: Apache Software License",
+        "Natural Language :: English",
+        "Operating System :: POSIX :: Linux",
+        "Programming Language :: Python :: 3",
+        "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.10",
+        "Programming Language :: Python :: 3.11",
+        "Topic :: Scientific/Engineering",
+        "Topic :: Scientific/Engineering :: Atmospheric Science",
+        "Topic :: Scientific/Engineering :: GIS",
+        "Topic :: Scientific/Engineering :: Hydrology",
+        "Topic :: Scientific/Engineering :: Physics",
     ],
     packages=PACKAGES,
     # Include all version controlled files
     include_package_data=True,
-    setup_requires=REQUIREMENTS['setup'],
-    install_requires=REQUIREMENTS['install'],
-    tests_require=REQUIREMENTS['test'],
+    setup_requires=REQUIREMENTS["setup"],
+    install_requires=REQUIREMENTS["install"],
+    tests_require=REQUIREMENTS["test"],
     extras_require={
-        'develop':
-        REQUIREMENTS['develop'] + REQUIREMENTS['test'] + REQUIREMENTS['doc'],
-        'test':
-        REQUIREMENTS['test'],
-        'doc':
-        REQUIREMENTS['doc'],
+        "develop": REQUIREMENTS["develop"]
+        + REQUIREMENTS["test"]
+        + REQUIREMENTS["doc"],
+        "test": REQUIREMENTS["test"],
+        "doc": REQUIREMENTS["doc"],
     },
     entry_points={
-        'console_scripts': [
-            'esmvaltool = esmvalcore._main:run',
+        "console_scripts": [
+            "esmvaltool = esmvalcore._main:run",
         ],
     },
     cmdclass={
-        'lint': RunLinter,
+        "lint": RunLinter,
     },
     zip_safe=False,
 )