diff --git a/docs/source/whatsnew/releases/v0.4.0.rst b/docs/source/whatsnew/releases/v0.4.0.rst
index db8dec9..e6855b6 100644
--- a/docs/source/whatsnew/releases/v0.4.0.rst
+++ b/docs/source/whatsnew/releases/v0.4.0.rst
@@ -18,11 +18,12 @@ Scenarios
Geospatial Improvements
-* Autotemplating system for geospatial analysis using `pvdeg.geospatial.autotemplate`
+* Autotemplating system for geospatial analysis using `pvdeg.geospatial.auto_template` (see the sketch below).
* New module `pvdeg.decorators` that contains `pvdeg` specific decorator functions.
* Implemented `geospatial_result_type` decorator to update functions and perform runtime introspection to determine whether a function is autotemplate-able.
* `Geospatial Templates.ipynb` notebook to showcase new and old templating functionality for users.
-* Implemented testing for geospatial analysis
+* Implemented testing for geospatial analysis.
+* Added chunked and unchunked testing for geospatial analysis and templates.
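+
+A minimal sketch of the new autotemplating entry point (here `weather_ds`
+stands in for any gid-indexed `xarray.Dataset` of weather data)::
+
+    template = pvdeg.geospatial.auto_template(
+        func=pvdeg.humidity.module, ds_gids=weather_ds
+    )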
Symbolic Evaluation
diff --git a/pvdeg/geospatial.py b/pvdeg/geospatial.py
index a9c226c..2ffdb62 100644
--- a/pvdeg/geospatial.py
+++ b/pvdeg/geospatial.py
@@ -27,7 +27,6 @@
from typing import Tuple
from shapely import LineString, MultiLineString
-
def start_dask(hpc=None):
"""
Starts a dask cluster for parallel processing.
@@ -271,12 +270,12 @@ def output_template(
output_template = xr.Dataset(
data_vars={
- var: (dim, da.empty([dims_size[d] for d in dim]), attrs.get(var))
+            var: (dim, da.empty([dims_size[d] for d in dim]), attrs.get(var))  # da.empty yields a dask array with one chunk spanning the whole variable
for var, dim in shapes.items()
},
coords={dim: ds_gids[dim] for dim in dims},
attrs=global_attrs,
- ) # moved chunks down from here
+ )
if ds_gids.chunks: # chunk to match input
output_template = output_template.chunk({dim: ds_gids.chunks[dim] for dim in dims})
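+    # e.g. an input chunked as {"gid": 3} yields template variables chunked as
+    # (3, ...) along gid, while an unchunked input keeps one chunk spanning each
+    # axis (matching the single-chunk da.empty arrays above)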
diff --git a/tests/sandbox.ipynb b/tests/sandbox.ipynb
index d828964..c9208cf 100644
--- a/tests/sandbox.ipynb
+++ b/tests/sandbox.ipynb
@@ -23,43 +23,43 @@
"from pytest import approx\n",
"from pvdeg import TEST_DATA_DIR\n",
"\n",
- "#Load weather data\n",
- "WEATHER = pd.read_csv(os.path.join(TEST_DATA_DIR, 'weather_day_pytest.csv'),\n",
- " index_col= 0, parse_dates=True)\n",
- "with open(os.path.join(TEST_DATA_DIR, 'meta.json'),'r') as file:\n",
+ "# Load weather data\n",
+ "WEATHER = pd.read_csv(\n",
+ " os.path.join(TEST_DATA_DIR, \"weather_day_pytest.csv\"), index_col=0, parse_dates=True\n",
+ ")\n",
+ "with open(os.path.join(TEST_DATA_DIR, \"meta.json\"), \"r\") as file:\n",
" META = json.load(file)\n",
"\n",
- "#Load expected results\n",
- "rh_expected = pd.read_csv(os.path.join(TEST_DATA_DIR, 'input_day_pytest.csv'),\n",
- " index_col=0, parse_dates=True)\n",
- "rh_cols = [col for col in rh_expected.columns if 'RH' in col]\n",
+ "# Load expected results\n",
+ "rh_expected = pd.read_csv(\n",
+ " os.path.join(TEST_DATA_DIR, \"input_day_pytest.csv\"), index_col=0, parse_dates=True\n",
+ ")\n",
+ "rh_cols = [col for col in rh_expected.columns if \"RH\" in col]\n",
"rh_expected = rh_expected[rh_cols]\n",
"\n",
"\n",
"def test_module():\n",
- " '''\n",
+ " \"\"\"\n",
" test pvdeg.humidity.calc_rel_humidity\n",
- " \n",
+ "\n",
" Requires:\n",
" ---------\n",
" weather dataframe and meta dictionary\n",
- " '''\n",
+ " \"\"\"\n",
" result = pvdeg.humidity.module(WEATHER, META)\n",
- " pd.testing.assert_frame_equal(result, \n",
- " rh_expected, \n",
- " check_dtype=False)\n",
+ " pd.testing.assert_frame_equal(result, rh_expected, check_dtype=False)\n",
+ "\n",
"\n",
"def test_psat():\n",
- " '''\n",
+ " \"\"\"\n",
" test pvdeg.humidity.psat\n",
- " \n",
+ "\n",
" Requires:\n",
" ---------\n",
" weahter dataframe and meta dictionary\n",
- " '''\n",
- " psat_avg = pvdeg.humidity.psat(temp=WEATHER['temp_air'])[1]\n",
- " assert psat_avg == approx(0.47607, abs=5e-5)\n",
- "\n"
+ " \"\"\"\n",
+ " psat_avg = pvdeg.humidity.psat(temp=WEATHER[\"temp_air\"])[1]\n",
+ " assert psat_avg == approx(0.47607, abs=5e-5)"
]
},
{
@@ -73,7 +73,7 @@
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -86,7 +86,7 @@
"from pvdeg import TEST_DATA_DIR\n",
"\n",
"GEO_META = pd.read_csv(os.path.join(TEST_DATA_DIR, \"summit-meta.csv\"), index_col=0)\n",
- "with open(os.path.join(TEST_DATA_DIR, \"summit-weather.pkl\"), 'rb') as f:\n",
+ "with open(os.path.join(TEST_DATA_DIR, \"summit-weather.pkl\"), \"rb\") as f:\n",
" GEO_WEATHER = pickle.load(f)"
]
},
@@ -99,23 +99,24 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"autotemplate_result = pvdeg.geospatial.auto_template(\n",
- " func=pvdeg.humidity.module, \n",
- " ds_gids=GEO_WEATHER\n",
+ " func=pvdeg.humidity.module, ds_gids=GEO_WEATHER\n",
").compute()"
]
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
- "humidity_template = xr.open_dataset(os.path.join(TEST_DATA_DIR, 'humidity_template.nc')).compute()"
+ "humidity_template = xr.open_dataset(\n",
+ " os.path.join(TEST_DATA_DIR, \"humidity_template.nc\")\n",
+ ").compute()"
]
},
{
@@ -134,11 +135,11 @@
" return False\n",
"\n",
" for coord in ds1.coords:\n",
- " if ds1.coords[coord].dtype.kind in {'i', 'f'}:\n",
+ " if ds1.coords[coord].dtype.kind in {\"i\", \"f\"}:\n",
" # Use np.allclose for numeric coordinates\n",
" if not np.allclose(ds1.coords[coord], ds2.coords[coord], atol=atol):\n",
" return False\n",
- " elif ds1.coords[coord].dtype.kind == 'M': # datetime64 type\n",
+ " elif ds1.coords[coord].dtype.kind == \"M\": # datetime64 type\n",
" # Use array equality for datetime coordinates\n",
" if not np.array_equal(ds1.coords[coord], ds2.coords[coord]):\n",
" return False\n",
@@ -162,20 +163,11 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "C:\\Users\\tford\\dev\\PVDegradationTools\\pvdeg\\utilities.py:479: FutureWarning: The return type of `Dataset.dims` will be changed to return a set of dimension names in future, in order to be more consistent with `DataArray.dims`. To access a mapping from dimension names to lengths, please use `Dataset.sizes`.\n",
- " if ds1.dims != ds2.dims:\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
- "assert pvdeg.utilities.compare_datasets(autotemplate_result,humidity_template)"
+ "assert pvdeg.utilities.compare_datasets(autotemplate_result, humidity_template)"
]
},
{
@@ -187,7 +179,7 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -199,21 +191,1380 @@
"}\n",
"\n",
"manual_template = pvdeg.geospatial.output_template(\n",
+ " shapes=shapes, ds_gids=GEO_WEATHER\n",
+ ").compute()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pvdeg.utilities.compare_datasets(manual_template, humidity_template)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# test template\n",
+ "\n",
+ "shapes = {\"testA\": (\"gid\",), \"testB\": (\"gid\", \"time\")}\n",
+ "\n",
+ "template = pvdeg.geospatial.output_template(\n",
" shapes=shapes,\n",
- " ds_gids=GEO_WEATHER\n",
+ " ds_gids=GEO_WEATHER,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "template.to_netcdf(os.path.join(TEST_DATA_DIR, \"mismatch-template.nc\"))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pvdeg\n",
+ "from pvdeg import TEST_DATA_DIR\n",
+ "import pickle\n",
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "import xarray as xr\n",
+ "import os\n",
+ "\n",
+ "GEO_META = pd.read_csv(os.path.join(TEST_DATA_DIR, \"summit-meta.csv\"), index_col=0)\n",
+ "\n",
+ "with open(os.path.join(TEST_DATA_DIR, \"summit-weather.pkl\"), \"rb\") as f:\n",
+ " GEO_WEATHER = pickle.load(f).compute().load()\n",
+ "\n",
+ "HUMIDITY_TEMPLATE = xr.open_dataset(\n",
+ " os.path.join(TEST_DATA_DIR, \"humidity_template.nc\"), engine=\"h5netcdf\"\n",
").compute()"
]
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "GEO_WEATHER"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "GEO_WEATHER.chunks"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "HUMIDITY_TEMPLATE.chunks"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "1\n",
+ "1\n"
+ ]
+ }
+ ],
+ "source": [
+ "shapes = {\n",
+ " \"RH_surface_outside\": (\"gid\", \"time\"),\n",
+ " \"RH_front_encap\": (\"gid\", \"time\"),\n",
+ " \"RH_back_encap\": (\"gid\", \"time\"),\n",
+ " \"RH_backsheet\": (\"gid\", \"time\"),\n",
+ "}\n",
+ "\n",
+    "# note: the template is dask-backed, so it reports chunks even for this unchunked input\n",
+ "manual_template = pvdeg.geospatial.output_template(shapes=shapes, ds_gids=GEO_WEATHER)\n",
+ "\n",
+ "assert pvdeg.utilities.compare_templates(manual_template, HUMIDITY_TEMPLATE)\n",
+ "for k, v in manual_template.chunks.items():\n",
+ " if len(v) != 1:\n",
+ " raise ValueError(f\"\"\"\n",
+ " Need one chunk per axis for an unchunked input\n",
+ " dimension {k} has {len(v)} chunks.\n",
+ " \"\"\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "chunked_weather = GEO_WEATHER.chunk({\"gid\": 3})"
+ ]
+ },
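+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# minimal sanity check (a sketch): after .chunk({\"gid\": 3}), each chunk\n",
+    "# along gid should hold at most 3 elements\n",
+    "assert all(size <= 3 for size in chunked_weather.chunks[\"gid\"])"
+   ]
+  },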
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+       "<xarray.Dataset> Size: 6MB\n",
+ "Dimensions: (gid: 11, time: 17520)\n",
+ "Coordinates:\n",
+ " * gid (gid) int64 88B 449211 452064 453020 ... 460613 462498\n",
+ " * time (time) datetime64[ns] 140kB 2022-01-01 ... 2022-12-31...\n",
+ "Data variables:\n",
+       "    RH_surface_outside  (gid, time) float64 2MB dask.array<chunksize=(3, 17520), meta=np.ndarray>\n",
+       "    RH_front_encap      (gid, time) float64 2MB dask.array<chunksize=(3, 17520), meta=np.ndarray>\n",
+       "    RH_back_encap       (gid, time) float64 2MB dask.array<chunksize=(3, 17520), meta=np.ndarray>\n",
+       "    RH_backsheet        (gid, time) float64 2MB dask.array<chunksize=(3, 17520), meta=np.ndarray>"
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "HUMIDITY_TEMPLATE.chunk({\"gid\": 3})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+       "<xarray.Dataset> Size: 352B\n",
+ "Dimensions: (gid: 11)\n",
+ "Coordinates:\n",
+ " * gid (gid) int64 88B 449211 452064 453020 ... 459670 460613 462498\n",
+ "Data variables:\n",
+       "    x        (gid) float64 88B dask.array<chunksize=(3,), meta=np.ndarray>\n",
+       "    T98_0    (gid) float64 88B dask.array<chunksize=(3,), meta=np.ndarray>\n",
+       "    T98_inf  (gid) float64 88B dask.array<chunksize=(3,), meta=np.ndarray>"
+ ]
+ },
+ "execution_count": 23,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "chunked_template"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
- "C:\\Users\\tford\\dev\\PVDegradationTools\\pvdeg\\utilities.py:479: FutureWarning: The return type of `Dataset.dims` will be changed to return a set of dimension names in future, in order to be more consistent with `DataArray.dims`. To access a mapping from dimension names to lengths, please use `Dataset.sizes`.\n",
+ "C:\\Users\\tford\\dev\\PVDegradationTools\\pvdeg\\utilities.py:1231: FutureWarning: The return type of `Dataset.dims` will be changed to return a set of dimension names in future, in order to be more consistent with `DataArray.dims`. To access a mapping from dimension names to lengths, please use `Dataset.sizes`.\n",
" if ds1.dims != ds2.dims:\n"
]
},
@@ -223,13 +1574,49 @@
"True"
]
},
- "execution_count": 11,
+ "execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
- "pvdeg.utilities.compare_datasets(manual_template, humidity_template)"
+ "pvdeg.utilities.compare_templates(chunked_template, HUMIDITY_TEMPLATE.chunk({\"gid\": 3}))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The array tilt angle was not provided, therefore the latitude tilt of 39.9 was used.\n",
+ "The array tilt angle was not provided, therefore the latitude tilt of 39.5 was used.\n",
+ "The array tilt angle was not provided, therefore the latitude tilt of 39.5 was used.\n",
+ "The array tilt angle was not provided, therefore the latitude tilt of 39.8 was used.\n",
+ "The array tilt angle was not provided, therefore the latitude tilt of 39.4 was used.\n",
+ "The array tilt angle was not provided, therefore the latitude tilt of 39.8 was used.\n",
+ "The array tilt angle was not provided, therefore the latitude tilt of 39.6 was used.\n",
+ "The array tilt angle was not provided, therefore the latitude tilt of 39.9 was used.\n",
+ "The array tilt angle was not provided, therefore the latitude tilt of 39.6 was used.\n",
+ "The array tilt angle was not provided, therefore the latitude tilt of 39.4 was used.\n",
+ "The array tilt angle was not provided, therefore the latitude tilt of 39.7 was used.\n"
+ ]
+ }
+ ],
+ "source": [
+ "chunked_template = pvdeg.geospatial.auto_template(\n",
+ " ds_gids=chunked_weather, func=pvdeg.humidity.module\n",
+ ")\n",
+ "\n",
+ "geo_res = pvdeg.geospatial.analysis(\n",
+ " weather_ds=chunked_weather,\n",
+ " meta_df=GEO_META,\n",
+ " func=pvdeg.humidity.module,\n",
+ " template=chunked_template,\n",
+ ")"
]
},
{
@@ -238,18 +1625,40 @@
"metadata": {},
"outputs": [],
"source": [
- "# test template\n",
+ "geo_res = pvdeg.geospatial.analysis(\n",
+ " chunked_weather,\n",
+ " meta_df=GEO_META,\n",
+ " func=pvdeg.humidity.module,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "res_ds = pvdeg.geospatial.analysis(\n",
+ " weather_ds=GEO_WEATHER,\n",
+ " meta_df=GEO_META,\n",
+ " func=pvdeg.standards.standoff,\n",
+ ")\n",
"\n",
- "shapes = {\n",
- " 'testA' : (\"gid\", ),\n",
- " 'testB' : (\"gid\", \"time\")\n",
- "}\n",
+ "data_var = res_ds[\"x\"]\n",
"\n",
- "template = pvdeg.geospatial.output_template(\n",
- " shapes=shapes,\n",
+ "# Stack the latitude and longitude coordinates into a single dimension\n",
+ "# convert to dataframe, this can be done with xr.dataset.to_dataframe as well\n",
+ "stacked = data_var.stack(z=(\"latitude\", \"longitude\"))\n",
+ "latitudes = stacked[\"latitude\"].values\n",
+ "longitudes = stacked[\"longitude\"].values\n",
+ "data_values = stacked.values\n",
+ "combined_array = np.column_stack((latitudes, longitudes, data_values))\n",
"\n",
- " ds_gids=GEO_WEATHER,\n",
- ")\n"
+ "res = pd.DataFrame(combined_array).dropna()\n",
+ "ans = pd.read_csv(os.path.join(TEST_DATA_DIR, \"summit-standoff-res.csv\"), index_col=0)\n",
+ "res.columns = ans.columns\n",
+ "\n",
+ "# pd.testing.assert_frame_equal(res, ans, check_dtype=False, check_names=False)"
]
},
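+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# a hedged alternative to the manual stack above, using the\n",
+    "# xr.Dataset.to_dataframe route mentioned in the previous cell; the\n",
+    "# column names differ, so they must be reassigned before any comparison\n",
+    "res_alt = res_ds[\"x\"].to_dataframe().dropna().reset_index()\n",
+    "res_alt.head()"
+   ]
+  },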
{
@@ -258,7 +1667,7 @@
"metadata": {},
"outputs": [],
"source": [
- "template.to_netcdf(os.path.join(TEST_DATA_DIR, \"mismatch-template.nc\"))"
+ "res_ds.chunks"
]
},
{
@@ -285,7 +1694,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.14"
+ "version": "3.10.12"
},
"orig_nbformat": 4
},
diff --git a/tests/test_geospatial.py b/tests/test_geospatial.py
index 5e3a61a..590108a 100644
--- a/tests/test_geospatial.py
+++ b/tests/test_geospatial.py
@@ -6,17 +6,22 @@
import xarray as xr
import os
+# NOTE ON UNCHUNKED INPUTS FOR TESTING
+# The output template is always chunked, because its data variables are dask arrays.
+# This is fine for unchunked inputs as long as there is exactly one chunk per axis
+# AND that single chunk spans every element of the axis.
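+#
+# A minimal sketch of that invariant, assuming `template` came from
+# pvdeg.geospatial.output_template on an unchunked input:
+#
+#     for dim, chunks in template.chunks.items():
+#         assert len(chunks) == 1 and chunks[0] == template.sizes[dim]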
GEO_META = pd.read_csv(os.path.join(TEST_DATA_DIR, "summit-meta.csv"), index_col=0)
with open(os.path.join(TEST_DATA_DIR, "summit-weather.pkl"), "rb") as f:
- GEO_WEATHER = pickle.load(f)
+ GEO_WEATHER = pickle.load(f) # unchunked
HUMIDITY_TEMPLATE = xr.open_dataset(
- os.path.join(TEST_DATA_DIR, "humidity_template.nc"), engine='h5netcdf'
-).compute()
+ os.path.join(TEST_DATA_DIR, "humidity_template.nc"), engine="h5netcdf"
+).compute() # unchunked
+# unchunked input
def test_analysis_standoff_unchunked():
res_ds = pvdeg.geospatial.analysis(
weather_ds=GEO_WEATHER,
@@ -41,6 +46,35 @@ def test_analysis_standoff_unchunked():
res.columns = ans.columns
pd.testing.assert_frame_equal(res, ans, check_dtype=False, check_names=False)
+ assert not res_ds.chunks # output is never chunked
+
+
+# chunked input
+def test_analysis_standoff_chunked():
+    chunked_weather = GEO_WEATHER.chunk({"gid": 3})  # artificially chunked
+
+    # the result is always unchunked; this checks that analysis works on chunked input
+ res_ds = pvdeg.geospatial.analysis(
+ weather_ds=chunked_weather,
+ meta_df=GEO_META,
+ func=pvdeg.standards.standoff,
+ )
+
+ data_var = res_ds["x"]
+ stacked = data_var.stack(z=("latitude", "longitude"))
+ latitudes = stacked["latitude"].values
+ longitudes = stacked["longitude"].values
+ data_values = stacked.values
+ combined_array = np.column_stack((latitudes, longitudes, data_values))
+
+ res = pd.DataFrame(combined_array).dropna()
+ ans = pd.read_csv(
+ os.path.join(TEST_DATA_DIR, "summit-standoff-res.csv"), index_col=0
+ )
+ res.columns = ans.columns
+
+ pd.testing.assert_frame_equal(res, ans, check_dtype=False, check_names=False)
+ assert not res_ds.chunks # output is never chunked, even if input is chunked
def test_autotemplate():
@@ -53,7 +87,7 @@ def test_autotemplate():
)  # custom function because we can't use equals or identical, due to empty-like values
-def test_template():
+def test_output_template_unchunked():
shapes = {
"RH_surface_outside": ("gid", "time"),
"RH_front_encap": ("gid", "time"),
@@ -63,6 +97,31 @@ def test_template():
manual_template = pvdeg.geospatial.output_template(
shapes=shapes, ds_gids=GEO_WEATHER
- ).compute()
+ )
assert pvdeg.utilities.compare_templates(manual_template, HUMIDITY_TEMPLATE)
+ for k, v in manual_template.chunks.items():
+ if len(v) != 1:
+ raise ValueError(f"""
+ Need one chunk per axis for an unchunked input
+ dimension {k} has {len(v)} chunks.
+ """)
+
+
+def test_output_template_chunked():
+    chunked_weather = GEO_WEATHER.chunk({"gid": 3})  # artificially chunked
+
+ shapes = {
+ "RH_surface_outside": ("gid", "time"),
+ "RH_front_encap": ("gid", "time"),
+ "RH_back_encap": ("gid", "time"),
+ "RH_backsheet": ("gid", "time"),
+ }
+
+ chunked_template = pvdeg.geospatial.output_template(
+ shapes=shapes, ds_gids=chunked_weather
+ )
+
+ assert pvdeg.utilities.compare_templates(
+ chunked_template, HUMIDITY_TEMPLATE.chunk({"gid": 3})
+ )
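+
+    # a further check (hedged): per output_template's chunk-matching branch,
+    # the template should inherit the input's per-dimension chunking
+    assert chunked_template.chunks["gid"] == chunked_weather.chunks["gid"]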
diff --git a/tests/test_letid.py b/tests/test_letid.py
index bb8f0af..4fb2a3a 100644
--- a/tests/test_letid.py
+++ b/tests/test_letid.py
@@ -1,12 +1,5 @@
import pytest
-from pvdeg import (
- weather,
- letid,
- utilities,
- collection,
- DATA_DIR,
- TEST_DATA_DIR
-)
+from pvdeg import weather, letid, utilities, collection, DATA_DIR, TEST_DATA_DIR
import os
import pandas as pd
@@ -27,7 +20,7 @@
WEATHER = pd.read_csv(
os.path.join(TEST_DATA_DIR, r"weather_day_pytest.csv"),
index_col=0,
- parse_dates=True
+ parse_dates=True,
)
### dtype HEADACHES ###
@@ -36,19 +29,19 @@
index_col=0,
parse_dates=True,
# for some reason timesteps are int32 but all other numerics are int64
- dtype = {'Temperature':'int32'}
+ dtype={"Temperature": "int32"},
)
-# did not properly parse and update Datetime column to datetime64[ns] so must be done manually
-REPINS_TIMESTEPS['Datetime'] = pd.to_datetime(REPINS_TIMESTEPS['Datetime'])
+# read_csv did not parse the Datetime column to datetime64[ns], so it must be converted manually
+REPINS_TIMESTEPS["Datetime"] = pd.to_datetime(REPINS_TIMESTEPS["Datetime"])
###
DEVICE_PARAMS = pd.read_csv(
os.path.join(TEST_DATA_DIR, r"repins-device-params-timesteps.csv"),
index_col=0,
parse_dates=True,
- dtype = {'Temperature':'int32'}
+ dtype={"Temperature": "int32"},
)
-DEVICE_PARAMS['Datetime'] = pd.to_datetime(DEVICE_PARAMS['Datetime'])
+DEVICE_PARAMS["Datetime"] = pd.to_datetime(DEVICE_PARAMS["Datetime"])
LETID_OUTDOORS = pd.read_csv(
os.path.join(TEST_DATA_DIR, r"letid-outdoors.csv"),
@@ -60,107 +53,126 @@
os.path.join(TEST_DATA_DIR, r"letid-lab.csv"),
index_col=0,
parse_dates=True,
- dtype = {'Temperature':'int64'}
+ dtype={"Temperature": "int64"},
)
-LETID_LAB['Datetime'] = pd.to_datetime(LETID_LAB['Datetime'])
+LETID_LAB["Datetime"] = pd.to_datetime(LETID_LAB["Datetime"])
INJECTION = pd.read_csv(
- os.path.join(TEST_DATA_DIR, "injection-outdoors.csv"),
- index_col=0,
- parse_dates=True
+ os.path.join(TEST_DATA_DIR, "injection-outdoors.csv"), index_col=0, parse_dates=True
)
with open(os.path.join(TEST_DATA_DIR, "meta.json"), "r") as file:
META = json.load(file)
META = weather.map_meta(META)
+
def test_tau_now():
-# from LETID - Passivated Wafer.ipynb
- tau_0 = 350 # us
- tau_deg = 41 # us
+ # from LETID - Passivated Wafer.ipynb
+ tau_0 = 350 # us
+ tau_deg = 41 # us
nB_0 = 0
result = letid.tau_now(tau_0, tau_deg, nB_0)
assert result == pytest.approx(350.0)
+
def test_k_ij():
-# from LETID - Passivated Wafer.ipynb
- mechanism_params = utilities.get_kinetics('repins')
- temperature = 49 # °C
+ # from LETID - Passivated Wafer.ipynb
+ mechanism_params = utilities.get_kinetics("repins")
+ temperature = 49 # °C
- result = letid.k_ij(mechanism_params['v_ab'], mechanism_params['ea_ab'], temperature)
+ result = letid.k_ij(
+ mechanism_params["v_ab"], mechanism_params["ea_ab"], temperature
+ )
# ABS value here?
assert result == pytest.approx(5.389713113272726e-06)
-def test_carrier_factor():
- tau = 350 # us, lifetime of the wafer for demonstration purposes
- transition = 'ab'
- temperature = 20 # C
+def test_carrier_factor():
+ tau = 350 # us, lifetime of the wafer for demonstration purposes
+ transition = "ab"
+ temperature = 20 # C
suns = 0.5
meas_jsc = 40
- wafer_thickness = 180 # um
+ wafer_thickness = 180 # um
s_rear = 90
- mechanism_params = utilities.get_kinetics('repins')
+ mechanism_params = utilities.get_kinetics("repins")
- result = letid.carrier_factor(tau, transition, temperature, suns, meas_jsc, wafer_thickness, s_rear, mechanism_params)
+ result = letid.carrier_factor(
+ tau,
+ transition,
+ temperature,
+ suns,
+ meas_jsc,
+ wafer_thickness,
+ s_rear,
+ mechanism_params,
+ )
+
+ assert result == pytest.approx(1.2700522149193714, abs=0.000005)
- assert result == pytest.approx(1.2700522149193714, abs = 0.000005)
def test_carrier_factor_wafer():
-# from LETID - Passivated Wafer.ipynb
- wafer_thickness = 180 # um
- tau = 350 # us, lifetime of the wafer for demonstration purposes
- mechanism_params = utilities.get_kinetics('repins')
- injection = 0.5 #
- transition = 'bc' # for this example we'll look at the B -> C transition
-
- generation_df = pd.read_excel(os.path.join(DATA_DIR, 'PVL_GenProfile.xlsx'), header = 0) # this is an optical generation profile generated by PVLighthouse's OPAL2 default model for 1-sun, normal incident AM1.5 sunlight on a 180-um thick SiNx-coated, pyramid-textured wafer.
- generation = generation_df['Generation (cm-3s-1)']
- depth = generation_df['Depth (um)']
+ # from LETID - Passivated Wafer.ipynb
+ wafer_thickness = 180 # um
+ tau = 350 # us, lifetime of the wafer for demonstration purposes
+ mechanism_params = utilities.get_kinetics("repins")
+ injection = 0.5 #
+ transition = "bc" # for this example we'll look at the B -> C transition
+
+ generation_df = pd.read_excel(
+ os.path.join(DATA_DIR, "PVL_GenProfile.xlsx"), header=0
+ ) # this is an optical generation profile generated by PVLighthouse's OPAL2 default model for 1-sun, normal incident AM1.5 sunlight on a 180-um thick SiNx-coated, pyramid-textured wafer.
+ generation = generation_df["Generation (cm-3s-1)"]
+ depth = generation_df["Depth (um)"]
j_gen = collection.generation_current(generation, depth)
- result = letid.carrier_factor_wafer(tau, transition, injection, j_gen, wafer_thickness, mechanism_params)
+ result = letid.carrier_factor_wafer(
+ tau, transition, injection, j_gen, wafer_thickness, mechanism_params
+ )
assert result == pytest.approx(4.711029107876425, abs=0.000005)
+
def test_calc_dn():
-# from LETID - Passivated Wafer.ipynb
- transition = 'bc' # for this example we'll look at the B -> C transition
+ # from LETID - Passivated Wafer.ipynb
+ transition = "bc" # for this example we'll look at the B -> C transition
- mechanism_params = utilities.get_kinetics('repins')
+ mechanism_params = utilities.get_kinetics("repins")
meas_tau = mechanism_params[f"tau_{transition}"]
meas_temp = mechanism_params[f"temperature_{transition}"]
- meas_temp = convert_temperature(meas_temp, 'K', 'C')
+ meas_temp = convert_temperature(meas_temp, "K", "C")
meas_suns = mechanism_params[f"suns_{transition}"]
meas_jsc = 40
meas_wafer_thickness = mechanism_params[f"thickness_{transition}"]
meas_srv = mechanism_params[f"srv_{transition}"]
result = letid.calc_dn(
- meas_tau,
- meas_temp,
- meas_suns,
- meas_jsc,
- wafer_thickness=meas_wafer_thickness,
- s_rear=meas_srv,
- )
+ meas_tau,
+ meas_temp,
+ meas_suns,
+ meas_jsc,
+ wafer_thickness=meas_wafer_thickness,
+ s_rear=meas_srv,
+ )
# What should the abs value be here, LARGE MAGNITUDE ANSWER
- assert result == pytest.approx(7.064647199192719e+20)
+ assert result == pytest.approx(7.064647199192719e20)
+
def test_convert_i_to_v():
- tau = 350 # us, lifetime of the wafer for demonstration purposes
- na = 7.2e21
+ tau = 350 # us, lifetime of the wafer for demonstration purposes
+ na = 7.2e21
current = 0.005
- wafer_thickness = 180 # um
+ wafer_thickness = 180 # um
srv = 41
result = letid.convert_i_to_v(tau, na, current, wafer_thickness, srv)
- assert result == pytest.approx(0.539020964053319, abs = 0.000005)
+ assert result == pytest.approx(0.539020964053319, abs=0.000005)
+
def test_j0_gray():
ni2 = 1.0e16 # m^-3
@@ -174,6 +186,7 @@ def test_j0_gray():
assert result == pytest.approx(4.806093780404662e-28)
+
def test_calc_voc_from_tau():
tau_1 = 115.000000
tau_2 = 60.631208
@@ -182,90 +195,112 @@ def test_calc_voc_from_tau():
jsc_now = 41.28092915355781
temperature = 25
- result_1 = letid.calc_voc_from_tau(tau_1, wafer_thickness, s_rear, jsc_now, temperature)
- result_2 = letid.calc_voc_from_tau(tau_2, wafer_thickness, s_rear, jsc_now, temperature)
+ result_1 = letid.calc_voc_from_tau(
+ tau_1, wafer_thickness, s_rear, jsc_now, temperature
+ )
+ result_2 = letid.calc_voc_from_tau(
+ tau_2, wafer_thickness, s_rear, jsc_now, temperature
+ )
+
+ assert result_1 == pytest.approx(0.6661350284244034, abs=5e-8)
+ assert result_2 == pytest.approx(0.6531169204120689, abs=5e-8)
- assert result_1 == pytest.approx(0.6661350284244034, abs = 5e-8 )
- assert result_2 == pytest.approx(0.6531169204120689, abs = 5e-8)
def test_calc_device_params():
results = letid.calc_device_params(REPINS_TIMESTEPS, cell_area=243)
pd.testing.assert_frame_equal(results, DEVICE_PARAMS, check_index_type=False)
+
def test_calc_energy_loss():
- # result = letid.calc_energy_loss(REPINS_TIMESTEPS) # old and just fixed in bug fix
+ # result = letid.calc_energy_loss(REPINS_TIMESTEPS) # old and just fixed in bug fix
timesteps = letid.calc_device_params(REPINS_TIMESTEPS)
result = letid.calc_energy_loss(timesteps)
- assert result == pytest.approx(8.873902787132071e-05, abs = 0.000005)
+ assert result == pytest.approx(8.873902787132071e-05, abs=0.000005)
-def test_calc_regeneration_time():
+def test_calc_regeneration_time():
# not functional
# had to set x to the closest value in my dataframe,
- # this is probably flawed but it avoids
+ # this is probably flawed but it avoids
# ``IndexError: single positional indexer is out-of-bounds``
result = letid.calc_regeneration_time(REPINS_TIMESTEPS, x=4.0463754402964416e-05)
pass
+
def test_calc_pmp_loss_from_tau_loss():
-# from B-O LID - Accelerated Test.ipynb
+ # from B-O LID - Accelerated Test.ipynb
+
+ wafer_thickness = 180 # um
+ s_rear = 46 # cm/s
+ cell_area = 243 # cm^2
+ tau_0 = (
+ 115 # us, carrier lifetime in non-degraded states, e.g. LETID/LID states A or C
+ )
+ tau_deg = 55 # us, carrier lifetime in fully-degraded state, e.g. LETID/LID state B
- wafer_thickness = 180 # um
- s_rear = 46 # cm/s
- cell_area = 243 # cm^2
- tau_0 = 115 # us, carrier lifetime in non-degraded states, e.g. LETID/LID states A or C
- tau_deg = 55 # us, carrier lifetime in fully-degraded state, e.g. LETID/LID state B
+ result = letid.calc_pmp_loss_from_tau_loss(
+ tau_0, tau_deg, cell_area, wafer_thickness, s_rear
+ ) # returns % power loss, pmp_0, pmp_deg
- result = letid.calc_pmp_loss_from_tau_loss(tau_0, tau_deg, cell_area, wafer_thickness, s_rear) # returns % power loss, pmp_0, pmp_deg
+ # are these ABS values reasonable?
+ assert result[0] == pytest.approx(0.03495240755084558, abs=5e-8)
+ assert result[1] == pytest.approx(5.663466529792824, abs=5e-8)
+ assert result[2] == pytest.approx(5.465514739492932, abs=5e-8)
- # are these ABS values reasonable?
- assert result[0] == pytest.approx(0.03495240755084558, abs = 5e-8)
- assert result[1] == pytest.approx(5.663466529792824, abs = 5e-8)
- assert result[2] == pytest.approx(5.465514739492932, abs = 5e-8)
def test_calc_ndd():
- tau_0 = 350 # us
- tau_deg = 41 # us
+ tau_0 = 350 # us
+ tau_deg = 41 # us
result = letid.calc_ndd(tau_0, tau_deg)
- assert result == pytest.approx(0.02153310104529617, abs = 0.000005)
+ assert result == pytest.approx(0.02153310104529617, abs=0.000005)
+
def test_ff_green():
voltage = 40
result = letid.ff_green(voltage)
- assert result == pytest.approx(0.9946395424055456, abs = 0.000005)
-
-def test_calc_injection_outdoors():
- temperature_model_parameters = TEMPERATURE_MODEL_PARAMETERS['sapm']['open_rack_glass_glass']
- sandia_modules = pvlib.pvsystem.retrieve_sam('SandiaMod')
- cec_inverters = pvlib.pvsystem.retrieve_sam('cecinverter')
- sandia_module = sandia_modules['Canadian_Solar_CS5P_220M___2009_']
- cec_inverter = cec_inverters['ABB__MICRO_0_25_I_OUTD_US_208__208V_']
+ assert result == pytest.approx(0.9946395424055456, abs=0.000005)
- location = Location(latitude=META['latitude'], longitude=META['longitude'])
- system = PVSystem(surface_tilt=20, surface_azimuth=200,
- module_parameters=sandia_module,
- inverter_parameters=cec_inverter,
- temperature_model_parameters=temperature_model_parameters)
+def test_calc_injection_outdoors():
+ temperature_model_parameters = TEMPERATURE_MODEL_PARAMETERS["sapm"][
+ "open_rack_glass_glass"
+ ]
+ sandia_modules = pvlib.pvsystem.retrieve_sam("SandiaMod")
+ cec_inverters = pvlib.pvsystem.retrieve_sam("cecinverter")
+ sandia_module = sandia_modules["Canadian_Solar_CS5P_220M___2009_"]
+ cec_inverter = cec_inverters["ABB__MICRO_0_25_I_OUTD_US_208__208V_"]
+
+ location = Location(latitude=META["latitude"], longitude=META["longitude"])
+
+ system = PVSystem(
+ surface_tilt=20,
+ surface_azimuth=200,
+ module_parameters=sandia_module,
+ inverter_parameters=cec_inverter,
+ temperature_model_parameters=temperature_model_parameters,
+ )
mc = ModelChain(system, location)
mc.run_model(WEATHER)
mc.complete_irradiance(WEATHER)
- result = letid.calc_injection_outdoors(mc.results)
+ result = letid.calc_injection_outdoors(mc.results)
result = pd.DataFrame(result)
- result.columns = ['0']
+ result.columns = ["0"]
+
+ pd.testing.assert_frame_equal(
+ result, INJECTION, check_column_type=False, check_index_type=False
+ )
- pd.testing.assert_frame_equal(result, INJECTION, check_column_type=False, check_index_type=False)
def test_calc_letid_outdoors():
tau_0 = 350
@@ -275,19 +310,34 @@ def test_calc_letid_outdoors():
nA_0 = 100
nB_0 = 0
nC_0 = 0
- mechanism_params = 'repins'
-
- generation_df = pd.read_excel(os.path.join(DATA_DIR, 'PVL_GenProfile.xlsx'), header = 0) # this is an optical generation profile generated by PVLighthouse's OPAL2 default model for 1-sun, normal incident AM1.5 sunlight on a 180-um thick SiNx-coated, pyramid-textured wafer.
-
- result = letid.calc_letid_outdoors(tau_0, tau_deg, wafer_thickness, s_rear, nA_0, nB_0, nC_0, WEATHER, META, mechanism_params, generation_df)
+ mechanism_params = "repins"
+
+ generation_df = pd.read_excel(
+ os.path.join(DATA_DIR, "PVL_GenProfile.xlsx"), header=0
+ ) # this is an optical generation profile generated by PVLighthouse's OPAL2 default model for 1-sun, normal incident AM1.5 sunlight on a 180-um thick SiNx-coated, pyramid-textured wafer.
+
+ result = letid.calc_letid_outdoors(
+ tau_0,
+ tau_deg,
+ wafer_thickness,
+ s_rear,
+ nA_0,
+ nB_0,
+ nC_0,
+ WEATHER,
+ META,
+ mechanism_params,
+ generation_df,
+ )
print("here it is", META)
pd.testing.assert_frame_equal(result, LETID_OUTDOORS)
+
def test_calc_letid_lab():
- tau_0 = 115 # us
- tau_deg = 55 # us
- wafer_thickness = 180 # um
- s_rear = 46 # cm/s
+ tau_0 = 115 # us
+ tau_deg = 55 # us
+ wafer_thickness = 180 # um
+ s_rear = 46 # cm/s
nA_0 = 100
nB_0 = 0
@@ -295,12 +345,29 @@ def test_calc_letid_lab():
injection = 0.1
temperature = 75
- mechanism_params = 'repins'
-
- result = letid.calc_letid_lab(tau_0, tau_deg, wafer_thickness, s_rear, nA_0, nB_0, nC_0, injection, temperature, mechanism_params, freq='h')
+ mechanism_params = "repins"
+
+ result = letid.calc_letid_lab(
+ tau_0,
+ tau_deg,
+ wafer_thickness,
+ s_rear,
+ nA_0,
+ nB_0,
+ nC_0,
+ injection,
+ temperature,
+ mechanism_params,
+ freq="h",
+ )
    # Datetimes were causing this to fail; we don't care about them
- result_without_datetime = result.drop(columns=['Datetime'])
- letid_lab_without_datetime = LETID_LAB.drop(columns=['Datetime'])
-
- pd.testing.assert_frame_equal(result_without_datetime, letid_lab_without_datetime, check_index_type=False, check_dtype=False)
\ No newline at end of file
+ result_without_datetime = result.drop(columns=["Datetime"])
+ letid_lab_without_datetime = LETID_LAB.drop(columns=["Datetime"])
+
+ pd.testing.assert_frame_equal(
+ result_without_datetime,
+ letid_lab_without_datetime,
+ check_index_type=False,
+ check_dtype=False,
+ )
diff --git a/tests/test_montecarlo.py b/tests/test_montecarlo.py
index 414dd57..409d0d2 100644
--- a/tests/test_montecarlo.py
+++ b/tests/test_montecarlo.py
@@ -2,8 +2,8 @@
# correlation list is empty AND correlation list is populated with r = 0's
import pytest
-import os
-import json
+import os
+import json
import pandas as pd
import pvdeg
from pvdeg import TEST_DATA_DIR
@@ -11,7 +11,7 @@
WEATHER = pd.read_csv(
os.path.join(TEST_DATA_DIR, r"weather_day_pytest.csv"),
index_col=0,
- parse_dates=True
+ parse_dates=True,
)
with open(os.path.join(TEST_DATA_DIR, "meta.json"), "r") as file:
@@ -34,9 +34,10 @@
os.path.join(TEST_DATA_DIR, r"monte_carlo_arrhenius.csv"),
)
+
def test_generateCorrelatedSamples():
"""
- test pvdeg.montecarlo.generateCorrelatedSamples
+ test pvdeg.montecarlo.generateCorrelatedSamples
Requires:
---------
@@ -44,32 +45,53 @@ def test_generateCorrelatedSamples():
"""
# standard case
result_1 = pvdeg.montecarlo.generateCorrelatedSamples(
- corr=[pvdeg.montecarlo.Corr('Ea', 'X', 0.0269), pvdeg.montecarlo.Corr('Ea', 'LnR0', -0.9995), pvdeg.montecarlo.Corr('X', 'LnR0', -0.0400)],
- stats={'Ea' : {'mean' : 62.08, 'stdev' : 7.3858 }, 'LnR0' : {'mean' : 13.7223084 , 'stdev' : 2.47334772}, 'X' : {'mean' : 0.0341 , 'stdev' : 0.0992757}},
- n = 50,
- seed = 1
+ corr=[
+ pvdeg.montecarlo.Corr("Ea", "X", 0.0269),
+ pvdeg.montecarlo.Corr("Ea", "LnR0", -0.9995),
+ pvdeg.montecarlo.Corr("X", "LnR0", -0.0400),
+ ],
+ stats={
+ "Ea": {"mean": 62.08, "stdev": 7.3858},
+ "LnR0": {"mean": 13.7223084, "stdev": 2.47334772},
+ "X": {"mean": 0.0341, "stdev": 0.0992757},
+ },
+ n=50,
+ seed=1,
)
# EMPTY CORRELATION LIST
result_2 = pvdeg.montecarlo.generateCorrelatedSamples(
corr=[],
- stats = {'Ea' : {'mean' : 62.08, 'stdev' : 7.3858 }, 'LnR0' : {'mean' : 13.7223084 , 'stdev' : 2.47334772}, 'X' : {'mean' : 0.0341 , 'stdev' : 0.0992757 }},
- n = 50,
- seed = 1
+ stats={
+ "Ea": {"mean": 62.08, "stdev": 7.3858},
+ "LnR0": {"mean": 13.7223084, "stdev": 2.47334772},
+ "X": {"mean": 0.0341, "stdev": 0.0992757},
+ },
+ n=50,
+ seed=1,
)
# populated correlation list, ALL R = 0
result_3 = pvdeg.montecarlo.generateCorrelatedSamples(
- corr=[pvdeg.montecarlo.Corr('Ea', 'X', 0), pvdeg.montecarlo.Corr('Ea', 'LnR0', 0), pvdeg.montecarlo.Corr('X', 'LnR0', 0)],
- stats = {'Ea' : {'mean' : 62.08, 'stdev' : 7.3858 }, 'LnR0' : {'mean' : 13.7223084 , 'stdev' : 2.47334772}, 'X' : {'mean' : 0.0341 , 'stdev' : 0.0992757 }},
- n = 50,
- seed = 1
+ corr=[
+ pvdeg.montecarlo.Corr("Ea", "X", 0),
+ pvdeg.montecarlo.Corr("Ea", "LnR0", 0),
+ pvdeg.montecarlo.Corr("X", "LnR0", 0),
+ ],
+ stats={
+ "Ea": {"mean": 62.08, "stdev": 7.3858},
+ "LnR0": {"mean": 13.7223084, "stdev": 2.47334772},
+ "X": {"mean": 0.0341, "stdev": 0.0992757},
+ },
+ n=50,
+ seed=1,
)
pd.testing.assert_frame_equal(result_1, CORRELATED_SAMPLES_1)
pd.testing.assert_frame_equal(result_2, CORRELATED_SAMPLES_2)
pd.testing.assert_frame_equal(result_3, CORRELATED_SAMPLES_3)
+
def test_simulate():
"""
test pvdeg.montecarlo.simulate
@@ -80,20 +102,26 @@ def test_simulate():
"""
poa_irradiance = pvdeg.spectral.poa_irradiance(WEATHER, META)
- temp_mod = pvdeg.temperature.module(weather_df=WEATHER, meta=META, poa=poa_irradiance, conf='open_rack_glass_polymer')
+ temp_mod = pvdeg.temperature.module(
+ weather_df=WEATHER,
+ meta=META,
+ poa=poa_irradiance,
+ conf="open_rack_glass_polymer",
+ )
- poa_global = poa_irradiance['poa_global'].to_numpy()
+ poa_global = poa_irradiance["poa_global"].to_numpy()
cell_temperature = temp_mod.to_numpy()
- function_kwargs = {'poa_global': poa_global, 'module_temp': cell_temperature}
+ function_kwargs = {"poa_global": poa_global, "module_temp": cell_temperature}
new_results = pvdeg.montecarlo.simulate(
func=pvdeg.degradation.vecArrhenius,
- correlated_samples=CORRELATED_SAMPLES_1,
- **function_kwargs)
+ correlated_samples=CORRELATED_SAMPLES_1,
+ **function_kwargs,
+ )
new_results_df = pd.DataFrame(new_results)
new_results_df.columns = ARRHENIUS_RESULT.columns
- pd.testing.assert_frame_equal(new_results_df, ARRHENIUS_RESULT)
\ No newline at end of file
+ pd.testing.assert_frame_equal(new_results_df, ARRHENIUS_RESULT)
diff --git a/tests/test_scenario.py b/tests/test_scenario.py
index 5c08e17..9070142 100644
--- a/tests/test_scenario.py
+++ b/tests/test_scenario.py
@@ -2,44 +2,53 @@
from pvdeg.standards import standoff
from pvdeg import TEST_DATA_DIR
import json
-import pandas as pd
+import pandas as pd
import pytest
import os
# problems with scenario creating directory in test directory?
-EMAIL = 'user@mail.com'
-API_KEY = 'DEMO_KEY'
+EMAIL = "user@mail.com"
+API_KEY = "DEMO_KEY"
+
def test_Scenario_add():
- a = Scenario(name='test')
+ a = Scenario(name="test")
a.clean()
a.restore_credentials(email=EMAIL, api_key=API_KEY)
a.addLocation(lat_long=(40.63336, -73.99458))
- a.addModule(module_name='test-module')
- a.addJob(func=standoff,func_kwarg={'wind_factor' : 0.35})
-
+ a.addModule(module_name="test-module")
+ a.addJob(func=standoff, func_kwarg={"wind_factor": 0.35})
+
restored = Scenario.load_json(
- file_path=os.path.join(TEST_DATA_DIR, 'test-scenario.json')
- )
-
+ file_path=os.path.join(TEST_DATA_DIR, "test-scenario.json")
+ )
+
a.path, restored.path = None, None
a.file, restored.file = None, None
-
+
assert a == restored
+
def test_Scenario_run():
- a = Scenario.load_json(file_path=os.path.join(TEST_DATA_DIR, 'test-scenario.json'), email=EMAIL, api_key=API_KEY)
+ a = Scenario.load_json(
+ file_path=os.path.join(TEST_DATA_DIR, "test-scenario.json"),
+ email=EMAIL,
+ api_key=API_KEY,
+ )
a.run()
-
- res_df = a.results['test-module']["GLUSE"]
+
+ res_df = a.results["test-module"]["GLUSE"]
known_df = pd.DataFrame(
- {'x': {0: 0.0},
- 'T98_0': {0: 68.80867997141961},
- 'T98_inf': {0: 46.362946615593664}
- } )
+ {
+ "x": {0: 0.0},
+ "T98_0": {0: 68.80867997141961},
+ "T98_inf": {0: 46.362946615593664},
+ }
+ )
pd.testing.assert_frame_equal(res_df, known_df, check_dtype=False)
+
# def test_clean():
# a = Scenario(name='clean-a')
# a.file = 'non-existent-file.json'
@@ -50,20 +59,27 @@ def test_Scenario_run():
# with pytest.raises(ValueError):
# b.clean()
+
def test_addLocation_pvgis():
- a = Scenario(name='location-test')
+ a = Scenario(name="location-test")
with pytest.raises(ValueError):
- a.addLocation((40.63336, -73.99458), weather_db='PSM3') # no api key
+ a.addLocation((40.63336, -73.99458), weather_db="PSM3") # no api key
+
def test_addModule_badmat(capsys):
- a = Scenario.load_json(file_path=os.path.join(TEST_DATA_DIR, 'test-scenario.json'), email=EMAIL, api_key=API_KEY)
+ a = Scenario.load_json(
+ file_path=os.path.join(TEST_DATA_DIR, "test-scenario.json"),
+ email=EMAIL,
+ api_key=API_KEY,
+ )
+
+ a.addModule(module_name="fail", material="fake-material")
- a.addModule(module_name='fail', material='fake-material')
-
captured = capsys.readouterr()
assert "Material Not Found - No module added to scenario." in captured.out
assert "If you need to add a custom material, use .add_material()" in captured.out
+
# def test_addModule_existingmod(capsys):
# b = Scenario.load_json(file_path=os.path.join(TEST_DATA_DIR, 'test-scenario.json'), email=EMAIL, api_key=API_KEY)
@@ -82,15 +98,17 @@ def test_addModule_badmat(capsys):
# captured = capsys.readouterr()
# assert 'Module "works-see-added" added.' in captured.out
+
def test_addJob_bad(capsys):
- a = Scenario(name='non-callable-pipeline-func')
+ a = Scenario(name="non-callable-pipeline-func")
- a.addJob(func='str_not_callable')
+ a.addJob(func="str_not_callable")
captured = capsys.readouterr()
assert 'FAILED: Requested function "str_not_callable" not found' in captured.out
assert "Function has not been added to pipeline." in captured.out
-
+
+
# def test_addJob_seeadded():
# a = Scenario(name='good-func-see-added')
# func=standoff
@@ -108,5 +126,3 @@ def test_addJob_bad(capsys):
# geospatial tests should only run if on hpc, ask martin about protocol. load meta csv and weather nc (for very small scenario?)
-
-
diff --git a/tests/test_standards.py b/tests/test_standards.py
index 284148a..80caa39 100644
--- a/tests/test_standards.py
+++ b/tests/test_standards.py
@@ -76,39 +76,58 @@ def test_standoff():
pd.testing.assert_frame_equal(result_l1, df_expected_result_l1)
pd.testing.assert_frame_equal(result_l2, df_expected_result_l2)
-def test_eff_gap():
- weather_file = os.path.join(TEST_DATA_DIR,'xeff_test.csv')
- xeff_weather, xeff_meta = pvdeg.weather.read(weather_file,'csv')
+
+def test_eff_gap():
+ weather_file = os.path.join(TEST_DATA_DIR, "xeff_test.csv")
+ xeff_weather, xeff_meta = pvdeg.weather.read(weather_file, "csv")
T_0, T_inf, xeff_poa = pvdeg.standards.eff_gap_parameters(
- weather_df = xeff_weather,
- meta = xeff_meta,
- sky_model = "isotropic",
- temp_model = "sapm",
- conf_0 = "insulated_back_glass_polymer",
- conf_inf = "open_rack_glass_polymer",
- wind_factor = 0.33)
- eff_gap = pvdeg.standards.eff_gap(T_0, T_inf, xeff_weather['module_temperature'], xeff_weather["temp_air"], xeff_poa["poa_global"], x_0=6.5, poa_min=400, t_amb_min=0)
-
- assert eff_gap==pytest.approx(3.6767284845789825)
+ weather_df=xeff_weather,
+ meta=xeff_meta,
+ sky_model="isotropic",
+ temp_model="sapm",
+ conf_0="insulated_back_glass_polymer",
+ conf_inf="open_rack_glass_polymer",
+ wind_factor=0.33,
+ )
+ eff_gap = pvdeg.standards.eff_gap(
+ T_0,
+ T_inf,
+ xeff_weather["module_temperature"],
+ xeff_weather["temp_air"],
+ xeff_poa["poa_global"],
+ x_0=6.5,
+ poa_min=400,
+ t_amb_min=0,
+ )
+
+ assert eff_gap == pytest.approx(3.6767284845789825)
# assert expected_result_l1 == pytest.approx(result_l1)
# assert expected_result_l2 == pytest.approx(result_l2, abs=1e-5)
def test_T98():
- weather_db = 'PVGIS' #This pulls data for most of the world.
- weather_id = (24.7136, 46.6753) #Riyadh, Saudi Arabia
- #weather_id = (35.6754, 139.65) #Tokyo, Japan
- weather_arg = {'map_variables': True}
+ weather_db = "PVGIS" # This pulls data for most of the world.
+ weather_id = (24.7136, 46.6753) # Riyadh, Saudi Arabia
+ # weather_id = (35.6754, 139.65) #Tokyo, Japan
+ weather_arg = {"map_variables": True}
WEATHER_df, META = pvdeg.weather.get(weather_db, weather_id)
- df, meta_data=pvdeg.weather.read(
- os.path.join(TEST_DATA_DIR, "psm3_pytest.csv"),"csv"
+ df, meta_data = pvdeg.weather.read(
+ os.path.join(TEST_DATA_DIR, "psm3_pytest.csv"), "csv"
)
standoff = pvdeg.standards.standoff(weather_df=WEATHER_df, meta=META)
- standoff_1 = pvdeg.standards.standoff(weather_df=WEATHER_df, meta=META,
- T98=70, tilt=META['latitude'], azimuth=None,
- sky_model='isotropic', temp_model='sapm', conf_0='insulated_back_glass_polymer', conf_inf='open_rack_glass_polymer',
- x_0=6.5, wind_factor=0.33)
- assert standoff.x[0]== pytest.approx(9.261615)
- assert standoff.T98_0[0]== pytest.approx(89.5806502251565)
- assert standoff.T98_inf[0]== pytest.approx(63.79827740597881)
-
+ standoff_1 = pvdeg.standards.standoff(
+ weather_df=WEATHER_df,
+ meta=META,
+ T98=70,
+ tilt=META["latitude"],
+ azimuth=None,
+ sky_model="isotropic",
+ temp_model="sapm",
+ conf_0="insulated_back_glass_polymer",
+ conf_inf="open_rack_glass_polymer",
+ x_0=6.5,
+ wind_factor=0.33,
+ )
+ assert standoff.x[0] == pytest.approx(9.261615)
+ assert standoff.T98_0[0] == pytest.approx(89.5806502251565)
+ assert standoff.T98_inf[0] == pytest.approx(63.79827740597881)
diff --git a/tests/test_symbolic.py b/tests/test_symbolic.py
index 92c5ffd..d7e98aa 100644
--- a/tests/test_symbolic.py
+++ b/tests/test_symbolic.py
@@ -3,14 +3,14 @@
import json
import numpy as np
import pandas as pd
-import sympy as sp # not a dependency, may cause issues
+import sympy as sp # not a dependency, may cause issues
import pvdeg
from pvdeg import TEST_DATA_DIR
WEATHER = pd.read_csv(
os.path.join(TEST_DATA_DIR, r"weather_day_pytest.csv"),
index_col=0,
- parse_dates=True
+ parse_dates=True,
)
with open(os.path.join(TEST_DATA_DIR, "meta.json"), "r") as file:
@@ -20,7 +20,7 @@
# degradation rate, D
# degradation constant, k_d
-# electric field, E = Vbias / d
+# electric field, E = Vbias / d
# Vbias, potential difference between cells and frame
# d, encapsulant thickness
@@ -28,89 +28,95 @@
# Vbias, potential difference between cells and frame
# Rencap, resistance of encapsulant
-k_d, Vbias, Rencap, d = sp.symbols('k_d Vbias Rencap d')
+k_d, Vbias, Rencap, d = sp.symbols("k_d Vbias Rencap d")
pid = k_d * (Vbias / d) * (Vbias / Rencap)
pid_kwarg = {
- 'Vbias' : 1000,
- 'Rencap' : 1e9,
- 'd': 0.0005,
- 'k_d': 1e-9,
+ "Vbias": 1000,
+ "Rencap": 1e9,
+ "d": 0.0005,
+ "k_d": 1e-9,
}
+
def test_symbolic_floats():
- res = pvdeg.symbolic.calc_kwarg_floats(
- expr=pid,
- kwarg=pid_kwarg
- )
+ res = pvdeg.symbolic.calc_kwarg_floats(expr=pid, kwarg=pid_kwarg)
assert res == pytest.approx(2e-9)
+
def test_symbolic_df():
pid_df = pd.DataFrame([pid_kwarg] * 5)
- res_series = pvdeg.symbolic.calc_df_symbolic(
- expr=pid,
- df=pid_df
- )
+ res_series = pvdeg.symbolic.calc_df_symbolic(expr=pid, df=pid_df)
- pid_values = pd.Series([2e-9]*5)
+ pid_values = pd.Series([2e-9] * 5)
pd.testing.assert_series_equal(res_series, pid_values, check_dtype=False)
def test_symbolic_timeseries():
- lnR_0, I, X, Ea, k, T = sp.symbols('lnR_0 I X Ea k T')
- ln_R_D_expr = lnR_0 * I**X * sp.exp( (-Ea)/(k * T) )
+ lnR_0, I, X, Ea, k, T = sp.symbols("lnR_0 I X Ea k T")
+ ln_R_D_expr = lnR_0 * I**X * sp.exp((-Ea) / (k * T))
module_temps = pvdeg.temperature.module(
- weather_df=WEATHER,
- meta=META,
- conf="open_rack_glass_glass"
+ weather_df=WEATHER, meta=META, conf="open_rack_glass_glass"
)
- poa_irradiance = pvdeg.spectral.poa_irradiance(
- weather_df=WEATHER,
- meta=META
- )
+ poa_irradiance = pvdeg.spectral.poa_irradiance(weather_df=WEATHER, meta=META)
- module_temps_k = module_temps + 273.15 # convert C -> K
- poa_global = poa_irradiance['poa_global'] # take only the global irradiance series from the total irradiance dataframe
- poa_global_kw = poa_global / 1000 # [W/m^2] -> [kW/m^2]
+ module_temps_k = module_temps + 273.15 # convert C -> K
+ poa_global = poa_irradiance[
+ "poa_global"
+ ] # take only the global irradiance series from the total irradiance dataframe
+ poa_global_kw = poa_global / 1000 # [W/m^2] -> [kW/m^2]
values_kwarg = {
- 'Ea': 62.08, # activation energy, [kJ/mol]
- 'k': 8.31446e-3, # boltzmans constant, [kJ/(mol * K)]
- 'T': module_temps_k, # module temperature, [K]
- 'I': poa_global_kw, # module plane of array irradiance, [W/m2]
- 'X': 0.0341, # irradiance relation, [unitless]
- 'lnR_0': 13.72, # prefactor degradation [ln(%/h)]
+ "Ea": 62.08, # activation energy, [kJ/mol]
+        "k": 8.31446e-3,  # Boltzmann constant, [kJ/(mol * K)]
+ "T": module_temps_k, # module temperature, [K]
+        "I": poa_global_kw,  # module plane-of-array irradiance, [kW/m^2]
+ "X": 0.0341, # irradiance relation, [unitless]
+ "lnR_0": 13.72, # prefactor degradation [ln(%/h)]
}
res = pvdeg.symbolic.calc_kwarg_timeseries(
- expr=ln_R_D_expr,
- kwarg=values_kwarg
+ expr=ln_R_D_expr, kwarg=values_kwarg
).sum()
assert res == pytest.approx(6.5617e-09)
+
def test_calc_df_symbolic_bad():
- expr = sp.symbols('not_in_columns')
- df = pd.DataFrame([[1,2,3,5]],columns=['a','b','c','d'])
+ expr = sp.symbols("not_in_columns")
+ df = pd.DataFrame([[1, 2, 3, 5]], columns=["a", "b", "c", "d"])
with pytest.raises(ValueError):
pvdeg.symbolic.calc_df_symbolic(expr=expr, df=df)
+
def test_calc_kwarg_timeseries_bad_type():
# try passing an invalid argument type
with pytest.raises(ValueError, match="only simple numerics or timeseries allowed"):
- pvdeg.symbolic.calc_kwarg_timeseries(expr=None, kwarg={'bad':pd.DataFrame()})
+ pvdeg.symbolic.calc_kwarg_timeseries(expr=None, kwarg={"bad": pd.DataFrame()})
+
def test_calc_kwarg_timeseries_bad_mismatch_lengths():
# arrays of different lengths
- with pytest.raises(NotImplementedError, match="arrays/series are different lengths. fix mismatched length. otherwise arbitrary symbolic solution is too complex for solver. nested loops or loops dependent on previous results not supported."):
- pvdeg.symbolic.calc_kwarg_timeseries(expr=None, kwarg={'len1':np.zeros((5,)), 'len2':np.zeros(10,)})
+ with pytest.raises(
+ NotImplementedError,
+ match="arrays/series are different lengths. fix mismatched length. otherwise arbitrary symbolic solution is too complex for solver. nested loops or loops dependent on previous results not supported.",
+ ):
+ pvdeg.symbolic.calc_kwarg_timeseries(
+ expr=None,
+ kwarg={
+ "len1": np.zeros((5,)),
+ "len2": np.zeros(
+ 10,
+ ),
+ },
+ )
-def test_calc_kwarg_timeseries_no_index():
- v1, v2 = sp.symbols('v1 v2')
- expr = v1 * v2
\ No newline at end of file
+def test_calc_kwarg_timeseries_no_index():
+ v1, v2 = sp.symbols("v1 v2")
+ expr = v1 * v2
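+    # stub; a hedged sketch of the intended call might be:
+    # pvdeg.symbolic.calc_kwarg_timeseries(
+    #     expr=expr, kwarg={"v1": np.ones(3), "v2": np.ones(3)}
+    # )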
diff --git a/tests/test_temperature.py b/tests/test_temperature.py
index 9053c0c..8b322d6 100644
--- a/tests/test_temperature.py
+++ b/tests/test_temperature.py
@@ -256,6 +256,7 @@ def test_wind_speed_factor_noct_sam():
assert 2 == height_above_3m
assert 1 == height_below_3m
+
def test_wind_speed_factor_prillman():
wind_speed_factor = pvdeg.temperature._wind_speed_factor(
temp_model="prillman", meta=META, wind_factor=None
@@ -263,6 +264,7 @@ def test_wind_speed_factor_prillman():
assert 1 == wind_speed_factor
+
def test_wind_speed_factor_generic_linear():
wind_speed_factor = pvdeg.temperature._wind_speed_factor(
temp_model="generic_linear", meta=META, wind_factor=0.15
@@ -270,7 +272,10 @@ def test_wind_speed_factor_generic_linear():
pytest.approx(1.2730501155464236, wind_speed_factor)
-# this test will fail when the else condition becomes deprecated in the future.
+
+# this test will fail when the else condition becomes deprecated in the future.
# It is fine to remove this when it stops working, added for coverage purposes
def test_wind_speed_factor_null():
- assert 1 == pvdeg.temperature._wind_speed_factor(temp_model='', meta=META, wind_factor=None)
\ No newline at end of file
+ assert 1 == pvdeg.temperature._wind_speed_factor(
+ temp_model="", meta=META, wind_factor=None
+ )
diff --git a/tests/test_weather.py b/tests/test_weather.py
index cc8f8bf..f50b044 100644
--- a/tests/test_weather.py
+++ b/tests/test_weather.py
@@ -32,16 +32,16 @@
META_KEYS = [""]
-
def test_colum_name():
- df, meta_data=pvdeg.weather.read(
- os.path.join(TEST_DATA_DIR, "psm3_pytest.csv"),"csv"
-)
+ df, meta_data = pvdeg.weather.read(
+ os.path.join(TEST_DATA_DIR, "psm3_pytest.csv"), "csv"
+ )
assert "City" in meta_data.keys()
assert "tz" in meta_data.keys()
assert "Year" in df
assert "dew_point" in df
+
def test_get():
"""
Test with (lat,lon) and gid options