conftest.py

"""
Reusable fixtures for tests, shared across the whole project.

See https://docs.pytest.org/en/7.1.x/reference/fixtures.html#conftest-py-sharing-fixtures-across-multiple-files
"""

import pathlib
from pathlib import Path
import pandas as pd
import pytest
from click.testing import Result
from typer.testing import CliRunner
from cmip_ref import cli
from cmip_ref.config import Config
from cmip_ref.datasets.cmip6 import CMIP6DatasetAdapter
from cmip_ref.testing import TEST_DATA_DIR, fetch_sample_data
from cmip_ref_core.datasets import SourceDatasetType
from cmip_ref_core.metrics import DataRequirement, MetricExecutionDefinition, MetricResult
from cmip_ref_core.providers import MetricsProvider


@pytest.fixture
def sample_data_dir() -> Path:
    return TEST_DATA_DIR / "sample-data"


@pytest.fixture(autouse=True, scope="session")
def sample_data() -> None:
    # Downloads the sample data if it doesn't exist
    fetch_sample_data()


@pytest.fixture
def cmip6_data_catalog(sample_data_dir) -> pd.DataFrame:
    adapter = CMIP6DatasetAdapter()
    return adapter.find_local_datasets(sample_data_dir / "CMIP6")


@pytest.fixture(autouse=True)
def config(tmp_path, monkeypatch) -> Config:
    monkeypatch.setenv("REF_CONFIGURATION", str(tmp_path / "cmip_ref"))

    # Uses the default configuration
    cfg = Config.load(tmp_path / "cmip_ref" / "cmip_ref.toml")

    # Allow adding datasets from outside the tree for testing
    cfg.paths.allow_out_of_tree_datasets = True

    # Use a SQLite in-memory database for testing
    # cfg.db.database_url = "sqlite:///:memory:"

    cfg.save()

    return cfg
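

# Illustrative only: ``config`` is autouse, so every test already runs against
# an isolated REF configuration under ``tmp_path``; request the fixture
# explicitly only when a test needs to inspect or tweak the values, e.g.
#
#   def test_out_of_tree_datasets_allowed(config):
#       assert config.paths.allow_out_of_tree_datasets is True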


@pytest.fixture
def invoke_cli():
    """
    Invoke the CLI with the given arguments and verify the exit code
    """
    # We want to split stderr and stdout
    # stderr == logging
    # stdout == output from commands
    runner = CliRunner(mix_stderr=False)

    def _invoke_cli(args: list[str], expected_exit_code: int = 0) -> Result:
        result = runner.invoke(
            app=cli.app,
            args=args,
        )

        if result.exit_code != expected_exit_code:
            print(result.stdout)
            print(result.stderr)

            if result.exception:
                raise result.exception
            raise ValueError(f"Expected exit code {expected_exit_code}, got {result.exit_code}")
        return result

    return _invoke_cli
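

# Illustrative only: a minimal sketch of how a test module might consume the
# ``invoke_cli`` fixture. The assertion on "Usage" assumes the standard
# Typer/Click help text, and the failing-command example assumes Click's usual
# exit code of 2 for usage errors.
#
#   def test_cli_help(invoke_cli):
#       result = invoke_cli(["--help"])
#       assert "Usage" in result.stdout
#
#   def test_cli_unknown_command(invoke_cli):
#       invoke_cli(["not-a-real-command"], expected_exit_code=2)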


class MockMetric:
    name = "mock"
    slug = "mock"

    def __init__(self, temp_dir: pathlib.Path) -> None:
        self.temp_dir = temp_dir

    # This runs on every dataset
    data_requirements = (DataRequirement(source_type=SourceDatasetType.CMIP6, filters=(), group_by=None),)

    def run(self, definition: MetricExecutionDefinition) -> MetricResult:
        # TODO: This doesn't write output.json, use build function?
        return MetricResult(
            bundle_filename=self.temp_dir / definition.output_fragment / "output.json",
            successful=True,
            definition=definition,
        )


class FailedMetric:
    name = "failed"
    slug = "failed"

    data_requirements = (DataRequirement(source_type=SourceDatasetType.CMIP6, filters=(), group_by=None),)

    def run(self, definition: MetricExecutionDefinition) -> MetricResult:
        return MetricResult.build_from_failure(definition)


@pytest.fixture
def provider(tmp_path) -> MetricsProvider:
    provider = MetricsProvider("mock_provider", "v0.1.0")
    provider.register(MockMetric(tmp_path))
    provider.register(FailedMetric())

    return provider


@pytest.fixture
def mock_metric(tmp_path) -> MockMetric:
    return MockMetric(tmp_path)
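

# Illustrative only: a hedged sketch of tests that could exercise these
# fixtures. ``provider.name`` is an assumption about the MetricsProvider
# interface rather than a confirmed attribute.
#
#   def test_provider_registers_metrics(provider):
#       assert provider.name == "mock_provider"
#
#   def test_mock_metric_requires_cmip6(mock_metric):
#       requirement = mock_metric.data_requirements[0]
#       assert requirement.source_type == SourceDatasetType.CMIP6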