Skip to content

Commit

Permalink
test(plugins): add aggregate and regexp plugin
Browse files Browse the repository at this point in the history
  • Loading branch information
k4black committed Dec 12, 2023
1 parent 973ec16 commit 2a70e6d
Show file tree
Hide file tree
Showing 32 changed files with 245 additions and 220 deletions.
12 changes: 10 additions & 2 deletions checker/plugins/aggregate.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,16 +15,24 @@ class Args(PluginABC.Args):
scores: list[float]
weights: list[float] | None = None
strategy: Literal["mean", "sum", "min", "max", "product"] = "mean"
# TODO: validate for weights: len weights should be equal to len scores
# TODO: validate not empty scores

def _run(self, args: Args, *, verbose: bool = False) -> str:
weights = args.weights or [1.0] * len(args.scores)
weights = args.weights or ([1.0] * len(args.scores))

if len(args.scores) != len(args.weights):
if len(args.scores) != len(weights):
raise ExecutionFailedError(
f"Length of scores ({len(args.scores)}) and weights ({len(weights)}) does not match",
output=f"Length of scores ({len(args.scores)}) and weights ({len(weights)}) does not match",
)

if len(args.scores) == 0 or len(weights) == 0:
raise ExecutionFailedError(
f"Length of scores ({len(args.scores)}) or weights ({len(weights)}) is zero",
output=f"Length of scores ({len(args.scores)}) or weights ({len(weights)}) is zero",
)

weighted_scores = [score * weight for score, weight in zip(args.scores, weights)]

if args.strategy == "mean":
Expand Down
9 changes: 6 additions & 3 deletions checker/plugins/regex.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,14 +11,17 @@ class Args(PluginABC.Args):
origin: str
patterns: list[str]
regexps: list[str]
# TODO: Add validation for patterns and regexps

def _run(self, args: Args, *, verbose: bool = False) -> str:
# TODO: add verbose output with files list
import re
from pathlib import Path

# TODO: move to Args validation
if not Path(args.origin).exists():
raise ExecutionFailedError(
f"Origin {args.origin} does not exist",
f"Origin '{args.origin}' does not exist",
output=f"Origin {args.origin} does not exist",
)

Expand All @@ -31,7 +34,7 @@ def _run(self, args: Args, *, verbose: bool = False) -> str:
for regexp in args.regexps:
if re.search(regexp, file_content, re.MULTILINE):
raise ExecutionFailedError(
f"File {file} matches regexp {regexp}",
output=f"File {file} matches regexp {regexp}",
f"File '{file.name}' matches regexp '{regexp}'",
output=f"File '{file}' matches regexp '{regexp}'",
)
return "No forbidden regexps found"
181 changes: 0 additions & 181 deletions checker/tester/executor.py

This file was deleted.

4 changes: 1 addition & 3 deletions checker/tester/tester.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
from __future__ import annotations

import tempfile
from collections.abc import Generator
from dataclasses import dataclass
from pathlib import Path
from typing import Any

Expand All @@ -21,7 +19,7 @@ class Tester:
1. Create temporary directory
2. Execute global pipeline
3. Execute task pipeline for each task
4. Collect results and return them
    4. Collect results and push them
5. Remove temporary directory
"""
__test__ = False # do not collect this class for pytest
Expand Down
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
27 changes: 27 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import pytest


def pytest_addoption(parser: pytest.Parser) -> None:
    """Register the ``--integration`` command-line flag that enables integration tests."""
    flag_options = {
        "action": "store_true",
        "dest": "integration",
        "default": False,
        "help": "enable integration tests",
    }
    parser.addoption("--integration", **flag_options)


def pytest_configure(config: pytest.Config) -> None:
    """Declare the ``integration`` marker so pytest does not warn about an unknown mark."""
    marker_definition = "integration: mark test as integration test"
    config.addinivalue_line("markers", marker_definition)


def pytest_collection_modifyitems(config: pytest.Config, items: list[pytest.Item]) -> None:
    """Skip every test marked ``integration`` unless ``--integration`` was passed on the CLI."""
    run_integration = config.getoption("--integration")
    if run_integration:
        # Flag present on the command line: leave all collected items untouched.
        return

    skip_marker = pytest.mark.skip(reason="need --integration option to run")
    for collected in items:
        if "integration" in collected.keywords:
            collected.add_marker(skip_marker)
63 changes: 63 additions & 0 deletions tests/plugins/test_aggregate.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
from __future__ import annotations

from typing import Any

import pytest
from pydantic import ValidationError

from checker.plugins.aggregate import AggregatePlugin
from checker.exceptions import ExecutionFailedError


class TestAggregatePlugin:
    """Unit tests for ``AggregatePlugin``: Args validation, strategies, and error cases."""

    @pytest.mark.parametrize("parameters, expected_exception", [
        ({'scores': [0.5, 1.0, 1], 'weights': [1, 2, 3], 'strategy': 'mean'}, None),
        ({'scores': [0.5, 1.0, 1], 'weights': [1, 2, 3]}, None),
        ({'scores': [0.5, 1.0, 1], 'weights': None}, None),
        ({'scores': [0.5, 1.0, 1], 'strategy': 'mean'}, None),
        ({'scores': [0.5, 1.0, 1]}, None),
        ({'scores': [0.5, 1.0, 1], 'weights': [1, 2, 3], 'strategy': 'invalid_strategy'}, ValidationError),
        ({}, ValidationError),
    ])
    def test_plugin_args(self, parameters: dict[str, Any], expected_exception: type[Exception] | None) -> None:
        """Args accepts every valid parameter combination and rejects invalid ones.

        Note: the parametrize values are exception *classes*, so the annotation
        is ``type[Exception] | None`` (``pytest.raises`` expects a class).
        """
        if expected_exception:
            with pytest.raises(expected_exception):
                AggregatePlugin.Args(**parameters)
        else:
            AggregatePlugin.Args(**parameters)

    @pytest.mark.parametrize("scores, weights, strategy, expected", [
        ([10, 20, 30], None, "mean", "Score: 20.00"),
        ([1, 2, 3], [0.5, 0.5, 0.5], "sum", "Score: 3.00"),
        ([2, 4, 6], [1, 2, 3], "min", "Score: 2.00"),
        ([5, 10, 15], [1, 1, 1], "max", "Score: 15.00"),
        ([3, 3, 3], [1, 1, 1], "product", "Score: 27.00"),
    ])
    def test_aggregate_strategies(
        self,
        scores: list[float],
        weights: list[float] | None,
        strategy: str,
        expected: str,
    ) -> None:
        """Each aggregation strategy produces the expected formatted score."""
        plugin = AggregatePlugin()
        args = AggregatePlugin.Args(scores=scores, weights=weights, strategy=strategy)

        result = plugin._run(args)
        assert expected in result  # TODO: test float output when implemented

    @pytest.mark.parametrize("scores, weights", [
        ([1, 2, 3], [1, 2]),
        ([1], [1, 2]),
        ([], []),  # empty lists trigger the zero-length error, same message prefix
    ])
    def test_length_mismatch(self, scores: list[float], weights: list[float]) -> None:
        """Mismatched or empty scores/weights raise ExecutionFailedError."""
        # TODO: move to args validation
        plugin = AggregatePlugin()
        args = AggregatePlugin.Args(scores=scores, weights=weights)

        with pytest.raises(ExecutionFailedError) as e:
            plugin._run(args)
        assert "Length of scores" in str(e.value)

    def test_default_weights(self) -> None:
        """Omitting weights defaults them to 1.0 each, leaving the mean unchanged."""
        plugin = AggregatePlugin()
        args = AggregatePlugin.Args(scores=[10, 20, 30], strategy="mean")

        result = plugin._run(args)
        assert "Score: 20.00" in result  # TODO: test float output when implemented
1 change: 1 addition & 0 deletions tests/plugins/test_load_plugins.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# TODO: test plugin loader
Loading

0 comments on commit 2a70e6d

Please sign in to comment.