diff --git a/checker/__main__.py b/checker/__main__.py index eaf242f..524876e 100644 --- a/checker/__main__.py +++ b/checker/__main__.py @@ -11,22 +11,34 @@ from checker.course import Course, FileSystemTask from checker.tester import Tester from checker.utils import print_info + from .configs import CheckerConfig, DeadlinesConfig from .exceptions import BadConfig, TestingError from .plugins import load_plugins from .tester.pipeline import PipelineRunner -ClickReadableFile = click.Path(exists=True, file_okay=True, readable=True, path_type=Path) -ClickReadableDirectory = click.Path(exists=True, file_okay=False, readable=True, path_type=Path) + +ClickReadableFile = click.Path( + exists=True, file_okay=True, readable=True, path_type=Path +) +ClickReadableDirectory = click.Path( + exists=True, file_okay=False, readable=True, path_type=Path +) ClickWritableDirectory = click.Path(file_okay=False, writable=True, path_type=Path) @click.group(context_settings={"show_default": True}) @click.option( - "--checker-config", type=ClickReadableFile, default=".checker.yml", help="Path to the checker config file." + "--checker-config", + type=ClickReadableFile, + default=".checker.yml", + help="Path to the checker config file.", ) @click.option( - "--deadlines-config", type=ClickReadableFile, default=".deadlines.yml", help="Path to the deadlines config file." + "--deadlines-config", + type=ClickReadableFile, + default=".deadlines.yml", + help="Path to the deadlines config file.", ) @click.version_option(package_name="manytask-checker") @click.pass_context @@ -45,7 +57,9 @@ def cli( @cli.command() @click.argument("root", type=ClickReadableDirectory, default=".") -@click.option("-v/-s", "--verbose/--silent", is_flag=True, default=True, help="Verbose output") +@click.option( + "-v/-s", "--verbose/--silent", is_flag=True, default=True, help="Verbose output" +) @click.pass_context def validate( ctx: click.Context, @@ -64,42 +78,76 @@ def validate( checker_config = CheckerConfig.from_yaml(ctx.obj["course_config_path"]) deadlines_config = DeadlinesConfig.from_yaml(ctx.obj["deadlines_config_path"]) except BadConfig as e: - print_info("Configuration Failed", color='red') + print_info("Configuration Failed", color="red") print_info(e) exit(1) - print_info("Ok", color='green') + print_info("Ok", color="green") print_info("Validating Course Structure (and tasks configs)...") try: course = Course(checker_config, deadlines_config, root) course.validate() except BadConfig as e: - print_info("Course Validation Failed", color='red') + print_info("Course Validation Failed", color="red") print_info(e) exit(1) - print_info("Ok", color='green') + print_info("Ok", color="green") print_info("Validating tester...") try: tester = Tester(course, checker_config, verbose=verbose) tester.validate() except BadConfig as e: - print_info("Tester Validation Failed", color='red') + print_info("Tester Validation Failed", color="red") print_info(e) exit(1) - print_info("Ok", color='green') + print_info("Ok", color="green") @cli.command() @click.argument("root", type=ClickReadableDirectory, default=".") @click.argument("reference_root", type=ClickReadableDirectory, default=".") -@click.option("-t", "--task", type=str, multiple=True, default=None, help="Task name to check (multiple possible)") -@click.option("-g", "--group", type=str, multiple=True, default=None, help="Group name to check (multiple possible)") -@click.option("-p", "--parallelize", is_flag=True, default=True, help="Execute parallel checking of tasks") -@click.option("-n", 
"--num-processes", type=int, default=os.cpu_count(), help="Num of processes parallel checking") +@click.option( + "-t", + "--task", + type=str, + multiple=True, + default=None, + help="Task name to check (multiple possible)", +) +@click.option( + "-g", + "--group", + type=str, + multiple=True, + default=None, + help="Group name to check (multiple possible)", +) +@click.option( + "-p", + "--parallelize", + is_flag=True, + default=True, + help="Execute parallel checking of tasks", +) +@click.option( + "-n", + "--num-processes", + type=int, + default=os.cpu_count(), + help="Num of processes parallel checking", +) @click.option("--no-clean", is_flag=True, help="Clean or not check tmp folders") -@click.option("-v/-s", "--verbose/--silent", is_flag=True, default=True, help="Verbose tests output") -@click.option("--dry-run", is_flag=True, help="Do not execute anything, only log actions") +@click.option( + "-v/-s", + "--verbose/--silent", + is_flag=True, + default=True, + help="Verbose tests output", +) +@click.option( + "--dry-run", is_flag=True, help="Do not execute anything, only log actions" +) @click.pass_context def check( ctx: click.Context, @@ -122,7 +170,7 @@ def check( deadlines_config = DeadlinesConfig.from_yaml(ctx.obj["deadlines_config_path"]) # read filesystem, check existing tasks - course = Course(checker_config, deadlines_config, root, username='private') + course = Course(checker_config, deadlines_config, root, username="private") # validate tasks and groups if passed filesystem_tasks: dict[str, FileSystemTask] = dict() @@ -139,32 +187,51 @@ def check( print_info(f"Checking tasks: {', '.join(filesystem_tasks.keys())}") # create tester to... to test =) - tester = Tester(course, checker_config, verbose=verbose, cleanup=not no_clean, dry_run=dry_run) + tester = Tester( + course, checker_config, verbose=verbose, cleanup=not no_clean, dry_run=dry_run + ) # run tests # TODO: progressbar on parallelize try: - tester.run(tasks=list(filesystem_tasks.values()) if filesystem_tasks else None, report=False) + tester.run( + tasks=list(filesystem_tasks.values()) if filesystem_tasks else None, + report=False, + ) except TestingError as e: - print_info("TESTING FAILED", color='red') + print_info("TESTING FAILED", color="red") print_info(e) exit(1) except Exception as e: - print_info("UNEXPECTED ERROR", color='red') + print_info("UNEXPECTED ERROR", color="red") print_info(e) exit(1) - print_info("TESTING PASSED", color='green') + print_info("TESTING PASSED", color="green") @cli.command() @click.argument("root", type=ClickReadableDirectory, default=".") @click.argument("reference_root", type=ClickReadableDirectory, default=".") -@click.option("--submit-score", is_flag=True, help="Submit score to the Manytask server") -@click.option("--timestamp", type=str, default=None, help="Timestamp to use for the submission") -@click.option("--username", type=str, default=None, help="Username to use for the submission") +@click.option( + "--submit-score", is_flag=True, help="Submit score to the Manytask server" +) +@click.option( + "--timestamp", type=str, default=None, help="Timestamp to use for the submission" +) +@click.option( + "--username", type=str, default=None, help="Username to use for the submission" +) @click.option("--no-clean", is_flag=True, help="Clean or not check tmp folders") -@click.option("-v/-s", "--verbose/--silent", is_flag=True, default=False, help="Verbose tests output") -@click.option("--dry-run", is_flag=True, help="Do not execute anything, only log actions") +@click.option( + 
"-v/-s", + "--verbose/--silent", + is_flag=True, + default=False, + help="Verbose tests output", +) +@click.option( + "--dry-run", is_flag=True, help="Do not execute anything, only log actions" +) @click.pass_context def grade( ctx: click.Context, @@ -185,30 +252,35 @@ def grade( deadlines_config = DeadlinesConfig.from_yaml(ctx.obj["deadlines_config_path"]) # read filesystem, check existing tasks - course = Course(checker_config, deadlines_config, root, reference_root, username=username) + course = Course( + checker_config, deadlines_config, root, reference_root, username=username + ) # detect changes to test filesystem_tasks: list[FileSystemTask] = list() # TODO: detect changes - filesystem_tasks = [task for task in course.get_tasks(enabled=True) if task.name == 'hello_world'] + filesystem_tasks = [ + task for task in course.get_tasks(enabled=True) if task.name == "hello_world" + ] # create tester to... to test =) - tester = Tester(course, checker_config, verbose=verbose, cleanup=not no_clean, dry_run=dry_run) + tester = Tester( + course, checker_config, verbose=verbose, cleanup=not no_clean, dry_run=dry_run + ) # run tests # TODO: progressbar on parallelize try: tester.run(tasks=filesystem_tasks, report=True) except TestingError as e: - print_info("TESTING FAILED", color='red') + print_info("TESTING FAILED", color="red") print_info(e) exit(1) except Exception as e: - print_info("UNEXPECTED ERROR", color='red') + print_info("UNEXPECTED ERROR", color="red") print_info(e) - raise e exit(1) - print_info("TESTING PASSED", color='green') + print_info("TESTING PASSED", color="green") if __name__ == "__main__": diff --git a/checker/configs/__init__.py b/checker/configs/__init__.py index 60e9635..4cb8e05 100644 --- a/checker/configs/__init__.py +++ b/checker/configs/__init__.py @@ -1,9 +1,14 @@ +from .checker import CheckerTestingConfig # noqa: F401 from .checker import ( CheckerConfig, CheckerExportConfig, CheckerManytaskConfig, - CheckerTestingConfig, # noqa: F401 PipelineStageConfig, ) -from .deadlines import DeadlinesConfig, DeadlinesGroupConfig, DeadlinesSettingsConfig, DeadlinesTaskConfig # noqa: F401 +from .deadlines import ( + DeadlinesConfig, + DeadlinesGroupConfig, + DeadlinesSettingsConfig, + DeadlinesTaskConfig, +) # noqa: F401 from .task import TaskConfig # noqa: F401 diff --git a/checker/configs/checker.py b/checker/configs/checker.py index 4993511..08c7402 100644 --- a/checker/configs/checker.py +++ b/checker/configs/checker.py @@ -48,7 +48,6 @@ class CheckerManytaskConfig(CustomBaseModel): class PipelineStageConfig(CustomBaseModel): - class FailType(Enum): FAST = "fast" AFTER_ALL = "after_all" diff --git a/checker/configs/deadlines.py b/checker/configs/deadlines.py index 837e0dd..ae63004 100644 --- a/checker/configs/deadlines.py +++ b/checker/configs/deadlines.py @@ -4,13 +4,16 @@ from datetime import datetime, timedelta from enum import Enum + if sys.version_info < (3, 8): - from pytz import timezone as ZoneInfo - from pytz import ZoneInfoNotFoundError as ZoneInfoNotFoundError + from pytz import ( + ZoneInfoNotFoundError as ZoneInfoNotFoundError, + timezone as ZoneInfo, + ) else: from zoneinfo import ZoneInfo, ZoneInfoNotFoundError -from pydantic import Field, field_validator, model_validator, AnyUrl +from pydantic import AnyUrl, Field, field_validator, model_validator from .utils import CustomBaseModel, YamlLoaderMixin @@ -90,12 +93,18 @@ def check_dates(self) -> "DeadlinesGroupConfig": if isinstance(self.end, timedelta) and self.end < timedelta(): raise ValueError(f"end 
timedelta <{self.end}> should be positive") if isinstance(self.end, datetime) and self.end < self.start: - raise ValueError(f"end datetime <{self.end}> should be after the start <{self.start}>") + raise ValueError( + f"end datetime <{self.end}> should be after the start <{self.start}>" + ) # check steps last_step_date_or_delta = self.start for _, date_or_delta in self.steps.items(): - step_date = self.start + date_or_delta if isinstance(date_or_delta, timedelta) else date_or_delta + step_date = ( + self.start + date_or_delta + if isinstance(date_or_delta, timedelta) + else date_or_delta + ) last_step_date = ( self.start + last_step_date_or_delta if isinstance(last_step_date_or_delta, timedelta) @@ -105,7 +114,9 @@ def check_dates(self) -> "DeadlinesGroupConfig": if isinstance(date_or_delta, timedelta) and date_or_delta < timedelta(): raise ValueError(f"step timedelta <{date_or_delta}> should be positive") if isinstance(date_or_delta, datetime) and date_or_delta <= self.start: - raise ValueError(f"step datetime <{date_or_delta}> should be after the start {self.start}") + raise ValueError( + f"step datetime <{date_or_delta}> should be after the start {self.start}" + ) if step_date <= last_step_date: raise ValueError( @@ -125,8 +136,8 @@ class DeadlinesConfig(CustomBaseModel, YamlLoaderMixin): schedule: list[DeadlinesGroupConfig] def get_groups( - self, - enabled: bool | None = None, + self, + enabled: bool | None = None, ) -> list[DeadlinesGroupConfig]: groups = [group for group in self.schedule] @@ -138,10 +149,12 @@ def get_groups( return groups def get_tasks( - self, - enabled: bool | None = None, + self, + enabled: bool | None = None, ) -> list[DeadlinesTaskConfig]: - tasks = [task for group in self.get_groups(enabled=enabled) for task in group.tasks] + tasks = [ + task for group in self.get_groups(enabled=enabled) for task in group.tasks + ] if enabled is not None: tasks = [task for task in tasks if task.enabled == enabled] @@ -159,7 +172,9 @@ def check_version(cls, data: int) -> int: @field_validator("schedule") @classmethod - def check_group_names_unique(cls, data: list[DeadlinesGroupConfig]) -> list[DeadlinesGroupConfig]: + def check_group_names_unique( + cls, data: list[DeadlinesGroupConfig] + ) -> list[DeadlinesGroupConfig]: groups = [group.name for group in data] duplicates = [name for name in groups if groups.count(name) > 1] if duplicates: @@ -168,7 +183,9 @@ def check_group_names_unique(cls, data: list[DeadlinesGroupConfig]) -> list[Dead @field_validator("schedule") @classmethod - def check_task_names_unique(cls, data: list[DeadlinesGroupConfig]) -> list[DeadlinesGroupConfig]: + def check_task_names_unique( + cls, data: list[DeadlinesGroupConfig] + ) -> list[DeadlinesGroupConfig]: tasks_names = [task.name for group in data for task in group.tasks] duplicates = [name for name in tasks_names if tasks_names.count(name) > 1] if duplicates: diff --git a/checker/configs/task.py b/checker/configs/task.py index 6f1c632..e9702d9 100644 --- a/checker/configs/task.py +++ b/checker/configs/task.py @@ -2,7 +2,11 @@ from pydantic import Field, model_validator -from .checker import CheckerParametersConfig, CheckerStructureConfig, PipelineStageConfig +from .checker import ( + CheckerParametersConfig, + CheckerStructureConfig, + PipelineStageConfig, +) from .utils import CustomBaseModel, YamlLoaderMixin diff --git a/checker/course.py b/checker/course.py index 6adef7c..de56d86 100644 --- a/checker/course.py +++ b/checker/course.py @@ -6,8 +6,8 @@ from pathlib import Path from typing import Any 
-from .configs import TaskConfig, DeadlinesConfig -from .configs.checker import CheckerParametersConfig, CheckerConfig +from .configs import DeadlinesConfig, TaskConfig +from .configs.checker import CheckerConfig, CheckerParametersConfig from .exceptions import BadConfig @@ -47,8 +47,15 @@ def __init__( self.username = username or "unknown" - self.potential_groups = {group.name: group for group in self._search_potential_groups(self.repository_root)} - self.potential_tasks = {task.name: task for group in self.potential_groups.values() for task in group.tasks} + self.potential_groups = { + group.name: group + for group in self._search_potential_groups(self.repository_root) + } + self.potential_tasks = { + task.name: task + for group in self.potential_groups.values() + for task in group.tasks + } def validate(self) -> None: # check all groups and tasks mentioned in deadlines exists @@ -60,16 +67,18 @@ def validate(self) -> None: deadlines_tasks = self.deadlines.get_tasks(enabled=True) for deadlines_task in deadlines_tasks: if deadlines_task.name not in self.potential_tasks: - raise BadConfig(f"Task {deadlines_task.name} of not found in repository") + raise BadConfig( + f"Task {deadlines_task.name} of not found in repository" + ) def _copy_files_accounting_sub_rules( - self, - root: Path, - destination: Path, - search_pattern: str, - copy_patterns: Iterable[str], - ignore_patterns: Iterable[str], - sub_rules: dict[Path, tuple[Iterable[str], Iterable[str]]], + self, + root: Path, + destination: Path, + search_pattern: str, + copy_patterns: Iterable[str], + ignore_patterns: Iterable[str], + sub_rules: dict[Path, tuple[Iterable[str], Iterable[str]]], ): """ Copy files as usual, if face some folder from `sub_rules`, apply patterns from `sub_rules[folder]`. 
@@ -92,28 +101,34 @@ def _copy_files_accounting_sub_rules( relative_filename = str(path.relative_to(root)) if path.is_dir(): if path in sub_rules: - print(f" - Check Dir {path} to {destination / relative_filename} with sub rules (rec)") + print( + f" - Check Dir {path} to {destination / relative_filename} with sub rules (rec)" + ) self._copy_files_accounting_sub_rules( path, destination / relative_filename, - search_pattern='*', + search_pattern="*", copy_patterns=sub_rules[path][0], ignore_patterns=sub_rules[path][1], sub_rules=sub_rules, ) else: - print(f" - Check Dir {path} to {destination / relative_filename} (rec)") + print( + f" - Check Dir {path} to {destination / relative_filename} (rec)" + ) self._copy_files_accounting_sub_rules( path, destination / relative_filename, - search_pattern='*', + search_pattern="*", copy_patterns=copy_patterns, ignore_patterns=ignore_patterns, sub_rules=sub_rules, ) else: if any(path.match(copy_pattern) for copy_pattern in copy_patterns): - print(f" - Copy File {path} to {destination / relative_filename}") + print( + f" - Copy File {path} to {destination / relative_filename}" + ) destination.mkdir(parents=True, exist_ok=True) shutil.copyfile( path, @@ -127,25 +142,41 @@ def copy_files_for_testing(self, destination: Path) -> None: global_public_patterns = self.checker.structure.public_patterns or [] global_private_patterns = self.checker.structure.private_patterns or [] - print('REPO') + print("REPO") print(f"Copy files from {self.repository_root} to {destination}") self._copy_files_accounting_sub_rules( self.repository_root, destination, - search_pattern='*', - copy_patterns=['*'], + search_pattern="*", + copy_patterns=["*"], ignore_patterns=[ *global_ignore_patterns, *global_public_patterns, *global_private_patterns, ], sub_rules={ - self.repository_root / task.relative_path: ( - ['*'], + self.repository_root + / task.relative_path: ( + ["*"], [ - *(task_ignore if (task_ignore := task.config.structure.ignore_patterns) is not None else global_ignore_patterns), - *(task_public if (task_public := task.config.structure.public_patterns) is not None else global_public_patterns), - *(task_private if (task_private := task.config.structure.private_patterns) is not None else global_private_patterns), + *( + task_ignore + if (task_ignore := task.config.structure.ignore_patterns) + is not None + else global_ignore_patterns + ), + *( + task_public + if (task_public := task.config.structure.public_patterns) + is not None + else global_public_patterns + ), + *( + task_private + if (task_private := task.config.structure.private_patterns) + is not None + else global_private_patterns + ), ], ) for task in tasks @@ -153,12 +184,12 @@ def copy_files_for_testing(self, destination: Path) -> None: }, ) - print('REFERECNE') + print("REFERECNE") print(f"Copy files from {self.reference_root} to {destination}") self._copy_files_accounting_sub_rules( self.reference_root, destination, - search_pattern='*', + search_pattern="*", copy_patterns=[ *global_public_patterns, *global_private_patterns, @@ -167,13 +198,29 @@ def copy_files_for_testing(self, destination: Path) -> None: *self.checker.structure.ignore_patterns, ], sub_rules={ - self.reference_root / task.relative_path: ( + self.reference_root + / task.relative_path: ( [ - *(task_public if (task_public := task.config.structure.public_patterns) is not None else global_public_patterns), - *(task_private if (task_private := task.config.structure.private_patterns) is not None else global_private_patterns), + *( + task_public + if 
(task_public := task.config.structure.public_patterns) + is not None + else global_public_patterns + ), + *( + task_private + if (task_private := task.config.structure.private_patterns) + is not None + else global_private_patterns + ), ], [ - *(task_ignore if (task_ignore := task.config.structure.ignore_patterns) is not None else global_ignore_patterns), + *( + task_ignore + if (task_ignore := task.config.structure.ignore_patterns) + is not None + else global_ignore_patterns + ), ], ) for task in tasks @@ -184,18 +231,18 @@ def copy_files_for_testing(self, destination: Path) -> None: def list_files(startpath): for root, dirs, files in sorted(os.walk(startpath)): - level = root.replace(startpath, '').count(os.sep) - indent = ' ' * 4 * (level) - print('{}{}/'.format(indent, os.path.basename(root))) - subindent = ' ' * 4 * (level + 1) + level = root.replace(startpath, "").count(os.sep) + indent = " " * 4 * (level) + print("{}{}/".format(indent, os.path.basename(root))) + subindent = " " * 4 * (level + 1) for f in files: - print('{}{}'.format(subindent, f)) + print("{}{}".format(subindent, f)) list_files(str(destination)) def get_groups( - self, - enabled: bool | None = None, + self, + enabled: bool | None = None, ) -> list[FileSystemGroup]: return [ self.potential_groups[deadline_group.name] @@ -204,8 +251,8 @@ def get_groups( ] def get_tasks( - self, - enabled: bool | None = None, + self, + enabled: bool | None = None, ) -> list[FileSystemTask]: return [ self.potential_tasks[deadline_task.name] @@ -233,7 +280,9 @@ def _search_potential_groups(self, root: Path) -> list[FileSystemGroup]: try: task_config = TaskConfig.from_yaml(task_config_path) except BadConfig as e: - raise BadConfig(f"Task config {task_config_path} is invalid:\n{e}") + raise BadConfig( + f"Task config {task_config_path} is invalid:\n{e}" + ) potential_tasks.append( FileSystemTask( @@ -264,7 +313,9 @@ def _search_for_tasks_by_configs(self, root: Path) -> list[FileSystemTask]: def list_all_public_files(self, root: Path) -> list[Path]: # read global files glob_patterns = self.checker.structure.public_patterns - global_files = [file for pattern in glob_patterns for file in root.glob(pattern)] + global_files = [ + file for pattern in glob_patterns for file in root.glob(pattern) + ] # remove all task directories, wi # filter with tasks specific configs task_files = [ @@ -273,4 +324,3 @@ def list_all_public_files(self, root: Path) -> list[Path]: for pattern in task.config.structure.public_patterns for file in (root / task.relative_path).glob(pattern) ] - diff --git a/checker/exceptions.py b/checker/exceptions.py index 389ce4a..8bebeb1 100644 --- a/checker/exceptions.py +++ b/checker/exceptions.py @@ -18,26 +18,31 @@ class CheckerValidationError(CheckerException): class BadConfig(CheckerValidationError): """All configs exceptions: deadlines, checker and tasks configs""" + pass class BadStructure(CheckerValidationError): """Course structure exception: some files are missing, etc.""" + pass class ExportError(CheckerException): """Export stage exception""" + pass class ReportError(CheckerException): """Report stage exception""" + pass class TestingError(CheckerException): """All testers exceptions can occur during testing stage""" + pass diff --git a/checker/plugins/__init__.py b/checker/plugins/__init__.py index ea0ee0f..e976848 100644 --- a/checker/plugins/__init__.py +++ b/checker/plugins/__init__.py @@ -9,6 +9,7 @@ from .base import PluginABC # noqa: F401 + __all__ = [ "PluginABC", "load_plugins", @@ -16,7 +17,9 @@ def 
get_all_subclasses(cls: Type[PluginABC]) -> set[Type[PluginABC]]: - return set(cls.__subclasses__()).union([s for c in cls.__subclasses__() for s in get_all_subclasses(c)]) + return set(cls.__subclasses__()).union( + [s for c in cls.__subclasses__() for s in get_all_subclasses(c)] + ) def load_plugins( @@ -30,7 +33,9 @@ def load_plugins( :param verbose: verbose output """ search_directories = search_directories or [] - search_directories = [Path(__file__).parent] + search_directories # add local plugins first + search_directories = [ + Path(__file__).parent + ] + search_directories # add local plugins first # force load plugins print("Loading plugins...") diff --git a/checker/plugins/aggregate.py b/checker/plugins/aggregate.py index 6f39c09..57508e7 100644 --- a/checker/plugins/aggregate.py +++ b/checker/plugins/aggregate.py @@ -2,8 +2,8 @@ from typing import Literal -from .base import PluginABC, PluginOutput from ..exceptions import PluginExecutionFailed +from .base import PluginABC, PluginOutput class AggregatePlugin(PluginABC): @@ -33,7 +33,9 @@ def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput: output=f"Length of scores ({len(args.scores)}) or weights ({len(weights)}) is zero", ) - weighted_scores = [score * weight for score, weight in zip(args.scores, weights)] + weighted_scores = [ + score * weight for score, weight in zip(args.scores, weights) + ] if args.strategy == "mean": score = sum(weighted_scores) / len(weighted_scores) @@ -45,6 +47,7 @@ def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput: score = max(weighted_scores) elif args.strategy == "product": from functools import reduce + score = reduce(lambda x, y: x * y, weighted_scores) else: raise PluginExecutionFailed( diff --git a/checker/plugins/base.py b/checker/plugins/base.py index 543f335..bc554d6 100644 --- a/checker/plugins/base.py +++ b/checker/plugins/base.py @@ -31,6 +31,7 @@ class Args(BaseModel): """Base class for plugin arguments. You have to subclass this class in your plugin. 
""" + pass def run(self, args: dict[str, Any], *, verbose: bool = False) -> PluginOutput: diff --git a/checker/plugins/regex.py b/checker/plugins/regex.py index b9404b5..29576d6 100644 --- a/checker/plugins/regex.py +++ b/checker/plugins/regex.py @@ -1,5 +1,5 @@ -from .base import PluginABC, PluginOutput from ..exceptions import PluginExecutionFailed +from .base import PluginABC, PluginOutput class CheckRegexpsPlugin(PluginABC): diff --git a/checker/plugins/scripts.py b/checker/plugins/scripts.py index 25413f2..43d4399 100644 --- a/checker/plugins/scripts.py +++ b/checker/plugins/scripts.py @@ -2,8 +2,8 @@ from pydantic import Field -from .base import PluginABC, PluginOutput from ..exceptions import PluginExecutionFailed +from .base import PluginABC, PluginOutput class RunScriptPlugin(PluginABC): @@ -16,13 +16,14 @@ class Args(PluginABC.Args): script: str | list[str] timeout: float | None = None isolate: bool = False - env_whitelist: list[str] = Field(default_factory=lambda: ['PATH']) + env_whitelist: list[str] = Field(default_factory=lambda: ["PATH"]) def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput: import subprocess def set_up_env_sandbox() -> None: # pragma: nocover import os + env = os.environ.copy() os.environ.clear() for variable in args.env_whitelist: @@ -41,7 +42,7 @@ def set_up_env_sandbox() -> None: # pragma: nocover ) except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e: output = e.output or "" - output = output if isinstance(output, str) else output.decode('utf-8') + output = output if isinstance(output, str) else output.decode("utf-8") if isinstance(e, subprocess.TimeoutExpired): raise PluginExecutionFailed( @@ -55,5 +56,5 @@ def set_up_env_sandbox() -> None: # pragma: nocover ) from e return PluginOutput( - output=result.stdout.decode('utf-8'), + output=result.stdout.decode("utf-8"), ) diff --git a/checker/tester/pipeline.py b/checker/tester/pipeline.py index 3deee0b..cbfdcea 100644 --- a/checker/tester/pipeline.py +++ b/checker/tester/pipeline.py @@ -19,7 +19,7 @@ class PipelineStageResult: skipped: bool percentage: float = 0.0 elapsed_time: float | None = None - output: str = '' + output: str = "" def __str__(self) -> str: # pragma: no cover return f"PipelineStageResult: failed={int(self.failed)}, skipped={int(self.skipped)}, percentage={self.percentage:.2f}, name='{self.name}'" @@ -34,7 +34,9 @@ def __bool__(self) -> bool: return not self.failed def __str__(self) -> str: # pragma: no cover - return f'PipelineResult: failed={int(self.failed)}\n' + '\n'.join([f' {stage_result}' for stage_result in self.stage_results]) + return f"PipelineResult: failed={int(self.failed)}\n" + "\n".join( + [f" {stage_result}" for stage_result in self.stage_results] + ) class ParametersResolver: @@ -66,7 +68,9 @@ def resolve(self, template: str | list[str] | Any, context: dict[str, Any]) -> A elif isinstance(template, list): return [self.resolve(item, context) for item in template] elif isinstance(template, dict): - return {key: self.resolve(value, context) for key, value in template.items()} + return { + key: self.resolve(value, context) for key, value in template.items() + } else: return template @@ -98,9 +102,9 @@ def __init__( self.validate({}, validate_placeholders=False) def validate( - self, - context: dict[str, Any], - validate_placeholders: bool = True, + self, + context: dict[str, Any], + validate_placeholders: bool = True, ) -> None: """ Validate the pipeline configuration. 
@@ -111,17 +115,23 @@ def validate( for pipeline_stage in self.pipeline: # validate plugin exists if pipeline_stage.run not in self.plugins: - raise BadConfig(f"Unknown plugin {pipeline_stage.run} in pipeline stage {pipeline_stage.name}") + raise BadConfig( + f"Unknown plugin {pipeline_stage.run} in pipeline stage {pipeline_stage.name}" + ) plugin_class = self.plugins[pipeline_stage.run] # validate args of the plugin (first resolve placeholders) if validate_placeholders: - resolved_args = self.parameters_resolver.resolve(pipeline_stage.args, context) + resolved_args = self.parameters_resolver.resolve( + pipeline_stage.args, context + ) plugin_class.validate(resolved_args) # validate run_if condition if validate_placeholders and pipeline_stage.run_if: - resolved_run_if = self.parameters_resolver.resolve(pipeline_stage.run_if, context) + resolved_run_if = self.parameters_resolver.resolve( + pipeline_stage.run_if, context + ) if not isinstance(resolved_run_if, bool): raise BadConfig( f"Invalid run_if condition {pipeline_stage.run_if} in pipeline stage {pipeline_stage.name}" @@ -129,55 +139,66 @@ def validate( # add output to context if set register parameter if pipeline_stage.register_output: - context.setdefault('outputs', {})[pipeline_stage.register_output] = PipelineStageResult( + context.setdefault("outputs", {})[ + pipeline_stage.register_output + ] = PipelineStageResult( name=pipeline_stage.name, failed=False, skipped=True, ) def run( - self, - context: dict[str, Any], - *, - dry_run: bool = False, + self, + context: dict[str, Any], + *, + dry_run: bool = False, ) -> PipelineResult: - pipeline_stage_results = [] pipeline_passed = True skip_the_rest = False for pipeline_stage in self.pipeline: # resolve placeholders in arguments - resolved_args = self.parameters_resolver.resolve(pipeline_stage.args, context=context) - resolved_run_if = self.parameters_resolver.resolve(pipeline_stage.run_if, context=context) if pipeline_stage.run_if else None - - print_info(f'--> Running "{pipeline_stage.name}" stage:', color='orange') + resolved_args = self.parameters_resolver.resolve( + pipeline_stage.args, context=context + ) + resolved_run_if = ( + self.parameters_resolver.resolve(pipeline_stage.run_if, context=context) + if pipeline_stage.run_if + else None + ) + + print_info(f'--> Running "{pipeline_stage.name}" stage:', color="orange") if self.verbose: - print_info(f' run_if: {pipeline_stage.run_if}', color='grey') - print_info(f' resolved_run_if: {resolved_run_if}', color='grey') - print_info(f' fail: {pipeline_stage.fail}', color='grey') - print_info(f' run: {pipeline_stage.run}', color='grey') - print_info(f' args: {pipeline_stage.args}', color='grey') - print_info(f' resolved_args: {resolved_args}', color='grey') + print_info(f" run_if: {pipeline_stage.run_if}", color="grey") + print_info(f" resolved_run_if: {resolved_run_if}", color="grey") + print_info(f" fail: {pipeline_stage.fail}", color="grey") + print_info(f" run: {pipeline_stage.run}", color="grey") + print_info(f" args: {pipeline_stage.args}", color="grey") + print_info(f" resolved_args: {resolved_args}", color="grey") # skip the rest of stages if failed before if skip_the_rest: - print_info('skipped! (got error above)', color='blue') - pipeline_stage_results.append(PipelineStageResult( - name=pipeline_stage.name, - failed=False, - skipped=True, - )) + print_info("skipped! 
(got error above)", color="blue") + pipeline_stage_results.append( + PipelineStageResult( + name=pipeline_stage.name, + failed=False, + skipped=True, + ) + ) continue # resolve run condition if any; skip if run_if=False if pipeline_stage.run_if: if not resolved_run_if: - print_info(f'skipped! (run_if={resolved_run_if})', color='blue') - pipeline_stage_results.append(PipelineStageResult( - name=pipeline_stage.name, - failed=False, - skipped=True, - )) + print_info(f"skipped! (run_if={resolved_run_if})", color="blue") + pipeline_stage_results.append( + PipelineStageResult( + name=pipeline_stage.name, + failed=False, + skipped=True, + ) + ) continue # select the plugin to run @@ -186,14 +207,16 @@ def run( # skip if dry run if dry_run: - print_info('[output here]') - print_info('dry run!', color='blue') - pipeline_stage_results.append(PipelineStageResult( - name=pipeline_stage.name, - failed=False, - skipped=False, - percentage=1.0, - )) + print_info("[output here]") + print_info("dry run!", color="blue") + pipeline_stage_results.append( + PipelineStageResult( + name=pipeline_stage.name, + failed=False, + skipped=False, + percentage=1.0, + ) + ) continue # run the plugin with executor @@ -201,30 +224,38 @@ def run( try: result = plugin.run(resolved_args, verbose=self.verbose) _end_time = time.perf_counter() - print_info(result.output or '[empty output]') - print_info(f'> elapsed time: {_end_time-_start_time:.2f}s', color='grey') - print_info('ok!', color='green') - pipeline_stage_results.append(PipelineStageResult( - name=pipeline_stage.name, - failed=False, - skipped=False, - output=result.output, - percentage=1.0, # TODO: get percentage from plugin - elapsed_time=_end_time-_start_time, - )) + print_info(result.output or "[empty output]") + print_info( + f"> elapsed time: {_end_time-_start_time:.2f}s", color="grey" + ) + print_info("ok!", color="green") + pipeline_stage_results.append( + PipelineStageResult( + name=pipeline_stage.name, + failed=False, + skipped=False, + output=result.output, + percentage=1.0, # TODO: get percentage from plugin + elapsed_time=_end_time - _start_time, + ) + ) except PluginExecutionFailed as e: _end_time = time.perf_counter() - print_info(e.output or '[empty output]') - print_info(f'> elapsed time: {_end_time-_start_time:.2f}s', color='grey') - print_info('error!', color='red') - pipeline_stage_results.append(PipelineStageResult( - name=pipeline_stage.name, - failed=True, - skipped=False, - output=e.output or '', - percentage=e.percentage, - elapsed_time=_end_time-_start_time, - )) + print_info(e.output or "[empty output]") + print_info( + f"> elapsed time: {_end_time-_start_time:.2f}s", color="grey" + ) + print_info("error!", color="red") + pipeline_stage_results.append( + PipelineStageResult( + name=pipeline_stage.name, + failed=True, + skipped=False, + output=e.output or "", + percentage=e.percentage, + elapsed_time=_end_time - _start_time, + ) + ) if pipeline_stage.fail == PipelineStageConfig.FailType.FAST: skip_the_rest = True pipeline_passed = False @@ -237,7 +268,9 @@ def run( # register output if required if pipeline_stage.register_output: - context.setdefault('outputs', {})[pipeline_stage.register_output] = pipeline_stage_results[-1] + context.setdefault("outputs", {})[ + pipeline_stage.register_output + ] = pipeline_stage_results[-1] return PipelineResult( failed=not pipeline_passed, diff --git a/checker/tester/tester.py b/checker/tester/tester.py index dcdff2e..2d45206 100644 --- a/checker/tester/tester.py +++ b/checker/tester/tester.py @@ -7,17 
+7,22 @@ from typing import Any from ..configs import CheckerTestingConfig -from ..configs.checker import CheckerStructureConfig, CheckerConfig, CheckerParametersConfig +from ..configs.checker import ( + CheckerConfig, + CheckerParametersConfig, + CheckerStructureConfig, +) from ..course import Course, FileSystemTask from ..exceptions import PluginExecutionFailed, TestingError -from .pipeline import PipelineRunner, PipelineResult, PipelineStageResult from ..plugins import load_plugins -from ..utils import print_info, print_header_info, print_separator +from ..utils import print_header_info, print_info, print_separator +from .pipeline import PipelineResult, PipelineRunner, PipelineStageResult @dataclass class GlobalPipelineVariables: """Base variables passed in pipeline stages.""" + ref_dir: str repo_dir: str temp_dir: str @@ -29,6 +34,7 @@ class GlobalPipelineVariables: @dataclass class TaskPipelineVariables: """Variables passed in pipeline stages for each task.""" + task_name: str task_sub_path: str @@ -42,6 +48,7 @@ class Tester: 4. Collect results and push to them 5. Remove temporary directory """ + __test__ = False # do not collect this class for pytest def __init__( @@ -70,9 +77,15 @@ def __init__( self.default_params = checker_config.default_parameters self.plugins = load_plugins(self.testing_config.search_plugins, verbose=verbose) - self.global_pipeline = PipelineRunner(self.testing_config.global_pipeline, self.plugins, verbose=verbose) - self.task_pipeline = PipelineRunner(self.testing_config.tasks_pipeline, self.plugins, verbose=verbose) - self.report_pipeline = PipelineRunner(self.testing_config.report_pipeline, self.plugins, verbose=verbose) + self.global_pipeline = PipelineRunner( + self.testing_config.global_pipeline, self.plugins, verbose=verbose + ) + self.task_pipeline = PipelineRunner( + self.testing_config.tasks_pipeline, self.plugins, verbose=verbose + ) + self.report_pipeline = PipelineRunner( + self.testing_config.report_pipeline, self.plugins, verbose=verbose + ) self.repository_dir = self.course.repository_root self.reference_dir = self.course.reference_root @@ -83,7 +96,9 @@ def __init__( self.verbose = verbose self.dry_run = dry_run - def _get_global_pipeline_parameters(self, tasks: list[FileSystemTask]) -> GlobalPipelineVariables: + def _get_global_pipeline_parameters( + self, tasks: list[FileSystemTask] + ) -> GlobalPipelineVariables: return GlobalPipelineVariables( ref_dir=self.reference_dir.absolute().as_posix(), repo_dir=self.repository_dir.absolute().as_posix(), @@ -93,19 +108,21 @@ def _get_global_pipeline_parameters(self, tasks: list[FileSystemTask]) -> Global task_sub_paths=[task.relative_path for task in tasks], ) - def _get_task_pipeline_parameters(self, task: FileSystemTask) -> TaskPipelineVariables: + def _get_task_pipeline_parameters( + self, task: FileSystemTask + ) -> TaskPipelineVariables: return TaskPipelineVariables( task_name=task.name, task_sub_path=task.relative_path, ) def _get_context( - self, - global_variables: GlobalPipelineVariables, - task_variables: TaskPipelineVariables | None, - outputs: dict[str, PipelineStageResult], - default_parameters: CheckerParametersConfig, - task_parameters: CheckerParametersConfig | None, + self, + global_variables: GlobalPipelineVariables, + task_variables: TaskPipelineVariables | None, + outputs: dict[str, PipelineStageResult], + default_parameters: CheckerParametersConfig, + task_parameters: CheckerParametersConfig | None, ) -> dict[str, Any]: return { "global": global_variables, @@ -124,7 +141,9 @@ def 
validate(self) -> None: # validate global pipeline (only default params and variables available) print("- global pipeline...") global_variables = self._get_global_pipeline_parameters(tasks) - context = self._get_context(global_variables, None, outputs, self.default_params, None) + context = self._get_context( + global_variables, None, outputs, self.default_params, None + ) self.global_pipeline.validate(context, validate_placeholders=True) print(" ok") @@ -134,7 +153,13 @@ def validate(self) -> None: # create task context task_variables = self._get_task_pipeline_parameters(task) - context = self._get_context(global_variables, task_variables, outputs, self.default_params, task.config.parameters) + context = self._get_context( + global_variables, + task_variables, + outputs, + self.default_params, + task.config.parameters, + ) # check task parameter are # TODO: read pipeline from task config if any @@ -144,9 +169,9 @@ def validate(self) -> None: print(" ok") def run( - self, - tasks: list[FileSystemTask] | None = None, - report: bool = True, + self, + tasks: list[FileSystemTask] | None = None, + report: bool = True, ) -> None: # copy files for testing self.course.copy_files_for_testing(self.temporary_dir) @@ -158,12 +183,16 @@ def run( outputs: dict[str, PipelineStageResult] = {} # run global pipeline - print_header_info("Run global pipeline:", color='pink') + print_header_info("Run global pipeline:", color="pink") global_variables = self._get_global_pipeline_parameters(tasks) - context = self._get_context(global_variables, None, outputs, self.default_params, None) - global_pipeline_result: PipelineResult = self.global_pipeline.run(context, dry_run=self.dry_run) - print_separator('-') - print_info(str(global_pipeline_result), color='pink') + context = self._get_context( + global_variables, None, outputs, self.default_params, None + ) + global_pipeline_result: PipelineResult = self.global_pipeline.run( + context, dry_run=self.dry_run + ) + print_separator("-") + print_info(str(global_pipeline_result), color="pink") if not global_pipeline_result: raise TestingError("Global pipeline failed") @@ -171,31 +200,41 @@ def run( failed_tasks = [] for task in tasks: # run task pipeline - print_header_info(f"Run <{task.name}> task pipeline:", color='pink') + print_header_info(f"Run <{task.name}> task pipeline:", color="pink") # create task context task_variables = self._get_task_pipeline_parameters(task) - context = self._get_context(global_variables, task_variables, outputs, self.default_params, task.config.parameters) + context = self._get_context( + global_variables, + task_variables, + outputs, + self.default_params, + task.config.parameters, + ) # TODO: read pipeline from task config if any - task_pipeline_result: PipelineResult = self.task_pipeline.run(context, dry_run=self.dry_run) - print_separator('-') + task_pipeline_result: PipelineResult = self.task_pipeline.run( + context, dry_run=self.dry_run + ) + print_separator("-") - print_info(str(task_pipeline_result), color='pink') - print_separator('-') + print_info(str(task_pipeline_result), color="pink") + print_separator("-") # Report score if task pipeline succeeded if task_pipeline_result: - print_info(f"Reporting <{task.name}> task tests:", color='pink') + print_info(f"Reporting <{task.name}> task tests:", color="pink") if report: - task_report_result: PipelineResult = self.report_pipeline.run(context, dry_run=self.dry_run) + task_report_result: PipelineResult = self.report_pipeline.run( + context, dry_run=self.dry_run + ) if 
task_report_result: print_info("->Reporting succeeded") else: print_info("->Reporting failed") else: print_info("->Reporting disabled") - print_separator('-') + print_separator("-") else: failed_tasks.append(task.name) diff --git a/checker/utils.py b/checker/utils.py index 73b5693..9828777 100644 --- a/checker/utils.py +++ b/checker/utils.py @@ -5,51 +5,51 @@ def print_info( - *args: Any, - file: Any = None, - color: str | None = None, - **kwargs: Any, + *args: Any, + file: Any = None, + color: str | None = None, + **kwargs: Any, ) -> None: colors = { - 'white': '\033[97m', - 'cyan': '\033[96m', - 'pink': '\033[95m', - 'blue': '\033[94m', - 'orange': '\033[93m', - 'green': '\033[92m', - 'red': '\033[91m', - 'grey': '\033[90m', - 'endc': '\033[0m', + "white": "\033[97m", + "cyan": "\033[96m", + "pink": "\033[95m", + "blue": "\033[94m", + "orange": "\033[93m", + "green": "\033[92m", + "red": "\033[91m", + "grey": "\033[90m", + "endc": "\033[0m", } file = file or sys.stderr - data = ' '.join(map(str, args)) + data = " ".join(map(str, args)) if color in colors: - print(colors[color] + data + colors['endc'], file=file, **kwargs) + print(colors[color] + data + colors["endc"], file=file, **kwargs) else: print(data, file=file, **kwargs) file.flush() def print_separator( - symbol: str, - file: Any = None, - color: str = 'pink', - string_length: int = 80, + symbol: str, + file: Any = None, + color: str = "pink", + string_length: int = 80, ) -> None: print_info(symbol * string_length, color=color) def print_header_info( - header_string: str, - file: Any = None, - color: str = 'pink', - string_length: int = 80, - **kwargs: Any, + header_string: str, + file: Any = None, + color: str = "pink", + string_length: int = 80, + **kwargs: Any, ) -> None: - info_extended_string = ' ' + header_string + ' ' - print_info('', file=file) - print_separator(symbol='+', string_length=string_length, color=color, file=file) + info_extended_string = " " + header_string + " " + print_info("", file=file) + print_separator(symbol="+", string_length=string_length, color=color, file=file) print_info(f"{info_extended_string :+^{string_length}}", color=color, file=file) - print_separator(symbol='+', string_length=string_length, color=color, file=file) + print_separator(symbol="+", string_length=string_length, color=color, file=file) diff --git a/pyproject.toml b/pyproject.toml index 290cf83..f9cedd2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,6 +33,7 @@ dependencies = [ "python-gitlab >=3.0.0,<4.0.0", "requests >=2.20.0,<3.0.0", "unshare >=0.22,<0.30; sys_platform != 'darwin'", + "pytz >=2022.0,<2023.4; python_version < '3.9'", ] dynamic = ["version"] diff --git a/tests/conftest.py b/tests/conftest.py index 3b75f9c..ae93b74 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,8 +3,8 @@ def pytest_addoption(parser: pytest.Parser) -> None: parser.addoption( - '--integration', - action='store_true', + "--integration", + action="store_true", dest="integration", default=False, help="enable integration tests", @@ -15,7 +15,10 @@ def pytest_configure(config: pytest.Config) -> None: config.addinivalue_line("markers", "integration: mark test as integration test") -def pytest_collection_modifyitems(config: pytest.Config, items: list[pytest.Item]) -> None: +def pytest_collection_modifyitems( + config: pytest.Config, + items: list[pytest.Item], +) -> None: if config.getoption("--integration"): # --integration given in cli: do not skip integration tests return diff --git a/tests/plugins/test_aggregate.py 
b/tests/plugins/test_aggregate.py index e60f291..2506d87 100644 --- a/tests/plugins/test_aggregate.py +++ b/tests/plugins/test_aggregate.py @@ -5,36 +5,56 @@ import pytest from pydantic import ValidationError -from checker.plugins.aggregate import AggregatePlugin from checker.exceptions import PluginExecutionFailed +from checker.plugins.aggregate import AggregatePlugin class TestAggregatePlugin: - - @pytest.mark.parametrize("parameters, expected_exception", [ - ({'scores': [0.5, 1.0, 1], 'weights': [1, 2, 3], 'strategy': 'mean'}, None), - ({'scores': [0.5, 1.0, 1], 'weights': [1, 2, 3]}, None), - ({'scores': [0.5, 1.0, 1], 'weights': None}, None), - ({'scores': [0.5, 1.0, 1], 'strategy': 'mean'}, None), - ({'scores': [0.5, 1.0, 1]}, None), - ({'scores': [0.5, 1.0, 1], 'weights': [1, 2, 3], 'strategy': 'invalid_strategy'}, ValidationError), - ({}, ValidationError), - ]) - def test_plugin_args(self, parameters: dict[str, Any], expected_exception: Exception | None) -> None: + @pytest.mark.parametrize( + "parameters, expected_exception", + [ + ({"scores": [0.5, 1.0, 1], "weights": [1, 2, 3], "strategy": "mean"}, None), + ({"scores": [0.5, 1.0, 1], "weights": [1, 2, 3]}, None), + ({"scores": [0.5, 1.0, 1], "weights": None}, None), + ({"scores": [0.5, 1.0, 1], "strategy": "mean"}, None), + ({"scores": [0.5, 1.0, 1]}, None), + ( + { + "scores": [0.5, 1.0, 1], + "weights": [1, 2, 3], + "strategy": "invalid_strategy", + }, + ValidationError, + ), + ({}, ValidationError), + ], + ) + def test_plugin_args( + self, parameters: dict[str, Any], expected_exception: Exception | None + ) -> None: if expected_exception: with pytest.raises(expected_exception): AggregatePlugin.Args(**parameters) else: AggregatePlugin.Args(**parameters) - @pytest.mark.parametrize("scores, weights, strategy, expected", [ - ([10, 20, 30], None, "mean", 20.0), - ([1, 2, 3], [0.5, 0.5, 0.5], "sum", 3.0), - ([2, 4, 6], [1, 2, 3], "min", 2.0), - ([5, 10, 15], [1, 1, 1], "max", 15.0), - ([3, 3, 3], [1, 1, 1], "product", 27.0), - ]) - def test_aggregate_strategies(self, scores: list[float], weights: list[float] | None, strategy: str, expected: float) -> None: + @pytest.mark.parametrize( + "scores, weights, strategy, expected", + [ + ([10, 20, 30], None, "mean", 20.0), + ([1, 2, 3], [0.5, 0.5, 0.5], "sum", 3.0), + ([2, 4, 6], [1, 2, 3], "min", 2.0), + ([5, 10, 15], [1, 1, 1], "max", 15.0), + ([3, 3, 3], [1, 1, 1], "product", 27.0), + ], + ) + def test_aggregate_strategies( + self, + scores: list[float], + weights: list[float] | None, + strategy: str, + expected: float, + ) -> None: plugin = AggregatePlugin() args = AggregatePlugin.Args(scores=scores, weights=weights, strategy=strategy) @@ -42,11 +62,14 @@ def test_aggregate_strategies(self, scores: list[float], weights: list[float] | assert expected == result.percentage assert f"Score: {expected:.2f}" in result.output - @pytest.mark.parametrize("scores, weights", [ - ([1, 2, 3], [1, 2]), - ([1], [1, 2]), - ([], []), - ]) + @pytest.mark.parametrize( + "scores, weights", + [ + ([1, 2, 3], [1, 2]), + ([1], [1, 2]), + ([], []), + ], + ) def test_length_mismatch(self, scores: list[float], weights: list[float]) -> None: # TODO: move to args validation plugin = AggregatePlugin() diff --git a/tests/plugins/test_regex.py b/tests/plugins/test_regex.py index bcef3d2..1e3cded 100644 --- a/tests/plugins/test_regex.py +++ b/tests/plugins/test_regex.py @@ -7,12 +7,11 @@ import pytest from pydantic import ValidationError -from checker.plugins.regex import CheckRegexpsPlugin from checker.exceptions 
import PluginExecutionFailed +from checker.plugins.regex import CheckRegexpsPlugin class TestCheckRegexpsPlugin: - T_CREATE_TEST_FILES = Callable[[dict[str, str]], Path] @pytest.fixture @@ -23,31 +22,51 @@ def _create_test_files(files_content: dict[str, str]) -> Path: with open(file, "w") as f: f.write(content) return tmpdir + return _create_test_files # TODO: add tests with wrong patterns and regexps - @pytest.mark.parametrize("parameters, expected_exception", [ - ({'origin': '/tmp/123', 'patterns': ['*', '*.py'], 'regexps': ['error']}, None), - ({'patterns': ['*', '*.py'], 'regexps': ['error']}, ValidationError), - ({'origin': '/tmp/123', 'patterns': ['*', '*.py']}, ValidationError), - ({'origin': '/tmp/123', 'patterns': None, 'regexps': None}, ValidationError), - ]) - def test_plugin_args(self, parameters: dict[str, Any], expected_exception: Exception | None) -> None: + @pytest.mark.parametrize( + "parameters, expected_exception", + [ + ( + {"origin": "/tmp/123", "patterns": ["*", "*.py"], "regexps": ["error"]}, + None, + ), + ({"patterns": ["*", "*.py"], "regexps": ["error"]}, ValidationError), + ({"origin": "/tmp/123", "patterns": ["*", "*.py"]}, ValidationError), + ( + {"origin": "/tmp/123", "patterns": None, "regexps": None}, + ValidationError, + ), + ], + ) + def test_plugin_args( + self, parameters: dict[str, Any], expected_exception: Exception | None + ) -> None: if expected_exception: with pytest.raises(expected_exception): CheckRegexpsPlugin.Args(**parameters) else: CheckRegexpsPlugin.Args(**parameters) - @pytest.mark.parametrize("patterns, expected_exception", [ - (["*.txt"], PluginExecutionFailed), - (["test2.txt", "*cpp"], None), - (["*"], PluginExecutionFailed), - (["*.md"], PluginExecutionFailed), - (["test?.txt"], PluginExecutionFailed), - (["test2.txt", "test1.txt"], PluginExecutionFailed), - ]) - def test_pattern_matching(self, create_test_files: T_CREATE_TEST_FILES, patterns: list[str], expected_exception: Exception | None) -> None: + @pytest.mark.parametrize( + "patterns, expected_exception", + [ + (["*.txt"], PluginExecutionFailed), + (["test2.txt", "*cpp"], None), + (["*"], PluginExecutionFailed), + (["*.md"], PluginExecutionFailed), + (["test?.txt"], PluginExecutionFailed), + (["test2.txt", "test1.txt"], PluginExecutionFailed), + ], + ) + def test_pattern_matching( + self, + create_test_files: T_CREATE_TEST_FILES, + patterns: list[str], + expected_exception: Exception | None, + ) -> None: files_content = { "test1.txt": "This is a test file with forbidden content", "test2.txt": "This file is safe", @@ -59,7 +78,9 @@ def test_pattern_matching(self, create_test_files: T_CREATE_TEST_FILES, patterns regexps = ["forbidden"] plugin = CheckRegexpsPlugin() - args = CheckRegexpsPlugin.Args(origin=str(origin), patterns=patterns, regexps=regexps) + args = CheckRegexpsPlugin.Args( + origin=str(origin), patterns=patterns, regexps=regexps + ) if expected_exception: with pytest.raises(expected_exception): @@ -67,14 +88,22 @@ def test_pattern_matching(self, create_test_files: T_CREATE_TEST_FILES, patterns else: assert plugin._run(args).output == "No forbidden regexps found" - @pytest.mark.parametrize("regexps, expected_exception", [ - (["not_found"], None), - (["forbidden"], PluginExecutionFailed), - (["fo.*en"], PluginExecutionFailed), - (["not_found", "fo.?bi.?den"], PluginExecutionFailed), - (["fo.?bi.?den", "not_found"], PluginExecutionFailed), - ]) - def test_check_regexps(self, create_test_files: T_CREATE_TEST_FILES, regexps: list[str], expected_exception: Exception | None) 
-> None: + @pytest.mark.parametrize( + "regexps, expected_exception", + [ + (["not_found"], None), + (["forbidden"], PluginExecutionFailed), + (["fo.*en"], PluginExecutionFailed), + (["not_found", "fo.?bi.?den"], PluginExecutionFailed), + (["fo.?bi.?den", "not_found"], PluginExecutionFailed), + ], + ) + def test_check_regexps( + self, + create_test_files: T_CREATE_TEST_FILES, + regexps: list[str], + expected_exception: Exception | None, + ) -> None: files_content = { "test1.txt": "This is a test file with forbidden content", "test2.txt": "This file is safe", @@ -86,7 +115,9 @@ def test_check_regexps(self, create_test_files: T_CREATE_TEST_FILES, regexps: li patterns = ["*"] plugin = CheckRegexpsPlugin() - args = CheckRegexpsPlugin.Args(origin=str(origin), patterns=patterns, regexps=regexps) + args = CheckRegexpsPlugin.Args( + origin=str(origin), patterns=patterns, regexps=regexps + ) if expected_exception: with pytest.raises(expected_exception) as exc_info: @@ -94,12 +125,18 @@ def test_check_regexps(self, create_test_files: T_CREATE_TEST_FILES, regexps: li assert "matches regexp" in str(exc_info.value) else: assert plugin._run(args).output == "No forbidden regexps found" - assert plugin._run(args, verbose=True).output == "No forbidden regexps found" - assert plugin._run(args, verbose=False).output == "No forbidden regexps found" + assert ( + plugin._run(args, verbose=True).output == "No forbidden regexps found" + ) + assert ( + plugin._run(args, verbose=False).output == "No forbidden regexps found" + ) def test_non_existent_origin(self) -> None: plugin = CheckRegexpsPlugin() - args = CheckRegexpsPlugin.Args(origin="/tmp/non_existent", patterns=["*.txt"], regexps=["forbidden"]) + args = CheckRegexpsPlugin.Args( + origin="/tmp/non_existent", patterns=["*.txt"], regexps=["forbidden"] + ) with pytest.raises(PluginExecutionFailed) as exc_info: plugin._run(args) diff --git a/tests/plugins/test_scripts.py b/tests/plugins/test_scripts.py index 2bbf7e1..e6a3291 100644 --- a/tests/plugins/test_scripts.py +++ b/tests/plugins/test_scripts.py @@ -1,44 +1,65 @@ from __future__ import annotations +import subprocess from collections.abc import Callable from pathlib import Path from typing import Any -import subprocess +from unittest.mock import MagicMock, patch import pytest -from unittest.mock import patch, MagicMock from pydantic import ValidationError -from checker.plugins.scripts import RunScriptPlugin from checker.exceptions import PluginExecutionFailed +from checker.plugins.scripts import RunScriptPlugin class TestRunScriptPlugin: - - @pytest.mark.parametrize("parameters, expected_exception", [ - ({'origin': '/tmp/123', 'script': 'echo Hello'}, None), - ({'origin': '/tmp/123', 'script': 123}, ValidationError), - ({'origin': '/tmp/123', 'script': ['echo', 'Hello']}, None), - ({'origin': '/tmp/123', 'script': 'echo Hello', 'timeout': 10}, None), - # ({'origin': '/tmp/123', 'script': 'echo Hello', 'timeout': '10'}, ValidationError), - ({'origin': '/tmp/123', 'script': 'echo Hello', 'isolate': True}, None), - ({'origin': '/tmp/123', 'script': 'echo Hello', 'env_whitelist': ['PATH']}, None), - ]) - def test_plugin_args(self, parameters: dict[str, Any], expected_exception: Exception | None) -> None: + @pytest.mark.parametrize( + "parameters, expected_exception", + [ + ({"origin": "/tmp", "script": "echo Hello"}, None), + ({"origin": "/tmp", "script": 123}, ValidationError), + ({"origin": "/tmp", "script": ["echo", "Hello"]}, None), + ({"origin": "/tmp", "script": "echo Hello", "timeout": 10}, None), + # 
TODO: check why timeout is not validated
+            pytest.param(
+                {"origin": "/tmp", "script": "echo Hello", "timeout": "10"},
+                ValidationError,
+                marks=pytest.mark.xfail(),
+            ),
+            ({"origin": "/tmp", "script": "echo Hello", "isolate": True}, None),
+            (
+                {
+                    "origin": "/tmp",
+                    "script": "echo Hello",
+                    "env_whitelist": ["PATH"],
+                },
+                None,
+            ),
+        ],
+    )
+    def test_plugin_args(
+        self, parameters: dict[str, Any], expected_exception: Exception | None
+    ) -> None:
         if expected_exception:
             with pytest.raises(expected_exception):
                 RunScriptPlugin.Args(**parameters)
         else:
             RunScriptPlugin.Args(**parameters)
 
-    @pytest.mark.parametrize("script, output, expected_exception", [
-        ("echo Hello", "Hello", None),
-        ("sleep 0.1", "", None),
-        ("true", "", None),
-        ("false", "", PluginExecutionFailed),
-        ("echo Hello && false", "Hello", PluginExecutionFailed),
-    ])
-    def test_simple_cases(self, script: str, output: str, expected_exception: Exception | None) -> None:
+    @pytest.mark.parametrize(
+        "script, output, expected_exception",
+        [
+            ("echo Hello", "Hello", None),
+            ("sleep 0.1", "", None),
+            ("true", "", None),
+            ("false", "", PluginExecutionFailed),
+            ("echo Hello && false", "Hello", PluginExecutionFailed),
+        ],
+    )
+    def test_simple_cases(
+        self, script: str, output: str, expected_exception: Exception | None
+    ) -> None:
         plugin = RunScriptPlugin()
         args = RunScriptPlugin.Args(origin="/tmp", script=script)
@@ -50,13 +71,18 @@ def test_simple_cases(self, script: str, output: str, expected_exception: Except
             result = plugin._run(args)
             assert result.output.strip() == output
 
-    @pytest.mark.parametrize("script, timeout, expected_exception", [
-        ("echo Hello", 10, None),
-        ("sleep 0.5", 1, None),
-        ("sleep 0.5", None, None),
-        ("sleep 1", 0.5, PluginExecutionFailed),
-    ])
-    def test_timeout(self, script: str, timeout: float, expected_exception: Exception | None) -> None:
+    @pytest.mark.parametrize(
+        "script, timeout, expected_exception",
+        [
+            ("echo Hello", 10, None),
+            ("sleep 0.5", 1, None),
+            ("sleep 0.5", None, None),
+            ("sleep 1", 0.5, PluginExecutionFailed),
+        ],
+    )
+    def test_timeout(
+        self, script: str, timeout: float, expected_exception: Exception | None
+    ) -> None:
         # TODO: check if timeout float
         plugin = RunScriptPlugin()
         args = RunScriptPlugin.Args(origin="/tmp", script=script, timeout=timeout)
@@ -67,17 +93,23 @@ def test_timeout(self, script: str, timeout: float, expected_exception: Exceptio
         else:
             plugin._run(args)
 
-    @pytest.mark.parametrize("script, env_whitelist, mocked_env", [
-        ("env", ["CUSTOM_VAR"], {"FILTERED_ONE": "1", "CUSTOM_VAR": "test_value"}),
-        # TODO: expand this test
-    ])
-    def test_run_with_environment_variable(self, script: str, env_whitelist: list[str], mocked_env: dict[str, str]) -> None:
+    @pytest.mark.parametrize(
+        "script, env_whitelist, mocked_env",
+        [
+            ("env", ["CUSTOM_VAR"], {"FILTERED_ONE": "1", "CUSTOM_VAR": "test_value"}),
+            # TODO: expand this test
+        ],
+    )
+    def test_run_with_environment_variable(
+        self, script: str, env_whitelist: list[str], mocked_env: dict[str, str]
+    ) -> None:
         plugin = RunScriptPlugin()
-        args = RunScriptPlugin.Args(origin="/tmp", script=script, env_whitelist=env_whitelist)
-
-        with patch.dict('os.environ', mocked_env, clear=True):
+        args = RunScriptPlugin.Args(
+            origin="/tmp", script=script, env_whitelist=env_whitelist
+        )
+
+        with patch.dict("os.environ", mocked_env, clear=True):
             result = plugin._run(args)
             assert "CUSTOM_VAR" in result.output
             assert mocked_env["CUSTOM_VAR"] in result.output
             assert "FILTERED_ONE" not in result.output
-
\ No newline at end of file
diff --git a/tests/test_dummy.py b/tests/test_dummy.py
index 111246f..f4f5361 100644
--- a/tests/test_dummy.py
+++ b/tests/test_dummy.py
@@ -1,4 +1,2 @@
-
-
 def test_dummy():
     assert True
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
index 9dd69df..578432a 100644
--- a/tests/test_pipeline.py
+++ b/tests/test_pipeline.py
@@ -104,14 +104,18 @@ def test_plugins(self, sample_plugins: dict[str, Type[PluginABC]]) -> None:
 
 
 class TestPipelineRunnerValidation:
-    def test_correct_pipeline(self, sample_correct_pipeline: list[PipelineStageConfig], sample_plugins: dict[str, Type[PluginABC]]) -> None:
+    def test_correct_pipeline(
+        self,
+        sample_correct_pipeline: list[PipelineStageConfig],
+        sample_plugins: dict[str, Type[PluginABC]],
+    ) -> None:
         pipeline_runner = PipelineRunner(
             pipeline=sample_correct_pipeline,
             plugins=sample_plugins,
             verbose=False,
         )
         pipeline_runner.validate({}, validate_placeholders=False)
-        pipeline_runner.validate({'message': 'Hello'}, validate_placeholders=True)
+        pipeline_runner.validate({"message": "Hello"}, validate_placeholders=True)
 
         with pytest.raises(BadConfig):
             pipeline_runner.validate({}, validate_placeholders=True)
diff --git a/tests/test_resolver.py b/tests/test_resolver.py
index 22e4089..d49d599 100644
--- a/tests/test_resolver.py
+++ b/tests/test_resolver.py
@@ -10,71 +10,126 @@
 
 
 class TestParametersResolver:
-    @pytest.mark.parametrize("template, context, expected", [
-        ("${{ a }}", {"a": 2}, 2),
-        pytest.param("${{ b }}", {"b": "2"}, "2", marks=pytest.mark.xfail()),  # TODO: check why returned as int
-        ("${{ c }}", {"c": [1, 2, "4"]}, [1, 2, "4"]),
-        (" ${{ d }}", {"d": 2}, 2),
-        ("${{ e }} ", {"e": 2}, 2),
-        ("${{ f }} some string", {"f": 2}, "2 some string"),
-        ("${{ g }} + ${{ g }}", {"g": 2}, "2 + 2"),
-        ("${{ h }}", {"h": 2.1}, 2.1),
-        ("${{ i }}", {"i": 2.0}, 2.0),
-    ])
-    def test_keep_native_type(self, template: str, context: dict[str, Any], expected: Any) -> None:
+    @pytest.mark.parametrize(
+        "template, context, expected",
+        [
+            ("${{ a }}", {"a": 2}, 2),
+            pytest.param(
+                "${{ b }}", {"b": "2"}, "2", marks=pytest.mark.xfail()
+            ),  # TODO: check why returned as int
+            ("${{ c }}", {"c": [1, 2, "4"]}, [1, 2, "4"]),
+            (" ${{ d }}", {"d": 2}, 2),
+            ("${{ e }} ", {"e": 2}, 2),
+            ("${{ f }} some string", {"f": 2}, "2 some string"),
+            ("${{ g }} + ${{ g }}", {"g": 2}, "2 + 2"),
+            ("${{ h }}", {"h": 2.1}, 2.1),
+            ("${{ i }}", {"i": 2.0}, 2.0),
+        ],
+    )
+    def test_keep_native_type(
+        self, template: str, context: dict[str, Any], expected: Any
+    ) -> None:
         resolver = ParametersResolver()
         assert resolver.resolve(template, context) == expected
 
-    @pytest.mark.parametrize("template, context, expected", [
-        ("${{ a }}", {"a": 2}, 2),
-        ("Hello, ${{ name }}!", {"name": "World"}, "Hello, World!"),
-        ("${{ a }} + ${{ b }} = ${{ a + b }}", {"a": 2, "b": 3}, "2 + 3 = 5"),
-        ("${{ a }}", {"a": 2, "b": 3}, 2),
-    ])
-    def test_string_input(self, template: str, context: dict[str, Any], expected: Any) -> None:
+    @pytest.mark.parametrize(
+        "template, context, expected",
+        [
+            ("${{ a }}", {"a": 2}, 2),
+            ("Hello, ${{ name }}!", {"name": "World"}, "Hello, World!"),
+            ("${{ a }} + ${{ b }} = ${{ a + b }}", {"a": 2, "b": 3}, "2 + 3 = 5"),
+            ("${{ a }}", {"a": 2, "b": 3}, 2),
+        ],
+    )
+    def test_string_input(
+        self, template: str, context: dict[str, Any], expected: Any
+    ) -> None:
         resolver = ParametersResolver()
         assert resolver.resolve(template, context) == expected
 
-    @pytest.mark.parametrize("template, context, expected", [
-        (["${{ item }}", "${{ item }}2"], {"item": "test"}, ["test", "test2"]),
-        (["${{ a }}", "${{ b }}"], {"a": 1, "b": 2}, [1, 2]),
-        (["${{ a }}", ["${{ b }}", "${{ c }}"]], {"a": 1, "b": 2, "c": 3}, [1, [2, 3]]),
-    ])
-    def test_list_input(self, template: list[Any], context: dict[str, Any], expected: list[Any]) -> None:
+    @pytest.mark.parametrize(
+        "template, context, expected",
+        [
+            (["${{ item }}", "${{ item }}2"], {"item": "test"}, ["test", "test2"]),
+            (["${{ a }}", "${{ b }}"], {"a": 1, "b": 2}, [1, 2]),
+            (
+                ["${{ a }}", ["${{ b }}", "${{ c }}"]],
+                {"a": 1, "b": 2, "c": 3},
+                [1, [2, 3]],
+            ),
+        ],
+    )
+    def test_list_input(
+        self, template: list[Any], context: dict[str, Any], expected: list[Any]
+    ) -> None:
         resolver = ParametersResolver()
         assert resolver.resolve(template, context) == expected
 
-    @pytest.mark.parametrize("template, context, expected", [
-        ({"key1": "${{ a }}", "key2": "${{ b }}"}, {"a": "x", "b": "y"}, {"key1": "x", "key2": "y"}),
-        ({"name": "Hello, ${{ name }}!"}, {"name": "Alice"}, {"name": "Hello, Alice!"}),
-        ({"key1": "${{ a }}", "key2": {"key3": "${{ b }}"}}, {"a": 1, "b": 2}, {"key1": 1, "key2": {"key3": 2}}),
-    ])
-    def test_dict_input(self, template: dict[str, Any], context: dict[str, Any], expected: dict[str, Any]) -> None:
+    @pytest.mark.parametrize(
+        "template, context, expected",
+        [
+            (
+                {"key1": "${{ a }}", "key2": "${{ b }}"},
+                {"a": "x", "b": "y"},
+                {"key1": "x", "key2": "y"},
+            ),
+            (
+                {"name": "Hello, ${{ name }}!"},
+                {"name": "Alice"},
+                {"name": "Hello, Alice!"},
+            ),
+            (
+                {"key1": "${{ a }}", "key2": {"key3": "${{ b }}"}},
+                {"a": 1, "b": 2},
+                {"key1": 1, "key2": {"key3": 2}},
+            ),
+        ],
+    )
+    def test_dict_input(
+        self,
+        template: dict[str, Any],
+        context: dict[str, Any],
+        expected: dict[str, Any],
+    ) -> None:
         resolver = ParametersResolver()
         assert resolver.resolve(template, context) == expected
 
-    @pytest.mark.parametrize("template, context", [
-        (1, {}),
-        (1, {"a": 1}),
-        (1.0, {"a": 1}),
-        ("some string", {"a": 1}),
-        ("a", {"a": 1}),
-        ("{a}", {"a": 1}),
-        ({}, {"a": 1}),
-        ([None, {1, 2, 3}, ["a", "b"]], {"a": 1}),
-    ])
+    @pytest.mark.parametrize(
+        "template, context",
+        [
+            (1, {}),
+            (1, {"a": 1}),
+            (1.0, {"a": 1}),
+            ("some string", {"a": 1}),
+            ("a", {"a": 1}),
+            ("{a}", {"a": 1}),
+            ({}, {"a": 1}),
+            ([None, {1, 2, 3}, ["a", "b"]], {"a": 1}),
+        ],
+    )
     def test_non_template(self, template: Any, context: dict[str, Any]) -> None:
         resolver = ParametersResolver()
         template_copy = copy.deepcopy(template)
         assert resolver.resolve(template, context) == template_copy
 
-    @pytest.mark.parametrize("template, context", [
-        ("${{ invalid_syntax", {"invalid_syntax": 2}),
-        pytest.param("${{ valid_var.invalid_field }}", {"valid_var": {'valid_field': 1}}, marks=pytest.mark.xfail()),
-        pytest.param("${{ not_existing }} ${{ a }}", {"a": 2}, marks=pytest.mark.xfail()),
-        pytest.param("${{ not_existing }}", {"a": 2}, marks=pytest.mark.xfail()),
-        pytest.param("invalid_syntax }}", {"invalid_syntax": 2}, marks=pytest.mark.xfail()),
-    ])
+    @pytest.mark.parametrize(
+        "template, context",
+        [
+            ("${{ invalid_syntax", {"invalid_syntax": 2}),
+            pytest.param(
+                "${{ valid_var.invalid_field }}",
+                {"valid_var": {"valid_field": 1}},
+                marks=pytest.mark.xfail(),
+            ),
+            pytest.param(
+                "${{ not_existing }} ${{ a }}", {"a": 2}, marks=pytest.mark.xfail()
+            ),
+            pytest.param("${{ not_existing }}", {"a": 2}, marks=pytest.mark.xfail()),
+            pytest.param(
+                "invalid_syntax }}", {"invalid_syntax": 2}, marks=pytest.mark.xfail()
+            ),
+        ],
+    )
     def test_invalid_template(self, template: Any, context: dict[str, Any]) -> None:
         resolver = ParametersResolver()
         with pytest.raises(BadConfig):