diff --git a/checker/__main__.py b/checker/__main__.py
index a1be499..a6b120c 100644
--- a/checker/__main__.py
+++ b/checker/__main__.py
@@ -15,12 +15,8 @@
 from .exceptions import BadConfig, TestingError
 
-ClickReadableFile = click.Path(
-    exists=True, file_okay=True, readable=True, path_type=Path
-)
-ClickReadableDirectory = click.Path(
-    exists=True, file_okay=False, readable=True, path_type=Path
-)
+ClickReadableFile = click.Path(exists=True, file_okay=True, readable=True, path_type=Path)
+ClickReadableDirectory = click.Path(exists=True, file_okay=False, readable=True, path_type=Path)
 ClickWritableDirectory = click.Path(file_okay=False, writable=True, path_type=Path)
@@ -54,9 +50,7 @@ def cli(
 @cli.command()
 @click.argument("root", type=ClickReadableDirectory, default=".")
-@click.option(
-    "-v/-s", "--verbose/--silent", is_flag=True, default=True, help="Verbose output"
-)
+@click.option("-v/-s", "--verbose/--silent", is_flag=True, default=True, help="Verbose output")
 @click.pass_context
 def validate(
     ctx: click.Context,
@@ -159,9 +153,7 @@ def validate(
     default=True,
     help="Verbose tests output",
 )
-@click.option(
-    "--dry-run", is_flag=True, help="Do not execute anything, only log actions"
-)
+@click.option("--dry-run", is_flag=True, help="Do not execute anything, only log actions")
 @click.pass_context
 def check(
     ctx: click.Context,
@@ -244,15 +236,9 @@ def check(
 @cli.command()
 @click.argument("root", type=ClickReadableDirectory, default=".")
 @click.argument("reference_root", type=ClickReadableDirectory, default=".")
-@click.option(
-    "--submit-score", is_flag=True, help="Submit score to the Manytask server"
-)
-@click.option(
-    "--timestamp", type=str, default=None, help="Timestamp to use for the submission"
-)
-@click.option(
-    "--username", type=str, default=None, help="Username to use for the submission"
-)
+@click.option("--submit-score", is_flag=True, help="Submit score to the Manytask server")
+@click.option("--timestamp", type=str, default=None, help="Timestamp to use for the submission")
+@click.option("--username", type=str, default=None, help="Username to use for the submission")
 @click.option("--no-clean", is_flag=True, help="Clean or not check tmp folders")
 @click.option(
     "-v/-s",
     "--verbose/--silent",
     is_flag=True,
     default=False,
     help="Verbose tests output",
 )
@@ -261,9 +247,7 @@ def check(
-@click.option(
-    "--dry-run", is_flag=True, help="Do not execute anything, only log actions"
-)
+@click.option("--dry-run", is_flag=True, help="Do not execute anything, only log actions")
 @click.pass_context
 def grade(
     ctx: click.Context,
@@ -305,9 +289,7 @@ def grade(
     # detect changes to test
     filesystem_tasks: list[FileSystemTask] = list()  # TODO: detect changes
-    filesystem_tasks = [
-        task for task in course.get_tasks(enabled=True) if task.name == "hello_world"
-    ]
+    filesystem_tasks = [task for task in course.get_tasks(enabled=True) if task.name == "hello_world"]
 
     # create tester to... to test =)
     tester = Tester(course, checker_config, verbose=verbose, dry_run=dry_run)
@@ -334,12 +316,8 @@ def grade(
 @cli.command()
 @click.argument("reference_root", type=ClickReadableDirectory, default=".")
 @click.argument("export_root", type=ClickWritableDirectory, default="./export")
-@click.option(
-    "--commit", is_flag=True, help="Commit and push changes to the repository"
-)
-@click.option(
-    "--dry-run", is_flag=True, help="Do not execute anything, only log actions"
-)
+@click.option("--commit", is_flag=True, help="Commit and push changes to the repository")
+@click.option("--dry-run", is_flag=True, help="Do not execute anything, only log actions")
 @click.pass_context
 def export(
     ctx: click.Context,
diff --git a/checker/configs/deadlines.py b/checker/configs/deadlines.py
index 6f8442b..4cda0fa 100644
--- a/checker/configs/deadlines.py
+++ b/checker/configs/deadlines.py
@@ -94,18 +94,12 @@ def check_dates(self) -> "DeadlinesGroupConfig":
         if isinstance(self.end, timedelta) and self.end < timedelta():
             raise ValueError(f"end timedelta <{self.end}> should be positive")
         if isinstance(self.end, datetime) and self.end < self.start:
-            raise ValueError(
-                f"end datetime <{self.end}> should be after the start <{self.start}>"
-            )
+            raise ValueError(f"end datetime <{self.end}> should be after the start <{self.start}>")
 
         # check steps
         last_step_date_or_delta: datetime | timedelta = self.start
         for _, date_or_delta in self.steps.items():
-            step_date = (
-                self.start + date_or_delta
-                if isinstance(date_or_delta, timedelta)
-                else date_or_delta
-            )
+            step_date = self.start + date_or_delta if isinstance(date_or_delta, timedelta) else date_or_delta
             last_step_date = (
                 self.start + last_step_date_or_delta
                 if isinstance(last_step_date_or_delta, timedelta)
@@ -115,9 +109,7 @@ def check_dates(self) -> "DeadlinesGroupConfig":
             if isinstance(date_or_delta, timedelta) and date_or_delta < timedelta():
                 raise ValueError(f"step timedelta <{date_or_delta}> should be positive")
             if isinstance(date_or_delta, datetime) and date_or_delta <= self.start:
-                raise ValueError(
-                    f"step datetime <{date_or_delta}> should be after the start {self.start}"
-                )
+                raise ValueError(f"step datetime <{date_or_delta}> should be after the start {self.start}")
 
             if step_date <= last_step_date:
                 raise ValueError(
@@ -154,9 +146,7 @@ def get_tasks(
         self,
         enabled: bool | None = None,
     ) -> list[DeadlinesTaskConfig]:
-        tasks = [
-            task for group in self.get_groups(enabled=enabled) for task in group.tasks
-        ]
+        tasks = [task for group in self.get_groups(enabled=enabled) for task in group.tasks]
 
         if enabled is not None:
             tasks = [task for task in tasks if task.enabled == enabled]
@@ -174,9 +164,7 @@ def check_version(cls, data: int) -> int:
 
     @field_validator("schedule")
     @classmethod
-    def check_group_names_unique(
-        cls, data: list[DeadlinesGroupConfig]
-    ) -> list[DeadlinesGroupConfig]:
+    def check_group_names_unique(cls, data: list[DeadlinesGroupConfig]) -> list[DeadlinesGroupConfig]:
         groups = [group.name for group in data]
         duplicates = [name for name in groups if groups.count(name) > 1]
         if duplicates:
@@ -185,9 +173,7 @@ def check_group_names_unique(
 
     @field_validator("schedule")
     @classmethod
-    def check_task_names_unique(
-        cls, data: list[DeadlinesGroupConfig]
-    ) -> list[DeadlinesGroupConfig]:
+    def check_task_names_unique(cls, data: list[DeadlinesGroupConfig]) -> list[DeadlinesGroupConfig]:
         tasks_names = [task.name for group in data for task in group.tasks]
         duplicates = [name for name in tasks_names if tasks_names.count(name) > 1]
         if duplicates:
diff --git a/checker/course.py b/checker/course.py
index c635e25..d12ab33 100644
--- a/checker/course.py
+++ b/checker/course.py
@@ -42,15 +42,8 @@ def __init__(
         self.repository_root = repository_root
         self.reference_root = reference_root or repository_root
 
-        self.potential_groups = {
-            group.name: group
-            for group in self._search_potential_groups(self.repository_root)
-        }
-        self.potential_tasks = {
-            task.name: task
-            for group in self.potential_groups.values()
-            for task in group.tasks
-        }
+        self.potential_groups = {group.name: group for group in self._search_potential_groups(self.repository_root)}
+        self.potential_tasks = {task.name: task for group in self.potential_groups.values() for task in group.tasks}
 
     def validate(self) -> None:
         # check all groups and tasks mentioned in deadlines exists
@@ -62,9 +55,7 @@ def validate(self) -> None:
         deadlines_tasks = self.deadlines.get_tasks(enabled=True)
         for deadlines_task in deadlines_tasks:
             if deadlines_task.name not in self.potential_tasks:
-                raise BadConfig(
-                    f"Task {deadlines_task.name} of not found in repository"
-                )
+                raise BadConfig(f"Task {deadlines_task.name} of not found in repository")
 
     def get_groups(
         self,
@@ -107,9 +98,7 @@ def _search_potential_groups(root: Path) -> list[FileSystemGroup]:
             try:
                 task_config = TaskConfig.from_yaml(task_config_path)
             except BadConfig as e:
-                raise BadConfig(
-                    f"Task config {task_config_path} is invalid:\n{e}"
-                )
+                raise BadConfig(f"Task config {task_config_path} is invalid:\n{e}")
 
             potential_tasks.append(
                 FileSystemTask(
diff --git a/checker/exporter.py b/checker/exporter.py
index 3d4f169..f44d83e 100644
--- a/checker/exporter.py
+++ b/checker/exporter.py
@@ -78,22 +78,19 @@ def export_public(
                     "*",
                     *(
                         task_ignore
-                        if (task_ignore := task.config.structure.public_patterns)
-                        is not None
+                        if (task_ignore := task.config.structure.public_patterns) is not None
                         else global_public_patterns
                     ),
                 ],
                 [
                     *(
                         task_ignore
-                        if (task_ignore := task.config.structure.private_patterns)
-                        is not None
+                        if (task_ignore := task.config.structure.private_patterns) is not None
                         else global_private_patterns
                     ),
                     *(
                         task_ignore
-                        if (task_ignore := task.config.structure.ignore_patterns)
-                        is not None
+                        if (task_ignore := task.config.structure.ignore_patterns) is not None
                         else global_ignore_patterns
                     ),
                 ],
@@ -134,20 +131,17 @@ def export_for_testing(
                 [
                     *(
                         task_ignore
-                        if (task_ignore := task.config.structure.ignore_patterns)
-                        is not None
+                        if (task_ignore := task.config.structure.ignore_patterns) is not None
                         else global_ignore_patterns
                     ),
                     *(
                         task_public
-                        if (task_public := task.config.structure.public_patterns)
-                        is not None
+                        if (task_public := task.config.structure.public_patterns) is not None
                         else global_public_patterns
                     ),
                     *(
                         task_private
-                        if (task_private := task.config.structure.private_patterns)
-                        is not None
+                        if (task_private := task.config.structure.private_patterns) is not None
                         else global_private_patterns
                     ),
                 ],
@@ -176,22 +170,19 @@ def export_for_testing(
                 [
                     *(
                         task_public
-                        if (task_public := task.config.structure.public_patterns)
-                        is not None
+                        if (task_public := task.config.structure.public_patterns) is not None
                         else global_public_patterns
                     ),
                     *(
                         task_private
-                        if (task_private := task.config.structure.private_patterns)
-                        is not None
+                        if (task_private := task.config.structure.private_patterns) is not None
                         else global_private_patterns
                     ),
                 ],
                 [
                     *(
                         task_ignore
-                        if (task_ignore := task.config.structure.ignore_patterns)
-                        is not None
+                        if (task_ignore := task.config.structure.ignore_patterns) is not None
                         else global_ignore_patterns
                     ),
                 ],
@@ -211,9 +202,7 @@ def export_for_contribution(
         global_ignore_patterns = self.structure_config.ignore_patterns or []
         global_public_patterns = self.structure_config.public_patterns or []
-        global_private_patterns = (  # noqa: F841
-            self.structure_config.private_patterns or []
-        )
+        global_private_patterns = self.structure_config.private_patterns or []  # noqa: F841
 
         print("REPO")
         print(f"Copy files from {self.repository_root} to {target}")
@@ -233,16 +222,14 @@ def export_for_contribution(
                 [
                     *(
                         task_public
-                        if (task_public := task.config.structure.public_patterns)
-                        is not None
+                        if (task_public := task.config.structure.public_patterns) is not None
                         else global_public_patterns
                     ),
                 ],
                 [
                     *(
                         task_ignore
-                        if (task_ignore := task.config.structure.ignore_patterns)
-                        is not None
+                        if (task_ignore := task.config.structure.ignore_patterns) is not None
                         else global_ignore_patterns
                     ),
                 ],
@@ -270,14 +257,12 @@ def export_for_contribution(
                 [
                     *(
                         task_ignore
-                        if (task_ignore := task.config.structure.public_patterns)
-                        is not None
+                        if (task_ignore := task.config.structure.public_patterns) is not None
                         else global_public_patterns
                     ),
                     *(
                         task_ignore
-                        if (task_ignore := task.config.structure.ignore_patterns)
-                        is not None
+                        if (task_ignore := task.config.structure.ignore_patterns) is not None
                         else global_ignore_patterns
                     ),
                 ],
@@ -317,9 +302,7 @@ def _copy_files_accounting_sub_rules(
             relative_filename = str(path.relative_to(root))
             if path.is_dir():
                 if path in sub_rules:
-                    print(
-                        f" - Check Dir {path} to {destination / relative_filename} with sub rules (rec)"
-                    )
+                    print(f" - Check Dir {path} to {destination / relative_filename} with sub rules (rec)")
                     self._copy_files_accounting_sub_rules(
                         path,
                         destination / relative_filename,
@@ -329,9 +312,7 @@ def _copy_files_accounting_sub_rules(
                         sub_rules=sub_rules,
                     )
                 else:
-                    print(
-                        f" - Check Dir {path} to {destination / relative_filename} (rec)"
-                    )
+                    print(f" - Check Dir {path} to {destination / relative_filename} (rec)")
                     self._copy_files_accounting_sub_rules(
                         path,
                         destination / relative_filename,
@@ -342,9 +323,7 @@ def _copy_files_accounting_sub_rules(
                     )
             else:
                 if any(path.match(copy_pattern) for copy_pattern in copy_patterns):
-                    print(
-                        f" - Copy File {path} to {destination / relative_filename}"
-                    )
+                    print(f" - Copy File {path} to {destination / relative_filename}")
                     destination.mkdir(parents=True, exist_ok=True)
                     shutil.copyfile(
                         path,
diff --git a/checker/pipeline.py b/checker/pipeline.py
index 81868d8..bc67917 100644
--- a/checker/pipeline.py
+++ b/checker/pipeline.py
@@ -80,9 +80,7 @@ def resolve(self, template: str | list[str] | Any, context: dict[str, Any]) -> A
         elif isinstance(template, list):
             return [self.resolve(item, context) for item in template]
         elif isinstance(template, dict):
-            return {
-                key: self.resolve(value, context) for key, value in template.items()
-            }
+            return {key: self.resolve(value, context) for key, value in template.items()}
         else:
             return template
@@ -127,23 +125,17 @@ def validate(
         for pipeline_stage in self.pipeline:
             # validate plugin exists
             if pipeline_stage.run not in self.plugins:
-                raise BadConfig(
-                    f"Unknown plugin {pipeline_stage.run} in pipeline stage {pipeline_stage.name}"
-                )
+                raise BadConfig(f"Unknown plugin {pipeline_stage.run} in pipeline stage {pipeline_stage.name}")
             plugin_class = self.plugins[pipeline_stage.run]
 
             # validate args of the plugin (first resolve placeholders)
             if validate_placeholders:
-                resolved_args = self.parameters_resolver.resolve(
-                    pipeline_stage.args, context
-                )
+                resolved_args = self.parameters_resolver.resolve(pipeline_stage.args, context)
                 plugin_class.validate(resolved_args)
 
             # validate run_if condition
             if validate_placeholders and pipeline_stage.run_if:
-                resolved_run_if = self.parameters_resolver.resolve(
-                    pipeline_stage.run_if, context
-                )
+                resolved_run_if = self.parameters_resolver.resolve(pipeline_stage.run_if, context)
                 if not isinstance(resolved_run_if, bool):
                     raise BadConfig(
                         f"Invalid run_if condition {pipeline_stage.run_if} in pipeline stage {pipeline_stage.name}"
@@ -151,9 +143,7 @@ def validate(
             # add output to context if set register parameter
             if pipeline_stage.register_output:
-                context.setdefault("outputs", {})[
-                    pipeline_stage.register_output
-                ] = PipelineStageResult(
+                context.setdefault("outputs", {})[pipeline_stage.register_output] = PipelineStageResult(
                     name=pipeline_stage.name,
                     failed=False,
                     skipped=True,
@@ -171,9 +161,7 @@ def run(
         skip_the_rest = False
         for pipeline_stage in self.pipeline:
             # resolve placeholders in arguments
-            resolved_args = self.parameters_resolver.resolve(
-                pipeline_stage.args, context=context
-            )
+            resolved_args = self.parameters_resolver.resolve(pipeline_stage.args, context=context)
             resolved_run_if = (
                 self.parameters_resolver.resolve(pipeline_stage.run_if, context=context)
                 if pipeline_stage.run_if is not None
@@ -233,9 +221,7 @@ def run(
                 # register output if required
                 if pipeline_stage.register_output:
-                    context.setdefault("outputs", {})[
-                        pipeline_stage.register_output
-                    ] = pipeline_stage_results[-1]
+                    context.setdefault("outputs", {})[pipeline_stage.register_output] = pipeline_stage_results[-1]
 
                 continue
@@ -245,9 +231,7 @@ def run(
                 result = plugin.run(resolved_args, verbose=self.verbose)
                 _end_time = time.perf_counter()
                 print_info(result.output or "[empty output]")
-                print_info(
-                    f"> elapsed time: {_end_time-_start_time:.2f}s", color="grey"
-                )
+                print_info(f"> elapsed time: {_end_time-_start_time:.2f}s", color="grey")
                 print_info("ok!", color="green")
                 pipeline_stage_results.append(
                     PipelineStageResult(
@@ -262,9 +246,7 @@ def run(
             except PluginExecutionFailed as e:
                 _end_time = time.perf_counter()
                 print_info(e.output or "[empty output]")
-                print_info(
-                    f"> elapsed time: {_end_time-_start_time:.2f}s", color="grey"
-                )
+                print_info(f"> elapsed time: {_end_time-_start_time:.2f}s", color="grey")
                 pipeline_stage_results.append(
                     PipelineStageResult(
                         name=pipeline_stage.name,
@@ -286,15 +268,11 @@ def run(
                     print_info("error! (ignored as fail=never)", color="red")
                     pass
                 else:
-                    assert (
-                        False
-                    ), f"Unknown fail type {pipeline_stage.fail}"  # pragma: no cover
+                    assert False, f"Unknown fail type {pipeline_stage.fail}"  # pragma: no cover
 
             # register output if required
             if pipeline_stage.register_output:
-                context.setdefault("outputs", {})[
-                    pipeline_stage.register_output
-                ] = pipeline_stage_results[-1]
+                context.setdefault("outputs", {})[pipeline_stage.register_output] = pipeline_stage_results[-1]
 
         return PipelineResult(
             failed=not pipeline_passed,
diff --git a/checker/plugins/__init__.py b/checker/plugins/__init__.py
index 804e90e..ee53417 100644
--- a/checker/plugins/__init__.py
+++ b/checker/plugins/__init__.py
@@ -18,9 +18,7 @@
 def get_all_subclasses(cls: type[PluginABC]) -> set[type[PluginABC]]:
-    return set(cls.__subclasses__()).union(
-        [s for c in cls.__subclasses__() for s in get_all_subclasses(c)]
-    )
+    return set(cls.__subclasses__()).union([s for c in cls.__subclasses__() for s in get_all_subclasses(c)])
 
 def load_plugins(
diff --git a/checker/plugins/aggregate.py b/checker/plugins/aggregate.py
index 2f8ccde..c780ad3 100644
--- a/checker/plugins/aggregate.py
+++ b/checker/plugins/aggregate.py
@@ -13,9 +13,7 @@ class AggregatePlugin(PluginABC):
     class Args(PluginABC.Args):
         scores: list[float]
-        weights: Union[
-            list[float], None
-        ] = None  # as pydantic does not support | in older python versions
+        weights: Union[list[float], None] = None  # as pydantic does not support | in older python versions
         strategy: Literal["mean", "sum", "min", "max", "product"] = "mean"
         # TODO: validate for weights: len weights should be equal to len scores
         # TODO: validate not empty scores
@@ -35,9 +33,7 @@ def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput:  # type: i
                 output=f"Length of scores ({len(args.scores)}) or weights ({len(weights)}) is zero",
             )
 
-        weighted_scores = [
-            score * weight for score, weight in zip(args.scores, weights)
-        ]
+        weighted_scores = [score * weight for score, weight in zip(args.scores, weights)]
 
         if args.strategy == "mean":
             score = sum(weighted_scores) / len(weighted_scores)
diff --git a/checker/plugins/manytask.py b/checker/plugins/manytask.py
index 960e1c1..53f3bc6 100644
--- a/checker/plugins/manytask.py
+++ b/checker/plugins/manytask.py
@@ -24,9 +24,7 @@ class ManytaskPlugin(PluginABC):
     name = "report_score_manytask"
 
     class Args(PluginABC.Args):
-        origin: Optional[
-            str
-        ] = None  # as pydantic does not support | in older python versions
+        origin: Optional[str] = None  # as pydantic does not support | in older python versions
         patterns: list[str] = ["*"]
         username: str
         task_name: str
@@ -40,9 +38,7 @@ def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput:  # type: i
         output: list[str] = []
 
         if not args.send_time.tzinfo:
-            output.append(
-                "Warning: No timezone provided for send_time, possible time miscalculations"
-            )
+            output.append("Warning: No timezone provided for send_time, possible time miscalculations")
 
         send_time_formatted = args.send_time.strftime(self.DEFAULT_TIME_FORMAT)
 
         # Do not expose token in logs.
@@ -80,9 +76,7 @@ def _post_with_retries(
         data: dict[str, Any],
         files: dict[str, tuple[str, IO[bytes]]] | None,
     ) -> requests.Response:
-        retry_strategy = urllib3.Retry(
-            total=3, backoff_factor=1, status_forcelist=[408, 500, 502, 503, 504]
-        )
+        retry_strategy = urllib3.Retry(total=3, backoff_factor=1, status_forcelist=[408, 500, 502, 503, 504])
         adapter = requests.adapters.HTTPAdapter(max_retries=retry_strategy)
         session = requests.Session()
         session.mount("https://", adapter)
@@ -95,9 +89,7 @@ def _post_with_retries(
         return response
 
     @staticmethod
-    def _collect_files_to_send(
-        origin: str, patterns: list[str]
-    ) -> dict[str, tuple[str, IO[bytes]]]:
+    def _collect_files_to_send(origin: str, patterns: list[str]) -> dict[str, tuple[str, IO[bytes]]]:
         source_dir = Path(origin)
         return {
             path.name: (str(path.relative_to(source_dir)), open(path, "rb"))
diff --git a/checker/plugins/scripts.py b/checker/plugins/scripts.py
index b33fc04..1ec4814 100644
--- a/checker/plugins/scripts.py
+++ b/checker/plugins/scripts.py
@@ -15,12 +15,8 @@ class RunScriptPlugin(PluginABC):
     class Args(PluginABC.Args):
         origin: str
-        script: Union[
-            str, list[str]
-        ]  # as pydantic does not support | in older python versions
-        timeout: Union[
-            float, None
-        ] = None  # as pydantic does not support | in older python versions
+        script: Union[str, list[str]]  # as pydantic does not support | in older python versions
+        timeout: Union[float, None] = None  # as pydantic does not support | in older python versions
         isolate: bool = False
         env_whitelist: list[str] = Field(default_factory=lambda: ["PATH"])
diff --git a/checker/tester.py b/checker/tester.py
index 7c08569..5c03245 100644
--- a/checker/tester.py
+++ b/checker/tester.py
@@ -69,15 +69,9 @@ def __init__(
 
         self.plugins = load_plugins(self.testing_config.search_plugins, verbose=verbose)
 
-        self.global_pipeline = PipelineRunner(
-            self.testing_config.global_pipeline, self.plugins, verbose=verbose
-        )
-        self.task_pipeline = PipelineRunner(
-            self.testing_config.tasks_pipeline, self.plugins, verbose=verbose
-        )
-        self.report_pipeline = PipelineRunner(
-            self.testing_config.report_pipeline, self.plugins, verbose=verbose
-        )
+        self.global_pipeline = PipelineRunner(self.testing_config.global_pipeline, self.plugins, verbose=verbose)
+        self.task_pipeline = PipelineRunner(self.testing_config.tasks_pipeline, self.plugins, verbose=verbose)
+        self.report_pipeline = PipelineRunner(self.testing_config.report_pipeline, self.plugins, verbose=verbose)
 
         self.repository_dir = self.course.repository_root
         self.reference_dir = self.course.reference_root
@@ -119,8 +113,7 @@ def _get_context(
             "global": global_variables,
             "task": task_variables,
             "outputs": outputs,
-            "parameters": default_parameters.__dict__
-            | (task_parameters.__dict__ if task_parameters else {}),
+            "parameters": default_parameters.__dict__ | (task_parameters.__dict__ if task_parameters else {}),
             "env": os.environ.__dict__,
         }
@@ -134,9 +127,7 @@ def validate(self) -> None:
         # validate global pipeline (only default params and variables available)
         print("- global pipeline...")
         global_variables = self._get_global_pipeline_parameters(Path(), tasks)
-        context = self._get_context(
-            global_variables, None, outputs, self.default_params, None
-        )
+        context = self._get_context(global_variables, None, outputs, self.default_params, None)
         self.global_pipeline.validate(context, validate_placeholders=True)
         print(" ok")
@@ -176,12 +167,8 @@ def run(
         # run global pipeline
         print_header_info("Run global pipeline:", color="pink")
         global_variables = self._get_global_pipeline_parameters(origin, tasks)
-        context = self._get_context(
-            global_variables, None, outputs, self.default_params, None
-        )
-        global_pipeline_result: PipelineResult = self.global_pipeline.run(
-            context, dry_run=self.dry_run
-        )
+        context = self._get_context(global_variables, None, outputs, self.default_params, None)
+        global_pipeline_result: PipelineResult = self.global_pipeline.run(context, dry_run=self.dry_run)
 
         print_separator("-")
         print_info(str(global_pipeline_result), color="pink")
@@ -204,9 +191,7 @@ def run(
             )
 
             # TODO: read pipeline from task config if any
-            task_pipeline_result: PipelineResult = self.task_pipeline.run(
-                context, dry_run=self.dry_run
-            )
+            task_pipeline_result: PipelineResult = self.task_pipeline.run(context, dry_run=self.dry_run)
 
             print_separator("-")
             print_info(str(task_pipeline_result), color="pink")
@@ -216,9 +201,7 @@ def run(
             if task_pipeline_result:
                 print_info(f"Reporting <{task.name}> task tests:", color="pink")
                 if report:
-                    task_report_result: PipelineResult = self.report_pipeline.run(
-                        context, dry_run=self.dry_run
-                    )
+                    task_report_result: PipelineResult = self.report_pipeline.run(context, dry_run=self.dry_run)
                     if task_report_result:
                         print_info("->Reporting succeeded")
                     else:
diff --git a/pyproject.toml b/pyproject.toml
index 03dbc5b..58d45b1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -169,3 +169,7 @@ lines_after_imports = 2
 combine_as_imports = true
 default_section = "THIRDPARTY"
 known_first_party = "checker,tests"
+
+[tool.black]
+line-length = 120
+target-version = ['py312']
diff --git a/tests/plugins/test_aggregate.py b/tests/plugins/test_aggregate.py
index 8236650..7944af4 100644
--- a/tests/plugins/test_aggregate.py
+++ b/tests/plugins/test_aggregate.py
@@ -29,9 +29,7 @@ class TestAggregatePlugin:
             ({}, ValidationError),
         ],
     )
-    def test_plugin_args(
-        self, parameters: dict[str, Any], expected_exception: Exception | None
-    ) -> None:
+    def test_plugin_args(self, parameters: dict[str, Any], expected_exception: Exception | None) -> None:
         if expected_exception:
             with pytest.raises(expected_exception):
                 AggregatePlugin.Args(**parameters)
diff --git a/tests/plugins/test_manytask.py b/tests/plugins/test_manytask.py
index a7c76cc..2eaf465 100644
--- a/tests/plugins/test_manytask.py
+++ b/tests/plugins/test_manytask.py
@@ -103,9 +103,7 @@ def get_default_full_args_dict() -> dict[str, Any]:
             ({"report_url": "invalidurl"}, ValidationError),
         ],
     )
-    def test_plugin_args(
-        self, parameters: dict[str, Any], expected_exception: Type[BaseException] | None
-    ) -> None:
+    def test_plugin_args(self, parameters: dict[str, Any], expected_exception: Type[BaseException] | None) -> None:
         args = self.get_default_args_dict()
         args.update(parameters)
         if expected_exception:
@@ -124,9 +122,7 @@ def test_date_without_timezone_throws_warning(self) -> None:
         args["send_time"] = self.TEST_NOW_DATETIME.replace(tzinfo=None)
 
         with Mocker() as mocker:
-            mocker.post(
-                f"{self.BASE_URL}api/report", status_code=200, text='{"score": 1.0}'
-            )
+            mocker.post(f"{self.BASE_URL}api/report", status_code=200, text='{"score": 1.0}')
 
             output = plugin.run(args)
@@ -138,9 +134,7 @@ def test_date_with_timezone_doesnt_throw_warning(self) -> None:
         args["send_time"] = self.TEST_NOW_DATETIME.astimezone()
 
         with Mocker() as mocker:
-            mocker.post(
-                f"{self.BASE_URL}api/report", status_code=200, text='{"score": 1.0}'
-            )
+            mocker.post(f"{self.BASE_URL}api/report", status_code=200, text='{"score": 1.0}')
 
             output = plugin.run(args)
@@ -177,9 +171,7 @@ def test_collect_files_to_send(
         assert result is not None, "Didn't collect files"
         assert len(result) == taken_files_num, "Wrong file quantity are collected"
-        assert sorted(result.keys()) == sorted(
-            expected_filenames
-        ), "Wrong files are collected"
+        assert sorted(result.keys()) == sorted(expected_filenames), "Wrong files are collected"
 
         if taken_files_num:
             open.assert_called_with(mocker.ANY, "rb")  # type: ignore[attr-defined]
@@ -207,19 +199,11 @@ def test_post_with_retries(
         if expected_exception:
             with pytest.raises(expected_exception) as exc:
-                ManytaskPlugin._post_with_retries(
-                    self.BASE_URL, {"key": "value"}, None
-                )
-            assert str(response_status_code) in str(
-                exc.value
-            ), "Status code wasn't provided in exception message"
-            assert response_text in str(
-                exc.value
-            ), "Error text wasn't provided in exception message"
+                ManytaskPlugin._post_with_retries(self.BASE_URL, {"key": "value"}, None)
+            assert str(response_status_code) in str(exc.value), "Status code wasn't provided in exception message"
+            assert response_text in str(exc.value), "Error text wasn't provided in exception message"
         else:
-            result = ManytaskPlugin._post_with_retries(
-                self.BASE_URL, {"key": "value"}, None
-            )
+            result = ManytaskPlugin._post_with_retries(self.BASE_URL, {"key": "value"}, None)
 
             assert result.status_code == 200
             assert result.text == "Success"
diff --git a/tests/plugins/test_regex.py b/tests/plugins/test_regex.py
index f6a65f5..5226df5 100644
--- a/tests/plugins/test_regex.py
+++ b/tests/plugins/test_regex.py
@@ -45,9 +45,7 @@ class TestCheckRegexpsPlugin:
             ),
         ],
    )
-    def test_plugin_args(
-        self, parameters: dict[str, Any], expected_exception: Exception | None
-    ) -> None:
+    def test_plugin_args(self, parameters: dict[str, Any], expected_exception: Exception | None) -> None:
         if expected_exception:
             with pytest.raises(expected_exception):
                 CheckRegexpsPlugin.Args(**parameters)
@@ -82,9 +80,7 @@ def test_pattern_matching(
         regexps = ["forbidden"]
         plugin = CheckRegexpsPlugin()
-        args = CheckRegexpsPlugin.Args(
-            origin=str(origin), patterns=patterns, regexps=regexps
-        )
+        args = CheckRegexpsPlugin.Args(origin=str(origin), patterns=patterns, regexps=regexps)
 
         if expected_exception:
             with pytest.raises(expected_exception):
@@ -119,9 +115,7 @@ def test_check_regexps(
         patterns = ["*"]
         plugin = CheckRegexpsPlugin()
-        args = CheckRegexpsPlugin.Args(
-            origin=str(origin), patterns=patterns, regexps=regexps
-        )
+        args = CheckRegexpsPlugin.Args(origin=str(origin), patterns=patterns, regexps=regexps)
 
         if expected_exception:
             with pytest.raises(expected_exception) as exc_info:
@@ -129,18 +123,12 @@ def test_check_regexps(
             assert "matches regexp" in str(exc_info.value)
         else:
             assert plugin._run(args).output == "No forbidden regexps found"
-            assert (
-                plugin._run(args, verbose=True).output == "No forbidden regexps found"
-            )
-            assert (
-                plugin._run(args, verbose=False).output == "No forbidden regexps found"
-            )
+            assert plugin._run(args, verbose=True).output == "No forbidden regexps found"
+            assert plugin._run(args, verbose=False).output == "No forbidden regexps found"
 
     def test_non_existent_origin(self) -> None:
         plugin = CheckRegexpsPlugin()
-        args = CheckRegexpsPlugin.Args(
-            origin="/tmp/non_existent", patterns=["*.txt"], regexps=["forbidden"]
-        )
+        args = CheckRegexpsPlugin.Args(origin="/tmp/non_existent", patterns=["*.txt"], regexps=["forbidden"])
 
         with pytest.raises(PluginExecutionFailed) as exc_info:
             plugin._run(args)
diff --git a/tests/plugins/test_scripts.py b/tests/plugins/test_scripts.py
index 876b1cc..e9ab017 100644
--- a/tests/plugins/test_scripts.py
+++ b/tests/plugins/test_scripts.py
@@ -35,9 +35,7 @@ class TestRunScriptPlugin:
             ),
         ],
     )
-    def test_plugin_args(
-        self, parameters: dict[str, Any], expected_exception: Exception | None
-    ) -> None:
+    def test_plugin_args(self, parameters: dict[str, Any], expected_exception: Exception | None) -> None:
         if expected_exception:
             with pytest.raises(expected_exception):
                 RunScriptPlugin.Args(**parameters)
@@ -54,9 +52,7 @@ def test_plugin_args(
             ("echo Hello && false", "Hello", PluginExecutionFailed),
         ],
     )
-    def test_simple_cases(
-        self, script: str, output: str, expected_exception: Exception | None
-    ) -> None:
+    def test_simple_cases(self, script: str, output: str, expected_exception: Exception | None) -> None:
         plugin = RunScriptPlugin()
         args = RunScriptPlugin.Args(origin="/tmp", script=script)
@@ -77,9 +73,7 @@ def test_simple_cases(
             ("sleep 1", 0.5, PluginExecutionFailed),
         ],
     )
-    def test_timeout(
-        self, script: str, timeout: float, expected_exception: Exception | None
-    ) -> None:
+    def test_timeout(self, script: str, timeout: float, expected_exception: Exception | None) -> None:
         # TODO: check if timeout float
         plugin = RunScriptPlugin()
         args = RunScriptPlugin.Args(origin="/tmp", script=script, timeout=timeout)
@@ -101,9 +95,7 @@ def test_run_with_environment_variable(
         self, script: str, env_whitelist: list[str], mocked_env: dict[str, str]
     ) -> None:
         plugin = RunScriptPlugin()
-        args = RunScriptPlugin.Args(
-            origin="/tmp", script=script, env_whitelist=env_whitelist
-        )
+        args = RunScriptPlugin.Args(origin="/tmp", script=script, env_whitelist=env_whitelist)
 
         with patch.dict("os.environ", mocked_env, clear=True):
             result = plugin._run(args)
diff --git a/tests/test_course.py b/tests/test_course.py
index 7ae0000..b20fc3d 100644
--- a/tests/test_course.py
+++ b/tests/test_course.py
@@ -92,16 +92,12 @@ def repository_root(tmp_path: Path) -> Path:
 
 class TestCourse:
     def test_init(self, repository_root: Path) -> None:
-        test_course = Course(
-            deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root
-        )
+        test_course = Course(deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root)
 
         assert test_course.repository_root == repository_root
         assert test_course.deadlines == TEST_DEADLINES_CONFIG
 
     def test_validate(self, repository_root: Path) -> None:
-        test_course = Course(
-            deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root
-        )
+        test_course = Course(deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root)
 
         try:
             test_course.validate()
@@ -111,35 +107,23 @@ def test_validate(self, repository_root: Path) -> None:
     def test_validate_with_no_group(self, repository_root: Path) -> None:
         shutil.rmtree(repository_root / "group1")
         with pytest.raises(BadConfig):
-            Course(
-                deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root
-            ).validate()
+            Course(deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root).validate()
 
     def test_validate_with_no_task(self, repository_root: Path) -> None:
         shutil.rmtree(repository_root / "group1" / "task1_1")
         with pytest.raises(BadConfig):
-            Course(
-                deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root
-            ).validate()
+            Course(deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root).validate()
 
     def test_init_task_with_bad_config(self, repository_root: Path) -> None:
-        with open(
-            repository_root / "group1" / "task1_1" / Course.TASK_CONFIG_NAME, "w"
-        ) as f:
+        with open(repository_root / "group1" / "task1_1" / Course.TASK_CONFIG_NAME, "w") as f:
             f.write("bad_config")
 
         with pytest.raises(BadConfig):
             Course(deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root)
 
-    @pytest.mark.parametrize(
-        "enabled, expected_num_groups", [(None, 4), (True, 3), (False, 1)]
-    )
-    def test_get_groups(
-        self, enabled: bool | None, expected_num_groups, repository_root: Path
-    ) -> None:
-        test_course = Course(
-            deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root
-        )
+    @pytest.mark.parametrize("enabled, expected_num_groups", [(None, 4), (True, 3), (False, 1)])
+    def test_get_groups(self, enabled: bool | None, expected_num_groups, repository_root: Path) -> None:
+        test_course = Course(deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root)
 
         groups = test_course.get_groups(enabled=enabled)
         assert isinstance(groups, list)
@@ -150,12 +134,8 @@ def test_get_groups(
         "enabled, expected_num_tasks",
         [(None, 6), (True, 3), pytest.param(False, 3, marks=pytest.mark.xfail())],
     )
-    def test_get_tasks(
-        self, enabled: bool | None, expected_num_tasks, repository_root: Path
-    ) -> None:
-        test_course = Course(
-            deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root
-        )
+    def test_get_tasks(self, enabled: bool | None, expected_num_tasks, repository_root: Path) -> None:
+        test_course = Course(deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root)
 
         tasks = test_course.get_tasks(enabled=enabled)
         assert isinstance(tasks, list)
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
index d009fab..053a920 100644
--- a/tests/test_pipeline.py
+++ b/tests/test_pipeline.py
@@ -31,9 +31,7 @@ def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput:
                 percentage=args.score,
             )
         else:
-            return PluginOutput(
-                output=f"Score: {args.score:.2f}", percentage=args.score
-            )
+            return PluginOutput(output=f"Score: {args.score:.2f}", percentage=args.score)
 
 class _EchoPlugin(PluginABC):
@@ -149,9 +147,7 @@ def test_unknown_plugin(self, sample_plugins: dict[str, Type[PluginABC]]) -> Non
             )
         assert "Unknown plugin" in str(exc_info.value)
 
-    def test_validate_placeholders(
-        self, sample_correct_pipeline: list[PipelineStageConfig]
-    ) -> None:
+    def test_validate_placeholders(self, sample_correct_pipeline: list[PipelineStageConfig]) -> None:
         with pytest.raises(BadConfig) as exc_info:
             _ = PipelineRunner(
                 pipeline=sample_correct_pipeline,
@@ -200,9 +196,7 @@ def test_invalid_register_output(
             verbose=False,
         )
         with pytest.raises(BadConfig) as exc_info:
-            pipeline_runner.validate(
-                {"message": "some valid message"}, validate_placeholders=True
-            )
+            pipeline_runner.validate({"message": "some valid message"}, validate_placeholders=True)
         assert "Invalid template" in str(exc_info.value)
 
     def test_run_correct_pipeline_verbose(
diff --git a/tests/test_resolver.py b/tests/test_resolver.py
index 2f56c7e..29ecbf5 100644
--- a/tests/test_resolver.py
+++ b/tests/test_resolver.py
@@ -14,9 +14,7 @@ class TestParametersResolver:
         "template, context, expected",
         [
             ("${{ a }}", {"a": 2}, 2),
-            pytest.param(
-                "${{ b }}", {"b": "2"}, "2", marks=pytest.mark.xfail()
-            ),  # TODO: check why returned as int
+            pytest.param("${{ b }}", {"b": "2"}, "2", marks=pytest.mark.xfail()),  # TODO: check why returned as int
             ("${{ c }}", {"c": [1, 2, "4"]}, [1, 2, "4"]),
             (" ${{ d }}", {"d": 2}, 2),
             ("${{ e }} ", {"e": 2}, 2),
             ("${{ g }}", {"g": 2}, 2),
             ("${{ i }}", {"i": 2.0}, 2.0),
         ],
     )
-    def test_keep_native_type(
-        self, template: str, context: dict[str, Any], expected: Any
-    ) -> None:
+    def test_keep_native_type(self, template: str, context: dict[str, Any], expected: Any) -> None:
         resolver = ParametersResolver()
         assert resolver.resolve(template, context) == expected
@@ -41,9 +37,7 @@ def test_keep_native_type(
             ("${{ a }}", {"a": 2, "b": 3}, 2),
         ],
     )
-    def test_string_input(
-        self, template: str, context: dict[str, Any], expected: Any
-    ) -> None:
+    def test_string_input(self, template: str, context: dict[str, Any], expected: Any) -> None:
         resolver = ParametersResolver()
         assert resolver.resolve(template, context) == expected
@@ -59,9 +53,7 @@ def test_string_input(
             ),
         ],
     )
-    def test_list_input(
-        self, template: list[Any], context: dict[str, Any], expected: list[Any]
-    ) -> None:
+    def test_list_input(self, template: list[Any], context: dict[str, Any], expected: list[Any]) -> None:
         resolver = ParametersResolver()
         assert resolver.resolve(template, context) == expected
@@ -121,13 +113,9 @@ def test_non_template(self, template: Any, context: dict[str, Any]) -> None:
                 {"valid_var": {"valid_field": 1}},
                 marks=pytest.mark.xfail(),
             ),
-            pytest.param(
-                "${{ not_existing }} ${{ a }}", {"a": 2}, marks=pytest.mark.xfail()
-            ),
+            pytest.param("${{ not_existing }} ${{ a }}", {"a": 2}, marks=pytest.mark.xfail()),
             pytest.param("${{ not_existing }}", {"a": 2}, marks=pytest.mark.xfail()),
-            pytest.param(
-                "invalid_syntax }}", {"invalid_syntax": 2}, marks=pytest.mark.xfail()
-            ),
+            pytest.param("invalid_syntax }}", {"invalid_syntax": 2}, marks=pytest.mark.xfail()),
         ],
     )
     def test_invalid_template(self, template: Any, context: dict[str, Any]) -> None:
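
Note on reproducing the formatting: the changes above match the new [tool.black] section added to pyproject.toml (line-length = 120, target-version py312). Assuming Black and isort are installed in the project's dev environment, the reformatting can presumably be re-applied or verified with:

    black --check --diff checker tests
    isort --check-only checker tests

Both tools pick up their configuration from pyproject.toml, so no extra flags should be needed; the exact invocation used for this patch is an assumption, not part of the diff itself.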