diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 3e9b812..9ba34cc 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -1,15 +1,19 @@ name: Test behavex in latest python versions - on: [push, pull_request] jobs: test-behavex: - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} + + env: + PYTHONIOENCODING: utf-8 + PYTHONLEGACYWINDOWSSTDIO: utf-8 strategy: matrix: - python-version: ['3.8', '3.9', '3.10', '3.11'] + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] steps: - name: Checkout repository @@ -20,11 +24,29 @@ jobs: with: python-version: ${{ matrix.python-version }} + - name: Install setuptools + run: | + python -m pip install 'setuptools>=61' + - name: Install behavex run: | python -m pip install --upgrade pip python setup.py sdist - pip install ./dist/*.tar.gz + pip install dist/behavex-4.0.9.tar.gz - name: Verify behavex command - run: behavex ./tests/features/*.feature + shell: bash + run: | + if [ "$RUNNER_OS" == "Windows" ]; then + dir + behavex .\\tests\\features\\*.feature + else + behavex ./tests/features/*.feature + fi + + # Set report.json to be available as an artifact for debugging + - name: Set report.json to be available as an artifact + uses: actions/upload-artifact@v4 + with: + name: report-${{ matrix.os }}-py${{ matrix.python-version }} + path: /home/runner/work/behavex/behavex/output/report.json diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index eb46272..e7d6068 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,7 +21,7 @@ repos: hooks: - id: bandit exclude: tests - args: [--skip, "B322"] # ignore assert_used + args: [--skip, "B322,B110,B605,B607"] # ignore assert_used, try/except/pass and process start checks - repo: https://github.com/pre-commit/mirrors-isort rev: v5.6.4 diff --git a/CHANGES.rst b/CHANGES.rst index d26efcf..8011904 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,21 @@ Version History =============================================================================== +Version: 4.0.9 +------------------------------------------------------------------------------- + +ENHANCEMENTS: + +* Adding support for the latest Python version (3.12) +* Performing cross-platform validations as part of the GitHub Actions workflow (Linux, Windows and macOS) +* Enabling the use of scenario lines in feature paths when running BehaveX +* Fixing an issue when performing dry runs, where the internal @BHX_MANUAL_DRY_RUN tag was not removed from the scenario tags + FIXES: + +* Fixing execution issues in Windows OS when running BehaveX with a feature path different from the current path
+* Fixing encoding issues in progress bar in Windows OS + Version: 4.0.8 ------------------------------------------------------------------------------- ENHANCEMENTS: diff --git a/MANIFEST.in b/MANIFEST.in index d1f134f..80e9325 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,4 @@ include LICENSE include README.md recursive-include behavex *.* -prune behavex\tests/ +prune behavex\tests diff --git a/behavex/outputs/report_html.py b/behavex/outputs/report_html.py index 8acac91..7eb87b2 100644 --- a/behavex/outputs/report_html.py +++ b/behavex/outputs/report_html.py @@ -19,7 +19,7 @@ from behavex.outputs.report_utils import (gather_steps_with_definition, get_environment_details, get_save_function, - try_operate_descriptor) + retry_file_operation) def generate_report(output, joined=None, report=None): @@ -47,7 +47,7 @@ def _create_manifest(relative, page): os.makedirs(folder) file_manifest = os.path.join(folder, page.replace('html', 'manifest')) - try_operate_descriptor( + retry_file_operation( file_manifest, execution=get_save_function(file_manifest, output_text) ) @@ -80,7 +80,7 @@ def _create_files_report(content_to_file): # pylint: disable= W0703 except Exception as ex: print(ex) - try_operate_descriptor(path_file, get_save_function(path_file, content)) + retry_file_operation(path_file, get_save_function(path_file, content)) def get_metrics_variables(scenarios): diff --git a/behavex/outputs/report_json.py b/behavex/outputs/report_json.py index f8673d0..5b36a56 100644 --- a/behavex/outputs/report_json.py +++ b/behavex/outputs/report_json.py @@ -22,13 +22,13 @@ from behave.model import ScenarioOutline from behave.step_registry import registry -from behavex.conf_mgr import get_env +from behavex.conf_mgr import get_env, get_param from behavex.global_vars import global_vars from behavex.outputs.report_utils import (get_environment_details, get_error_message, match_for_execution, text) -from behavex.utils import (generate_hash, get_scenario_tags, - try_operate_descriptor) +from behavex.utils import (generate_hash, generate_uuid, get_scenario_tags, + retry_file_operation) def add_step_info(step, parent_node): @@ -105,7 +105,7 @@ def generate_json_report(feature_list): def write_json(): file_info.write(json.dumps(output)) - try_operate_descriptor(path_info, execution=write_json) + retry_file_operation(path_info, execution=write_json) if multiprocessing.current_process().name != 'MainProcess': return output except IOError: @@ -150,13 +150,17 @@ def _processing_background_feature(feature): def _processing_scenarios(scenarios, scenario_list, id_feature): scenario_outline_index = 0 overall_status = 'passed' + is_dry_run = get_param('dry_run') for scenario in scenarios: + # Remove BHX_MANUAL_DRY_RUN tag if it is a dry run + scenario_tags = get_scenario_tags(scenario) + if is_dry_run and 'BHX_MANUAL_DRY_RUN' in scenario_tags: + scenario.tags.remove('BHX_MANUAL_DRY_RUN') # Set MANUAL to False in order filter regardless of it error_msg, error_lines, error_step, error_background = _get_error_scenario( scenario ) # pylint: disable=W0123 - scenario_tags = get_scenario_tags(scenario) if match_for_execution(scenario_tags): # Scenario was selectable scenario_info = {} @@ -181,8 +185,7 @@ def _processing_scenarios(scenarios, scenario_list, id_feature): scenario_info['error_lines'] = error_lines scenario_info['error_step'] = error_step scenario_info['error_background'] = error_background - scenario_info['id_hash'] = generate_hash("{}:{}".format(scenario.filename, - scenario.line)) + scenario_info['id_hash'] 
= generate_uuid() if scenario.feature.name in global_vars.retried_scenarios: if ( scenario.name diff --git a/behavex/outputs/report_utils.py b/behavex/outputs/report_utils.py index 234b276..a482d63 100644 --- a/behavex/outputs/report_utils.py +++ b/behavex/outputs/report_utils.py @@ -190,7 +190,23 @@ def resolving_type(step, scenario, background=True): return step['step_type'].title() -def try_operate_descriptor(dest_path, execution, return_value=False): +def retry_file_operation(dest_path, execution, return_value=False): + """Executes a file operation with retry logic for handling file access conflicts. + + This function attempts to execute a file operation (like copy, delete, etc.) and + provides an interactive retry mechanism if the files are locked by another process. + + Args: + dest_path (str): The file system path where the operation will be performed + execution (callable): A function containing the file operation to execute + return_value (bool, optional): Whether to return the result of the operation. Defaults to False. + + Returns: + Any: The result of the operation if return_value is True, otherwise None + + Example: + >>> retry_file_operation('/path/to/dir', lambda: shutil.rmtree('/path/to/dir')) + """ passed = False following = True retry_number = 1 @@ -290,8 +306,8 @@ def copy_bootstrap_html_generator(output): bootstrap_path = ['outputs', 'bootstrap'] bootstrap_path = os.path.join(global_vars.execution_path, *bootstrap_path) if os.path.exists(dest_path): - try_operate_descriptor(dest_path, lambda: shutil.rmtree(dest_path)) - try_operate_descriptor( + retry_file_operation(dest_path, lambda: shutil.rmtree(dest_path)) + retry_file_operation( dest_path, lambda: shutil.copytree(bootstrap_path, dest_path) ) @@ -451,5 +467,5 @@ def get_environment_details(): environment_details = environment_details_raw_data.split(',') if environment_details_raw_data else [] return environment_details -def strip_ansi_codes(from_str: str): +def strip_ansi_codes(from_str: str): return re.sub(r'\x1B\[[0-?9;]*[mGJK]', '', from_str) diff --git a/behavex/outputs/report_xml.py b/behavex/outputs/report_xml.py index df095f8..4ca51c5 100644 --- a/behavex/outputs/report_xml.py +++ b/behavex/outputs/report_xml.py @@ -16,8 +16,8 @@ from behavex.global_vars import global_vars from behavex.outputs.jinja_mgr import TemplateHandler from behavex.outputs.report_utils import (get_save_function, - match_for_execution, text, - try_operate_descriptor) + match_for_execution, + retry_file_operation, text) from behavex.utils import get_scenario_tags @@ -101,7 +101,7 @@ def get_status(scenario_): junit_path = os.path.join(get_env('OUTPUT'), 'behave') path_output = os.path.join(junit_path, u'TESTS-' + name + u'.xml') - try_operate_descriptor( + retry_file_operation( path_output + '.xml', get_save_function(path_output, output_text) ) diff --git a/behavex/progress_bar.py b/behavex/progress_bar.py index 08258b4..58492f1 100644 --- a/behavex/progress_bar.py +++ b/behavex/progress_bar.py @@ -1,17 +1,34 @@ +import platform import sys import time from behavex.global_vars import global_vars +def get_progress_chars(): + """Get appropriate progress bar characters based on platform.""" + if platform.system() == 'Windows': + return { + 'bar': '#', + 'edge': '|', + 'empty': '-' + } + return { + 'bar': '█', + 'edge': '|', + 'empty': '-' + } + + class ProgressBar: - def __init__(self, prefix, total, bar_length=15, print_updates_in_new_lines=False): + def __init__(self, prefix, total, print_updates_in_new_lines=False): self.prefix = prefix
self.total = total - self.bar_length = bar_length + self.print_updates_in_new_lines = print_updates_in_new_lines + self.progress_chars = get_progress_chars() + self.bar_length = 15 self.current_iteration = 0 self.start_time = global_vars.execution_start_time - self.print_in_new_lines = print_updates_in_new_lines def start(self, start_increment=0): self.current_iteration = start_increment @@ -34,11 +51,11 @@ def _print_progress_bar(self, new_line=False): else: percent = 100 * float(self.current_iteration / float(self.total)) filled_length = int(self.bar_length * self.current_iteration // self.total) - bar = '█' * filled_length + '-' * (self.bar_length - filled_length) + bar = self.progress_chars['bar'] * filled_length + self.progress_chars['empty'] * (self.bar_length - filled_length) elapsed_time = global_vars.execution_elapsed_time elapsed_formatted = time.strftime("%M:%S", time.gmtime(elapsed_time)) progress_bar_content = f"\r{prefix}{percent:.0f}%|{bar}| {self.current_iteration}/{self.total} [{elapsed_formatted}]\r" - if self.print_in_new_lines or new_line or percent == 100: + if self.print_updates_in_new_lines or new_line or percent == 100: print(progress_bar_content) else: sys.stdout.write(progress_bar_content) diff --git a/behavex/runner.py b/behavex/runner.py index a5ef17d..ce32ca6 100644 --- a/behavex/runner.py +++ b/behavex/runner.py @@ -44,14 +44,15 @@ from behavex.outputs.report_json import generate_execution_info from behavex.outputs.report_utils import (get_overall_status, match_for_execution, - pretty_print_time, text, - try_operate_descriptor) + pretty_print_time, + retry_file_operation, text) from behavex.progress_bar import ProgressBar from behavex.utils import (IncludeNameMatch, IncludePathsMatch, MatchInclude, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_execution_complete_callback_function, - explore_features, generate_hash, generate_reports, + expand_paths, explore_features, generate_hash, + generate_reports, get_feature_and_scenario_line, get_json_results, get_logging_level, get_scenario_tags, get_scenarios_instances, get_text, join_feature_reports, @@ -86,7 +87,6 @@ def main(): # force exit sys.exit(exit_code) - def run(args): """Run BehaveX with the given arguments. 
@@ -110,24 +110,19 @@ def run(args): if execution_code == EXIT_ERROR: return EXIT_ERROR else: + # Handle paths parameter if len(get_param('paths')) > 0: - for path in get_param('paths'): - if not os.path.exists(path): - print('\nSpecified path was not found: {}'.format(path)) - exit() - paths = ",".join(get_param('paths')) + paths = ",".join(expand_paths(get_param('paths'))) os.environ['FEATURES_PATH'] = paths + + # Handle include_paths parameter if len(get_param('include_paths')) > 0: - for path in get_param('paths'): - if not os.path.exists(path): - print('\nSpecified path was not found: {}'.format(path)) - exit() - paths = ",".join(get_param('include_paths')) + include_paths = ",".join(expand_paths(get_param('include_paths'))) features_path = os.environ.get('FEATURES_PATH') - if features_path == '' or features_path is None: - os.environ['FEATURES_PATH'] = paths + if features_path == '' or features_path is None or not os.path.exists(features_path): + os.environ['FEATURES_PATH'] = include_paths else: - os.environ['FEATURES_PATH'] = features_path + ',' + paths + os.environ['FEATURES_PATH'] = features_path + ',' + include_paths features_path = os.environ.get('FEATURES_PATH') if features_path == '' or features_path is None: os.environ['FEATURES_PATH'] = 'features' @@ -386,19 +381,20 @@ def create_scenario_line_references(features): for scenario in scenarios: scenario_filename = text(scenario.filename) if global_vars.rerun_failures or ".feature:" in feature_path: - feature_without_scenario_line = feature_path.split(":")[0] - if feature_without_scenario_line not in updated_features: - updated_features[feature_without_scenario_line] = [] + if feature_path not in updated_features: + updated_features[feature_path] = [] if isinstance(scenario, ScenarioOutline): for scenario_outline_instance in scenario.scenarios: - if scenario_outline_instance.line == int(feature_path.split(":")[1]): - if scenario_outline_instance not in updated_features[feature_without_scenario_line]: - updated_features[feature_without_scenario_line].append(scenario_outline_instance) + outline_scenario_line = get_feature_and_scenario_line(scenario_outline_instance.name)[1] + if outline_scenario_line and scenario_outline_instance.line == int(outline_scenario_line): + if scenario_outline_instance not in updated_features[feature_path]: + updated_features[feature_path].append(scenario_outline_instance) break else: - if scenario.line == int(feature_path.split(":")[1]): - if scenario not in updated_features[feature_without_scenario_line]: - updated_features[feature_without_scenario_line].append(scenario) + scenario_line = get_feature_and_scenario_line(feature_path)[1] + if scenario_line and scenario.line == int(scenario_line): + if scenario not in updated_features[feature_path]: + updated_features[feature_path].append(scenario) else: updated_features_path = scenario.feature.filename if updated_features_path not in updated_features: @@ -434,11 +430,13 @@ def launch_by_feature(features, parallel_features = [] for features_path in features: feature = features[features_path][0].feature + pure_feature_path, scenario_line = get_feature_and_scenario_line(features_path) + feature_filename = feature.filename if not scenario_line else "{}:{}".format(pure_feature_path, scenario_line) if 'SERIAL' in feature.tags: - serial_features.append({"feature_filename": feature.filename, + serial_features.append({"feature_filename": feature_filename, "feature_json_skeleton": _get_feature_json_skeleton(feature)}) else: -
parallel_features.append({"feature_filename": feature.filename, + parallel_features.append({"feature_filename": feature_filename, "feature_json_skeleton": _get_feature_json_skeleton(feature)}) if show_progress_bar: total_features = len(serial_features) + len(parallel_features) @@ -723,16 +721,17 @@ def _launch_behave(behave_args): Returns: tuple: Execution code and whether to generate a report. """ - # Save tags configuration to report only selected scenarios - # Check for tags in config file generate_report = True + execution_code = 0 + stdout_file = None + try: stdout_file = behave_args[behave_args.index('--outfile') + 1] execution_code = behave_script.main(behave_args) if not os.path.exists(stdout_file): - # Code 2 means the execution crashed and test was not properly executed execution_code = 2 generate_report = True + except KeyboardInterrupt: execution_code = 1 generate_report = False @@ -745,10 +744,27 @@ def _launch_behave(behave_args): except: execution_code = 2 generate_report = True - if os.path.exists(stdout_file): - with open(os.path.join(get_env('OUTPUT'), 'merged_behave_outputs.log'), 'a+') as behave_log_file: - behave_log_file.write(open(stdout_file, 'r').read()) - os.remove(stdout_file) + + if stdout_file and os.path.exists(stdout_file): + def _merge_and_remove(): + with open(os.path.join(get_env('OUTPUT'), 'merged_behave_outputs.log'), 'a+') as behave_log_file: + with open(stdout_file, 'r') as source_file: + behave_log_file.write(source_file.read()) + # nosec B110 + try: + if not source_file.closed: + os.close(source_file.fileno()) + if os.path.exists(stdout_file): + os.remove(stdout_file) + except: + pass + try: + retry_file_operation(stdout_file, _merge_and_remove) + except Exception as remove_ex: + logging.warning(f"Could not remove stdout file {stdout_file}: {remove_ex}") + # Don't fail the execution if we can't remove the file + pass + return execution_code, generate_report @@ -843,9 +859,7 @@ def remove_temporary_files(parallel_processes, json_reports): json_reports = [json_reports] for json_report in json_reports: if 'features' in json_report and json_report['features']: - feature_name = os.path.join( - get_env('OUTPUT'), u'{}.tmp'.format(json_report['features'][0]['name']) - ) + feature_name = os.path.join(get_env('OUTPUT'), u'{}.tmp'.format(json_report['features'][0]['name'])) if os.path.exists(feature_name): try: os.remove(feature_name) @@ -1173,7 +1187,7 @@ def _load_json(): json_output = {'environment': [], 'features': [], 'steps_definition': []} if os.path.exists(path_info): - json_output = try_operate_descriptor(path_info, _load_json, return_value=True) + json_output = retry_file_operation(path_info, _load_json, return_value=True) return json_output diff --git a/behavex/utils.py b/behavex/utils.py index 6c8d4dc..32af33d 100644 --- a/behavex/utils.py +++ b/behavex/utils.py @@ -9,6 +9,7 @@ import codecs import functools +import glob import hashlib import json import logging @@ -18,6 +19,7 @@ import shutil import sys import time +import uuid from functools import reduce from tempfile import gettempdir @@ -32,7 +34,7 @@ from behavex.outputs.output_strings import TEXTS from behavex.outputs.report_utils import (get_save_function, get_string_hash, match_for_execution, - try_operate_descriptor) + retry_file_operation) LOGGING_CFG = ConfigObj(os.path.join(global_vars.execution_path, 'conf_logging.cfg')) LOGGING_LEVELS = { @@ -176,24 +178,37 @@ def join_scenario_reports(json_reports): def explore_features(features_path, features_list=None): if features_list is None: 
features_list = [] - if global_vars.rerun_failures or ".feature:" in features_path: - features_path = features_path.split(":")[0] - if os.path.isfile(features_path): - if features_path.endswith('.feature'): - path_feature = os.path.abspath(features_path) - feature = should_feature_be_run(path_feature) + + # Normalize path separators + pure_feature_path, scenario_line = get_feature_and_scenario_line(features_path) + normalized_features_path = os.path.normpath(pure_feature_path) + if os.path.isfile(normalized_features_path): + if normalized_features_path.endswith('.feature'): + abs_feature_path = os.path.abspath(normalized_features_path) + feature = should_feature_be_run(abs_feature_path) if feature: - features_list.extend(feature.scenarios) + if scenario_line: + # iterate over scenarios and add the scenario that matches the scenario line + for scenario in feature.scenarios: + if scenario.line == int(scenario_line): + features_list.append(scenario) + else: + features_list.extend(feature.scenarios) else: - for node in os.listdir(features_path): - if os.path.isdir(os.path.join(features_path, node)): - explore_features(os.path.join(features_path, node), features_list) - else: - if node.endswith('.feature'): - path_feature = os.path.abspath(os.path.join(features_path, node)) - feature = should_feature_be_run(path_feature) + try: + for node in os.listdir(normalized_features_path): + node_path = os.path.join(normalized_features_path, node) + if os.path.isdir(node_path): + explore_features(node_path, features_list) + elif node.endswith('.feature'): + abs_feature_path = os.path.abspath(node_path) + feature = should_feature_be_run(abs_feature_path) if feature: features_list.extend(feature.scenarios) + except OSError as e: + print(f"Error accessing path {features_path}: {e}") + return features_list + return features_list @@ -249,10 +264,10 @@ def copy_bootstrap_html_generator(): bootstrap_path = ['outputs', 'bootstrap'] bootstrap_path = os.path.join(global_vars.execution_path, *bootstrap_path) if os.path.exists(destination_path): - try_operate_descriptor( + retry_file_operation( destination_path, lambda: shutil.rmtree(destination_path) ) - try_operate_descriptor( + retry_file_operation( destination_path, lambda: shutil.copytree(bootstrap_path, destination_path) ) @@ -264,18 +279,18 @@ def cleanup_folders(): def execution(): return shutil.rmtree(output_folder, ignore_errors=True) - try_operate_descriptor(output_folder, execution) + retry_file_operation(output_folder, execution) if not os.path.exists(output_folder): - try_operate_descriptor(output_folder, lambda: os.makedirs(output_folder)) + retry_file_operation(output_folder, lambda: os.makedirs(output_folder)) # temp folder temp_folder = get_env('temp') def execution(): return shutil.rmtree(temp_folder, ignore_errors=True) - try_operate_descriptor(temp_folder, execution) + retry_file_operation(temp_folder, execution) if not os.path.exists(temp_folder): - try_operate_descriptor(temp_folder, lambda: os.makedirs(temp_folder)) + retry_file_operation(temp_folder, lambda: os.makedirs(temp_folder)) # behave folder behave_folder = os.path.join(get_env('OUTPUT'), 'behave') @@ -283,9 +298,15 @@ def execution(): def execution(): return shutil.rmtree(behave_folder, ignore_errors=True) - try_operate_descriptor(behave_folder, execution) + retry_file_operation(behave_folder, execution) if not os.path.exists(behave_folder): - try_operate_descriptor(behave_folder, lambda: os.makedirs(behave_folder)) + retry_file_operation(behave_folder, lambda: 
os.makedirs(behave_folder)) + + + +# Implemented for backward compatibility with other libraries that use this function +def try_operate_descriptor(dest_path, execution, return_value=False): + return retry_file_operation(dest_path, execution, return_value) def set_env_variable(key, value): @@ -421,7 +442,7 @@ def set_behave_tags(): tags_line = ' '.join(tags) tags_line = tags_line.replace('~', 'not ') tags_line = tags_line.replace(',', ' or ') - try_operate_descriptor( + retry_file_operation( behave_tags, execution=get_save_function(behave_tags, tags_line) ) @@ -516,17 +537,17 @@ def __init__(self, paths=None): self.features = [ os.path.abspath(path) for path in self.features_paths - if os.path.isfile(path) and ':' not in path + if os.path.isfile(path) and not has_scenario_line_number(path) ] self.scenarios = [ - "{}:{}".format(os.path.abspath(path.split(":")[0]), path.split(":")[1]) + "{}:{}".format(os.path.abspath(get_feature_and_scenario_line(path)[0]), get_feature_and_scenario_line(path)[1]) for path in self.features_paths - if not os.path.isdir(path) and ':' in path + if not os.path.isdir(path) and has_scenario_line_number(path) ] self.folders = [ os.path.abspath(path) for path in self.features_paths - if os.path.isdir(path) and ':' not in path + if os.path.isdir(path) and not has_scenario_line_number(path) ] def __call__(self, *args, **kwargs): @@ -548,6 +569,7 @@ def bool(self): return self.features and self.folders + class IncludeNameMatch(metaclass=ExecutionSingleton): def __init__(self, expr=None): if not expr: @@ -583,3 +605,46 @@ def generate_hash(word): hash_int = int.from_bytes(truncated_hash, byteorder='big') # Ensure the result fits in 48 bits (optional, for consistency) return hash_int & 0xFFFFFFFFFFFF + +def generate_uuid(): + return uuid.uuid4().hex + +def expand_paths(paths): + """Expand glob patterns in paths and verify they exist. + + Args: + paths (list): List of paths that may contain glob patterns + + Returns: + list: List of expanded and verified paths + """ + expanded = [] + for path in paths: + # Handle glob patterns + if any(char in path for char in ['*', '?', '[']): + globbed = glob.glob(path) + if not globbed: + print('\nNo files found matching pattern: {}'.format(path)) + exit() + expanded.extend(globbed) + else: + pure_feature_path, scenario_line = get_feature_and_scenario_line(path) + normalized_features_path = os.path.normpath(pure_feature_path) + if not os.path.exists(pure_feature_path): + print('\nSpecified path was not found: {}'.format(pure_feature_path)) + exit() + expanded.append(path) + return expanded + + +def has_scenario_line_number(path): + # Split path by colon and check if the last part is a number + parts = path.split(':') + return len(parts) > 1 and parts[-1].isdigit() + +def get_feature_and_scenario_line(path): + if has_scenario_line_number(path): + parts = path.split(':') + return [':'.join(parts[:-1]), parts[-1]] + else: + return [path, None] diff --git a/pyproject.toml b/pyproject.toml index 06a4851..66687b8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,10 @@ [build-system] -requires = ["setuptools>=42", "wheel"] +requires = ["setuptools>=61", "wheel"] build-backend = "setuptools.build_meta" [project] name = "behavex" -version = "4.0.8" +version = "4.0.9" description = "Agile testing framework on top of Behave (BDD)." 
readme = "README.md" license = { text = "MIT" } @@ -20,6 +20,9 @@ dependencies = [ "htmlmin", "csscompressor" ] +# Add dynamic field to handle entry points +dynamic = ["entry-points"] + classifiers = [ "Development Status :: 5 - Production/Stable", "Environment :: Console", @@ -35,6 +38,7 @@ classifiers = [ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Topic :: Software Development :: Testing" ] diff --git a/setup.py b/setup.py index f973f25..abe51ad 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ setup( name='behavex', - version='4.0.8', + version='4.0.9', license="MIT", platforms=['any'], python_requires='>=3.5', @@ -46,6 +46,7 @@ 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', + 'Programming Language :: Python :: 3.12', 'Topic :: Software Development :: Testing', ], ) diff --git a/tests/features/behavex_arguments.feature b/tests/features/behavex_arguments.feature new file mode 100644 index 0000000..1376db3 --- /dev/null +++ b/tests/features/behavex_arguments.feature @@ -0,0 +1,113 @@ +Feature: Behavex arguments + + @BEHAVEX_ARGUMENTS + Scenario Outline: Validate BehaveX arguments with separator + Given I have installed behavex + When I run the behavex command using "" separator for "passing_tests.feature" feature with the following scheme, processes and tags + | parallel_scheme | parallel_processes | tags | + | | | | + Then I should see the following behavex console outputs and exit code "0" + | output_line | + | PARALLEL_PROCESSES \| | + | PARALLEL_SCHEME \| | + | Exit code: 0 | + And I should not see error messages in the output + And I should see the same number of scenarios in the reports + And I should see the generated HTML report does not contain internal BehaveX variables and tags + Examples: + | argument_separator | parallel_processes | parallel_scheme | tags | + | blank | 1 | scenario | -t @PASSING_TAG_1,@PASSING_TAG_2,@PASSING_TAG_3 | + | equal | 1 | scenario | -t=@PASSING_TAG_1,@PASSING_TAG_2,@PASSING_TAG_3 | + | blank | 1 | scenario | -t @PASSING_TAG_1,PASSING_TAG_3 -t ~@PASSING_TAG_2 | + | equal | 1 | scenario | -t @PASSING_TAG_1,PASSING_TAG_3 -t ~@PASSING_TAG_2 | + | blank | 2 | scenario | -t @PASSING_TAG_1,@PASSING_TAG_2,@PASSING_TAG_3 | + | equal | 2 | scenario | -t=@PASSING_TAG_1,@PASSING_TAG_2,@PASSING_TAG_3 | + | blank | 2 | scenario | -t @PASSING_TAG_1,PASSING_TAG_3 -t ~@PASSING_TAG_2 | + | equal | 2 | scenario | -t @PASSING_TAG_1,PASSING_TAG_3 -t ~@PASSING_TAG_2 | + | blank | 1 | feature | -t @PASSING_TAG_1,@PASSING_TAG_2,@PASSING_TAG_3 | + | equal | 1 | feature | -t=@PASSING_TAG_1,@PASSING_TAG_2,@PASSING_TAG_3 | + | blank | 1 | feature | -t @PASSING_TAG_1,PASSING_TAG_3 -t ~@PASSING_TAG_2 | + | equal | 1 | feature | -t @PASSING_TAG_1,PASSING_TAG_3 -t ~@PASSING_TAG_2 | + | blank | 2 | feature | -t @PASSING_TAG_1,@PASSING_TAG_2,@PASSING_TAG_3 | + | equal | 2 | feature | -t=@PASSING_TAG_1,@PASSING_TAG_2,@PASSING_TAG_3 | + | blank | 2 | feature | -t @PASSING_TAG_1,PASSING_TAG_3 -t ~@PASSING_TAG_2 | + | equal | 2 | feature | -t @PASSING_TAG_1,PASSING_TAG_3 -t ~@PASSING_TAG_2 | + Examples: + | argument_separator | parallel_processes | parallel_scheme | tags | + | blank | 1 | scenario | | + | equal | 1 | scenario | | + | blank | 2 | scenario | | + | equal | 2 | scenario | | + | blank | 1 | feature | | + | equal | 1 | feature | | + | blank | 2 | feature | | + | equal | 2 
| feature | | + + @BEHAVEX_ARGUMENTS + Scenario Outline: Validate BehaveX arguments considering feature name with scenario line + When I run the behavex command using "" separator for "passing_tests.feature:13" feature with the following scheme, processes and tags + | parallel_scheme | parallel_processes | tags | + | | | | + Then I should see the following behavex console outputs and exit code "0" + | output_line | + | PARALLEL_PROCESSES \| | + | PARALLEL_SCHEME \| | + | Exit code: 0 | + And I should not see error messages in the output + And I should see the same number of scenarios in the reports not considering the skipped scenarios + Examples: + | argument_separator | parallel_processes | parallel_scheme | tags | + | blank | 1 | scenario | | + | equal | 1 | feature | | + | blank | 2 | scenario | | + | equal | 2 | feature | | + + + @BEHAVEX_ARGUMENTS + Scenario Outline: Validate BehaveX arguments with multiple feature paths + When I run the behavex command using "" separator for "passing_tests.feature" and "failing_tests.feature" features with the following scheme, processes and tags + | parallel_scheme | parallel_processes | tags | + | | | | + Then I should see the following behavex console outputs and exit code "1" + | output_line | + | PARALLEL_PROCESSES \| | + | PARALLEL_SCHEME \| | + | Exit code: 1 | + And I should not see exception messages in the output + And I should see the same number of scenarios in the reports not considering the skipped scenarios + And I should see the generated HTML report does not contain internal BehaveX variables and tags + Examples: + | argument_separator | parallel_processes | parallel_scheme | tags | + | blank | 1 | scenario | | + | equal | 1 | feature | | + | blank | 2 | scenario | | + | equal | 2 | feature | | + + +@BEHAVEX_ARGUMENTS @SCENARIO_NAME + Scenario Outline: Validate BehaveX arguments by running a scenario by its name + When I run the behavex command with scenario name "" and the following scheme, processes and tags + | parallel_scheme | parallel_processes | tags | + | | | | + Then I should see the following behavex console outputs and exit code "0" + | output_line | + | PARALLEL_PROCESSES \| | + | PARALLEL_SCHEME \| | + | Exit code: 0 | + And I should not see exception messages in the output + And I should see the HTML report was generated and contains "" scenarios + And I should see the same number of scenarios in the reports not considering the skipped scenarios + And I should see the generated HTML report does not contain internal BehaveX variables and tags + Examples: + | argument_separator | parallel_processes | parallel_scheme | tags | test_scenario_name | total_scenarios | + | blank | 1 | scenario | | This test should pass and contains | 4 | + | equal | 1 | feature | | This test should pass and contains | 4 | + | blank | 2 | scenario | | This test should pass and contains | 4 | + | equal | 2 | feature | | This test should pass and contains | 4 | + | equal | 2 | feature | | Non existing scenario | 0 | + | equal | 2 | scenario | | Non existing scenario | 0 | + | equal | 2 | scenario | | This test should pass and contains a tag | 1 | + | equal | 2 | scenario | -t PASSING_TAG_1 | This test should pass and contains a tag | 1 | + | equal | 2 | feature | -t PASSING_TAG_1 | This test should pass and contains a tag | 1 | + | equal | 2 | scenario | -t @PASSING_TAG_1 | This test should pass and contains a tag | 1 | + | equal | 2 | feature | -t @PASSING_TAG_1 | This test should pass and contains a tag | 1 | diff --git 
a/tests/features/dry_run.feature b/tests/features/dry_run.feature index 68b2b49..f414ebf 100644 --- a/tests/features/dry_run.feature +++ b/tests/features/dry_run.feature @@ -9,3 +9,5 @@ Feature: Dry run | Dry run completed | | Exit code: 0 | And I should not see error messages in the output + And I should see the HTML report was generated and contains scenarios + And I should see the generated HTML report does not contain internal BehaveX variables and tags diff --git a/tests/features/failing_scenarios.feature b/tests/features/failing_scenarios.feature index c8740b9..fba39e0 100644 --- a/tests/features/failing_scenarios.feature +++ b/tests/features/failing_scenarios.feature @@ -10,3 +10,4 @@ Feature: Failing Scenarios | Exit code: 1 | And I should not see exception messages in the output And I should see the same number of scenarios in the reports and the console output + And I should see the generated HTML report does not contain internal BehaveX variables and tags diff --git a/tests/features/parallel_executions.feature b/tests/features/parallel_executions.feature index 1fab49f..6d86a57 100644 --- a/tests/features/parallel_executions.feature +++ b/tests/features/parallel_executions.feature @@ -31,6 +31,7 @@ Feature: Parallel executions | 1 scenario passed, 0 failed | And I should not see error messages in the output And I should see the same number of scenarios in the reports and the console output + And I should see the generated HTML report does not contain internal BehaveX variables and tags Examples: | parallel_scheme | parallel_processes | tags | | scenario | 3 | -t=@PASSING_TAG_3 -t=@PASSING_TAG_3_1 | diff --git a/tests/features/passing_scenarios.feature b/tests/features/passing_scenarios.feature index 4ed05d0..b63ba4d 100644 --- a/tests/features/passing_scenarios.feature +++ b/tests/features/passing_scenarios.feature @@ -10,6 +10,8 @@ Feature: Passing Scenarios | Exit code: 0 | And I should not see error messages in the output And I should see the same number of scenarios in the reports and the console output + And I should see the generated HTML report does not contain internal BehaveX variables and tags + @PASSING Scenario: Passing tests with AND tags @@ -23,6 +25,7 @@ Feature: Passing Scenarios | Exit code: 0 | And I should not see error messages in the output And I should see the same number of scenarios in the reports + And I should see the generated HTML report does not contain internal BehaveX variables and tags @PASSING @WIP Scenario: Passing tests with NOT tags @@ -36,3 +39,4 @@ Feature: Passing Scenarios | Exit code: 0 | And I should not see error messages in the output And I should see the same number of scenarios in the reports and the console output + And I should see the generated HTML report does not contain internal BehaveX variables and tags diff --git a/tests/features/progress_bar.feature b/tests/features/progress_bar.feature index 9d78e7e..431f449 100644 --- a/tests/features/progress_bar.feature +++ b/tests/features/progress_bar.feature @@ -8,11 +8,11 @@ Background: Scenario Outline: Progress bar should be shown when running tests in parallel When I run the behavex command with "" parallel processes and parallel scheme set as "" Then I should see the following behavex console outputs and exit code "1" - | output_line | - | PARALLEL_PROCESSES \| | - | PARALLEL_SCHEME \| | - | Exit code: 1 | - | Executed s: 100%\|███████████████\| | + | output_line | + | PARALLEL_PROCESSES \| | + | PARALLEL_SCHEME \| | + | Exit code: 1 | + | Executed s: 100%\| | And I should not 
see error messages in the output Examples: | parallel_scheme | parallel_processes | diff --git a/tests/features/renaming_scenarios.feature b/tests/features/renaming_scenarios.feature index b39cc2a..19c3ccb 100644 --- a/tests/features/renaming_scenarios.feature +++ b/tests/features/renaming_scenarios.feature @@ -22,6 +22,7 @@ Feature: Renaming Scenarios | Exit code: 0 | And I should not see error messages in the output And I should see the same number of scenarios in the reports and the console output + And I should see the generated HTML report does not contain internal BehaveX variables and tags Examples: | parallel_scheme | parallel_processes | | scenario | 3 | diff --git a/tests/features/secondary_features/skipped_tests.feature b/tests/features/secondary_features/skipped_tests.feature index 089f308..01a6e9d 100644 --- a/tests/features/secondary_features/skipped_tests.feature +++ b/tests/features/secondary_features/skipped_tests.feature @@ -1,6 +1,6 @@ Feature: Skipped Tests - @skip + @SKIP Scenario: This test should be skipped Given a condition to skip the scenario Then I perform the condition diff --git a/tests/features/skipped_scenarios.feature b/tests/features/skipped_scenarios.feature index 1065a91..415896c 100644 --- a/tests/features/skipped_scenarios.feature +++ b/tests/features/skipped_scenarios.feature @@ -10,3 +10,4 @@ Feature: Skipped Scenarios | Exit code: 0 And I should not see error messages in the output And I should see the same number of scenarios in the reports and the console output + And I should see the generated HTML report does not contain internal BehaveX variables and tags diff --git a/tests/features/steps/execution_steps.py b/tests/features/steps/execution_steps.py index 7abd758..a1a8d0f 100644 --- a/tests/features/steps/execution_steps.py +++ b/tests/features/steps/execution_steps.py @@ -18,52 +18,52 @@ def step_impl(context): @when('I run the behavex command with a passing test') @when('I run the behavex command with passing tests') def step_impl(context): - context.output_path = 'output/output_{}'.format(get_random_number(6)) - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/passing_tests.feature'), '-o', context.output_path] + context.output_path = os.path.join('output', 'output_{}'.format(get_random_number(6))) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features', 'passing_tests.feature'), '-o', context.output_path] execute_command(context, execution_args) @when('I run the behavex command that renames scenarios and features') def step_impl(context): - context.output_path = 'output/output_{}'.format(get_random_number(6)) - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/rename_tests.feature'), '-o', context.output_path] + context.output_path = os.path.join('output', 'output_{}'.format(get_random_number(6))) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features', 'rename_tests.feature'), '-o', context.output_path] execute_command(context, execution_args) @when('I run the behavex command with a failing test') def step_impl(context): - context.output_path = 'output/output_{}'.format(get_random_number(6)) - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/failing_tests.feature'), '-o', context.output_path] + context.output_path = os.path.join('output', 'output_{}'.format(get_random_number(6))) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features', 
'failing_tests.feature'), '-o', context.output_path] execute_command(context, execution_args) @when('I run the behavex command with a crashing test') def step_impl(context, parallel_processes="1", parallel_scheme='scenario'): - context.output_path = 'output/output_{}'.format(get_random_number(6)) + context.output_path = os.path.join('output', 'output_{}'.format(get_random_number(6))) execution_args = ['behavex', - os.path.join(tests_features_path, os.path.join(tests_features_path, 'crashing_features/crashing_tests.feature')), + os.path.join(tests_features_path, os.path.join(tests_features_path, 'crashing_features', 'crashing_tests.feature')), '-o', context.output_path] execute_command(context, execution_args) @when('I run the behavex command with a skipped test') def step_impl(context): - context.output_path = 'output/output_{}'.format(get_random_number(6)) - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/skipped_tests.feature'), '-o', context.output_path] + context.output_path = os.path.join('output', 'output_{}'.format(get_random_number(6))) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features', 'skipped_tests.feature'), '-o', context.output_path] execute_command(context, execution_args) @when('I run the behavex command with an untested test') def step_impl(context): - context.output_path = 'output/output_{}'.format(get_random_number(6)) - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/untested_tests.feature'), '-o', context.output_path] + context.output_path = os.path.join('output', 'output_{}'.format(get_random_number(6))) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features', 'untested_tests.feature'), '-o', context.output_path] execute_command(context, execution_args) @when('I run the behavex command with "{parallel_processes}" parallel processes and parallel scheme set as "{parallel_schema}"') def step_impl(context, parallel_processes, parallel_schema): - context.output_path = 'output/output_{}'.format(get_random_number(6)) - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/'), '-o', context.output_path, '--parallel-processes', parallel_processes, '--parallel-scheme', parallel_schema] + context.output_path = os.path.join('output', 'output_{}'.format(get_random_number(6))) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features'), '-o', context.output_path, '--parallel-processes', parallel_processes, '--parallel-scheme', parallel_schema] execute_command(context, execution_args) @@ -74,14 +74,38 @@ def step_impl(context, parallel_processes, parallel_scheme): @when('I run the behavex command with the following scheme, processes and tags') -def step_impl(context): +@when('I run the behavex command with scenario name "{scenario_name}" and the following scheme, processes and tags') +@when('I run the behavex command using "{argument_separator}" separator with the following scheme, processes and tags') +@when('I run the behavex command using "{argument_separator}" separator for "{feature_name}" feature with the following scheme, processes and tags') +@when('I run the behavex command using "{argument_separator}" separator for "{feature_name}" and "{feature_name_2}" features with the following scheme, processes and tags') +def run_command_with_scheme_processes_and_tags(context, scenario_name=None, argument_separator="equal", feature_name=None, feature_name_2=None): scheme = 
context.table[0]['parallel_scheme'] processes = context.table[0]['parallel_processes'] tags = context.table[0]['tags'] - context.output_path = 'output/output_{}'.format(get_random_number(6)) + context.output_path = os.path.join('output', 'output_{}'.format(get_random_number(6))) tags_to_folder_name = get_tags_string(tags) - tags_array = get_tags_arguments(tags) - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/'), '-o', context.output_path, '--parallel-processes', processes, '--parallel-scheme', scheme] + tags_array + if not tags: + tags_array = [] + else: + tags_array = get_tags_arguments(tags) + if feature_name: + if feature_name_2: + feature_path = os.path.join(tests_features_path, 'secondary_features', feature_name) + feature_path_2 = os.path.join(tests_features_path, 'secondary_features', feature_name_2) + else: + feature_path = os.path.join(tests_features_path, 'secondary_features', feature_name) + else: + feature_path = os.path.join(tests_features_path, 'secondary_features') + if argument_separator == 'equal': + execution_args = ['behavex', feature_path, '-o', context.output_path, '--parallel-processes=' + processes, '--parallel-scheme=' + scheme] + tags_array + else: + execution_args = ['behavex', feature_path, '-o', context.output_path, '--parallel-processes', processes, '--parallel-scheme', scheme] + tags_array + if feature_name_2: + # append the second feature path to the execution arguments in index 2 + execution_args.insert(2, feature_path_2) + if scenario_name: + execution_args.append('--name') + execution_args.append(scenario_name.replace(' ', '\\ ')) execute_command(context, execution_args) @@ -90,16 +114,16 @@ def step_impl(context): tags = context.table[0]['tags'] tags_to_folder_name = get_tags_string(tags) tags_array = get_tags_arguments(tags) - context.output_path = 'output/output_{}'.format(get_random_number(6)) - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/'), '-o', context.output_path] + tags_array + context.output_path = os.path.join('output', 'output_{}'.format(get_random_number(6))) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features'), '-o', context.output_path] + tags_array execute_command(context, execution_args) @when('I run the behavex command by performing a dry run') def step_impl(context): # generate a random number between 1 and 1000000 completing with zeroes to 6 digits - context.output_path = 'output/output_{}'.format(get_random_number(6)) - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/'), '-o', context.output_path, '--dry-run'] + context.output_path = os.path.join('output', 'output_{}'.format(get_random_number(6))) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features'), '-o', context.output_path, '--dry-run'] execute_command(context, execution_args) @@ -134,16 +158,52 @@ def step_impl(context): logging.info(f"Total scenarios in the JUnit reports: {total_scenarios_in_junit_reports}") total_scenarios_in_console_output = get_total_scenarios_in_console_output(context) logging.info(f"Total scenarios in the console output: {total_scenarios_in_console_output}") - assert total_scenarios_in_html_report == total_scenarios_in_junit_reports == total_scenarios_in_console_output, f"Expected {total_scenarios} scenarios in the reports and the console output, but found {total_scenarios_in_html_report} in the HTML report, {total_scenarios_in_junit_reports} in the JUnit reports, and 
{total_scenarios_in_console} in the console output" + assert total_scenarios_in_html_report == total_scenarios_in_junit_reports == total_scenarios_in_console_output, f"Expected total scenarios to match, but found {total_scenarios_in_html_report} in the HTML report, {total_scenarios_in_junit_reports} in the JUnit reports, and {total_scenarios_in_console_output} in the console output" @then('I should see the same number of scenarios in the reports') -def step_impl(context): +def verify_total_scenarios_in_reports(context, consider_skipped_scenarios=True): total_scenarios_in_html_report = get_total_scenarios_in_html_report(context) logging.info(f"Total scenarios in the HTML report: {total_scenarios_in_html_report}") - total_scenarios_in_junit_reports = get_total_scenarios_in_junit_reports(context) + total_scenarios_in_junit_reports = get_total_scenarios_in_junit_reports(context, consider_skipped_scenarios) logging.info(f"Total scenarios in the JUnit reports: {total_scenarios_in_junit_reports}") - assert total_scenarios_in_html_report == total_scenarios_in_junit_reports, f"Expected {total_scenarios} scenarios in the reports, but found {total_scenarios_in_html_report} in the HTML report, {total_scenarios_in_junit_reports} in the JUnit reports" + assert total_scenarios_in_html_report == total_scenarios_in_junit_reports, f"Expected total scenarios to match, but found {total_scenarios_in_html_report} in the HTML report, {total_scenarios_in_junit_reports} in the JUnit reports" + + +@then('I should see the same number of scenarios in the reports not considering the skipped scenarios') +def step_impl(context): + verify_total_scenarios_in_reports(context, consider_skipped_scenarios=False) + + +@then('I should see the HTML report was generated and contains scenarios') +@then('I should see the HTML report was generated and contains "{total_scenarios}" scenarios') +def verify_total_scenarios_in_html_report(context, total_scenarios=None, consider_skipped_scenarios=True): + total_scenarios_in_html_report = get_total_scenarios_in_html_report(context) + logging.info(f"Total scenarios in the HTML report: {total_scenarios_in_html_report}") + if total_scenarios is not None: + assert total_scenarios_in_html_report == int(total_scenarios), f"Expected the HTML report to contain {total_scenarios} scenarios, but found {total_scenarios_in_html_report}" + else: + assert total_scenarios_in_html_report > 0, "Expected the HTML report to be generated and contain scenarios" + + +@then('I should see the generated HTML report contains the "{string_to_search}" string') +def verify_string_in_html_report(context, string_to_search, string_should_be_present=True): + total_string_instances_in_html_report = get_string_instances_from_html_report(context, string_to_search) + logging.info(f"Total instances of '{string_to_search}' in the HTML report: {total_string_instances_in_html_report}") + if string_should_be_present: + assert total_string_instances_in_html_report > 0, f"Expected the HTML report to contain the string '{string_to_search}'" + else: + assert total_string_instances_in_html_report == 0, f"Expected the HTML report to not contain the string '{string_to_search}'" + + +@then('I should see the generated HTML report does not contain internal BehaveX variables and tags') +def verify_string_not_in_html_report(context): + internal_behavex_variables_and_tags = ["BHX_", "BHX_TAG_"] + for variable_or_tag in internal_behavex_variables_and_tags: + total_string_instances_in_html_report = get_string_instances_from_html_report(context, 
variable_or_tag) + logging.info(f"Total instances of '{variable_or_tag}' in the HTML report: {total_string_instances_in_html_report}") + assert total_string_instances_in_html_report == 0, f"Expected the HTML report to not contain the string '{variable_or_tag}'" + def get_tags_arguments(tags): @@ -183,14 +243,24 @@ def get_total_scenarios_in_html_report(context): return html_content.count('data-scenario-tags=') -def get_total_scenarios_in_junit_reports(context): +def get_string_instances_from_html_report(context, string_to_search): + report_path = os.path.abspath(os.path.join(context.output_path, 'report.html')) + with open(report_path, 'r') as file: + html_content = file.read() + return html_content.lower().count(string_to_search.lower()) + + +def get_total_scenarios_in_junit_reports(context, consider_skipped_scenarios=True): junit_folder = os.path.abspath(os.path.join(context.output_path, 'behave')) total_scenarios_in_junit_reports = 0 for file in os.listdir(junit_folder): if file.endswith('.xml'): with open(os.path.join(junit_folder, file), 'r') as file: xml_content = file.read() - total_scenarios_in_junit_reports += xml_content.count('
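
Note: as an illustration of the scenario-line feature path support introduced in this patch (e.g. the new tests that run "passing_tests.feature:13"), the helpers added in behavex/utils.py are expected to behave roughly as sketched below. This snippet is an illustrative usage example, not part of the patch; the example path is hypothetical.

    # Usage sketch for the path-parsing helpers added in behavex/utils.py (assumed importable as shown)
    from behavex.utils import has_scenario_line_number, get_feature_and_scenario_line

    path = 'tests/features/secondary_features/passing_tests.feature:13'
    assert has_scenario_line_number(path)  # the last ':'-separated part is numeric
    feature_file, scenario_line = get_feature_and_scenario_line(path)
    # feature_file == 'tests/features/secondary_features/passing_tests.feature', scenario_line == '13'

    # A path without a trailing line number is returned as-is, with the scenario line set to None
    assert get_feature_and_scenario_line('passing_tests.feature') == ['passing_tests.feature', None]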