From a000626c10317ec658ab6b4660e2fb5c55d5f995 Mon Sep 17 00:00:00 2001 From: Kyle Finley Date: Wed, 17 Jul 2024 16:34:37 -0400 Subject: [PATCH] use ruff, start remediating ruff errors --- .../scripts/urlshortener/test_update_urls.py | 12 +- .github/scripts/urlshortener/update_urls.py | 2 +- Makefile | 38 +- .../prevent_privilege_escalation.py | 6 +- poetry.lock | 29 +- pyproject.toml | 115 ++++- quickstarts/conduit/update_env_endpoint.py | 4 +- runway/__init__.py | 10 +- runway/_cli/options.py | 2 +- runway/_cli/utils.py | 7 +- runway/_logging.py | 39 +- runway/blueprints/k8s/k8s_workers.py | 22 +- runway/blueprints/staticsite/auth_at_edge.py | 2 +- runway/blueprints/staticsite/staticsite.py | 10 +- runway/cfngin/actions/diff.py | 2 +- runway/cfngin/actions/graph.py | 3 +- runway/cfngin/awscli_yamlhelper.py | 3 +- runway/cfngin/blueprints/base.py | 11 +- runway/cfngin/blueprints/raw.py | 2 +- runway/cfngin/blueprints/testutil.py | 3 +- runway/cfngin/blueprints/variables/types.py | 23 +- runway/cfngin/cfngin.py | 2 +- runway/cfngin/dag/__init__.py | 13 +- runway/cfngin/exceptions.py | 2 +- runway/cfngin/hooks/acm.py | 14 +- runway/cfngin/hooks/aws_lambda.py | 15 +- runway/cfngin/hooks/awslambda/base_classes.py | 2 +- .../hooks/awslambda/deployment_package.py | 5 +- runway/cfngin/hooks/awslambda/docker.py | 2 +- runway/cfngin/hooks/awslambda/source_code.py | 3 +- runway/cfngin/hooks/docker/hook_data.py | 2 +- runway/cfngin/hooks/docker/image/_build.py | 2 +- runway/cfngin/hooks/ecr/_purge_repository.py | 2 +- runway/cfngin/hooks/keypair.py | 4 +- runway/cfngin/hooks/ssm/parameter.py | 7 +- .../hooks/staticsite/upload_staticsite.py | 2 +- runway/cfngin/hooks/staticsite/utils.py | 3 +- runway/cfngin/hooks/utils.py | 27 +- runway/cfngin/logger/__init__.py | 2 +- runway/cfngin/lookups/handlers/ami.py | 4 +- runway/cfngin/lookups/handlers/awslambda.py | 4 +- runway/cfngin/lookups/handlers/default.py | 4 +- runway/cfngin/lookups/handlers/dynamodb.py | 4 +- runway/cfngin/lookups/handlers/envvar.py | 4 +- runway/cfngin/lookups/handlers/file.py | 5 +- runway/cfngin/lookups/handlers/hook_data.py | 4 +- runway/cfngin/lookups/handlers/kms.py | 4 +- runway/cfngin/lookups/handlers/output.py | 4 +- runway/cfngin/lookups/handlers/rxref.py | 4 +- runway/cfngin/lookups/handlers/split.py | 4 +- runway/cfngin/lookups/handlers/xref.py | 4 +- runway/cfngin/plan.py | 2 +- runway/cfngin/providers/aws/default.py | 6 +- runway/cfngin/status.py | 4 +- runway/cfngin/utils.py | 10 +- runway/compat.py | 29 +- runway/config/__init__.py | 3 +- runway/config/models/runway/__init__.py | 2 +- runway/constants.py | 6 +- runway/context/_cfngin.py | 7 +- runway/core/providers/aws/_assume_role.py | 6 +- .../aws/s3/_helpers/action_architecture.py | 2 +- .../providers/aws/s3/_helpers/comparator.py | 3 +- .../aws/s3/_helpers/file_generator.py | 38 +- .../aws/s3/_helpers/file_info_builder.py | 3 +- .../core/providers/aws/s3/_helpers/filters.py | 16 +- .../providers/aws/s3/_helpers/format_path.py | 20 +- .../providers/aws/s3/_helpers/parameters.py | 2 +- .../core/providers/aws/s3/_helpers/results.py | 6 +- .../providers/aws/s3/_helpers/s3handler.py | 14 +- .../aws/s3/_helpers/transfer_config.py | 16 +- .../core/providers/aws/s3/_helpers/utils.py | 8 +- runway/dependency_managers/_pip.py | 7 +- runway/dependency_managers/_pipenv.py | 4 +- runway/dependency_managers/_poetry.py | 4 +- runway/env_mgr/__init__.py | 3 +- runway/env_mgr/kbenv.py | 6 +- runway/env_mgr/tfenv.py | 93 ++-- runway/exceptions.py | 34 +- 
runway/lookups/handlers/base.py | 43 +- runway/lookups/handlers/cfn.py | 10 +- runway/lookups/handlers/ecr.py | 7 +- runway/lookups/handlers/env.py | 8 +- runway/lookups/handlers/random_string.py | 12 +- runway/lookups/handlers/ssm.py | 6 +- runway/lookups/handlers/var.py | 6 +- runway/lookups/registry.py | 10 +- runway/mixins.py | 50 +-- runway/module/base.py | 39 +- runway/module/cdk.py | 33 +- runway/module/cloudformation.py | 13 +- runway/module/k8s.py | 22 +- runway/module/serverless.py | 64 +-- runway/module/staticsite/handler.py | 66 +-- runway/module/staticsite/options/models.py | 7 +- runway/module/staticsite/parameters/models.py | 7 +- runway/module/staticsite/utils.py | 2 +- runway/module/terraform.py | 77 ++-- runway/module/utils.py | 22 +- runway/s3_utils.py | 35 +- runway/sources/git.py | 7 +- runway/sources/source.py | 7 +- runway/templates/cdk-py/__init__.py | 1 + runway/templates/cdk-py/app.py | 3 +- .../templates/cdk-py/hello/hello_construct.py | 3 +- runway/templates/cdk-py/hello/hello_stack.py | 1 + .../k8s-master.cfn/k8s_hooks/auth_map.py | 10 +- .../k8s-master.cfn/k8s_hooks/awscli.py | 5 +- .../k8s-master.cfn/k8s_hooks/bootstrap.py | 13 +- .../k8s-flux-repo/flux.tf/__init__.py | 1 + .../k8s-tf-repo/eks-base.tf/__init__.py | 1 + .../k8s-tf-repo/job-s3-echo.tf/__init__.py | 1 + .../templates/sls-py/hello_world/__init__.py | 9 +- runway/tests/handlers/base.py | 8 +- runway/tests/handlers/cfn_lint.py | 6 +- runway/tests/handlers/script.py | 4 +- runway/tests/handlers/yaml_lint.py | 34 +- runway/tests/registry.py | 6 +- runway/utils/__init__.py | 160 ++++--- runway/utils/_file_hash.py | 13 +- runway/utils/_version.py | 2 +- runway/variables.py | 196 +++++---- tests/conftest.py | 15 +- tests/factories.py | 6 +- .../cdk/test_multistack/test_runner.py | 8 +- .../cfngin/fixtures/blueprints/_bastion.py | 10 +- .../cfngin/fixtures/blueprints/_broken.py | 4 +- .../cfngin/fixtures/blueprints/_dummy.py | 6 +- .../fixtures/blueprints/_lambda_function.py | 10 +- .../cfngin/fixtures/blueprints/_vpc.py | 4 +- .../cfngin/fixtures/hooks/cleanup.py | 2 +- .../sample_app/src/docker/index.py | 6 +- .../sample_app/src/docker_mysql/index.py | 6 +- .../sample_app/src/docker_xmlsec/index.py | 6 +- .../sample_app/src/local/index.py | 6 +- .../src/local_xmlsec_layer/index.py | 6 +- .../sample_app/src/type_defs.py | 10 +- .../hooks/test_awslambda/test_runner.py | 10 +- .../cfngin/test_assume_role/test_runner.py | 16 +- .../lambda_src/dockerize_src/dockerize.py | 14 +- .../nondockerize_src/nondockerize.py | 14 +- .../test_aws_lambda_hook/test_runner.py | 4 +- .../test_destroy_removed/test_runner.py | 4 +- .../test_duplicate_stack/test_runner.py | 4 +- .../cfngin/test_locked_stack/test_runner.py | 4 +- .../cfngin/test_parallel/test_runner.py | 8 +- .../cfngin/test_raw_cfn/test_runner.py | 4 +- .../test_recreate_failed/test_runner.py | 4 +- .../test_rollback_dependant/test_runner.py | 4 +- .../cfngin/test_simple_build/test_runner.py | 8 +- .../cfngin/test_simple_diff/blueprints.py | 6 +- .../cfngin/test_simple_diff/test_runner.py | 6 +- tests/functional/conftest.py | 12 +- .../serverless/test_promotezip/test_runner.py | 16 +- tests/functional/sources/git/test_runner.py | 8 +- .../test_simple_build/test_runner.py | 8 +- tests/functional/terraform/conftest.py | 10 +- .../test_backend_local_2_s3/test_runner.py | 18 +- .../test_backend_no_2_local/test_runner.py | 20 +- .../terraform/test_base/test_runner.py | 10 +- .../cli/commands/kbenv/test_install.py | 40 +- .../cli/commands/kbenv/test_list.py | 42 +- 
.../cli/commands/kbenv/test_run.py | 35 +- .../cli/commands/kbenv/test_uninstall.py | 81 ++-- tests/integration/cli/commands/test_deploy.py | 8 +- .../integration/cli/commands/test_destroy.py | 20 +- .../cli/commands/test_dismantle.py | 8 +- tests/integration/cli/commands/test_docs.py | 4 +- .../integration/cli/commands/test_envvars.py | 14 +- .../cli/commands/test_gen_sample.py | 28 +- tests/integration/cli/commands/test_init.py | 8 +- tests/integration/cli/commands/test_new.py | 6 +- tests/integration/cli/commands/test_plan.py | 6 +- .../cli/commands/test_preflight.py | 8 +- .../integration/cli/commands/test_takeoff.py | 8 +- tests/integration/cli/commands/test_taxi.py | 8 +- tests/integration/cli/commands/test_test.py | 24 +- .../integration/cli/commands/test_whichenv.py | 8 +- .../cli/commands/tfenv/test_install.py | 39 +- .../cli/commands/tfenv/test_list.py | 14 +- .../cli/commands/tfenv/test_run.py | 29 +- .../cli/commands/tfenv/test_uninstall.py | 81 ++-- tests/integration/conftest.py | 6 +- tests/unit/cfngin/actions/conftest.py | 5 +- tests/unit/cfngin/actions/test_base.py | 79 ++-- tests/unit/cfngin/actions/test_deploy.py | 130 +++--- tests/unit/cfngin/actions/test_destroy.py | 44 +- tests/unit/cfngin/actions/test_diff.py | 48 +-- tests/unit/cfngin/actions/test_init.py | 13 +- tests/unit/cfngin/blueprints/test_base.py | 22 +- .../cfngin/blueprints/test_cfngin_bucket.py | 2 +- tests/unit/cfngin/blueprints/test_raw.py | 10 +- tests/unit/cfngin/blueprints/test_testutil.py | 3 +- .../cfngin/blueprints/variables/test_types.py | 5 +- tests/unit/cfngin/conftest.py | 9 +- tests/unit/cfngin/factories.py | 19 +- tests/unit/cfngin/fixtures/mock_blueprints.py | 40 +- tests/unit/cfngin/fixtures/mock_hooks.py | 4 +- .../unit/cfngin/hooks/awslambda/factories.py | 3 +- .../hooks/awslambda/models/test_args.py | 7 +- .../test__deployment_package.py | 2 +- .../python_requirements/test__docker.py | 5 +- .../python_requirements/test__project.py | 12 +- .../hooks/awslambda/test__python_hooks.py | 8 +- .../hooks/awslambda/test_base_classes.py | 38 +- .../awslambda/test_deployment_package.py | 42 +- .../cfngin/hooks/awslambda/test_docker.py | 28 +- .../hooks/awslambda/test_source_code.py | 2 +- .../cfngin/hooks/docker/image/test_build.py | 14 +- .../cfngin/hooks/docker/image/test_push.py | 6 +- .../cfngin/hooks/docker/image/test_remove.py | 8 +- .../cfngin/hooks/docker/test_data_models.py | 10 +- .../cfngin/hooks/docker/test_hook_data.py | 6 +- tests/unit/cfngin/hooks/docker/test_login.py | 4 +- .../hooks/ecr/test__purge_repositroy.py | 14 +- tests/unit/cfngin/hooks/ssm/conftest.py | 16 +- tests/unit/cfngin/hooks/ssm/test_parameter.py | 15 +- .../cfngin/hooks/staticsite/test_cleanup.py | 7 +- .../staticsite/test_upload_staticsite.py | 20 +- .../cfngin/hooks/staticsite/test_utils.py | 8 +- tests/unit/cfngin/hooks/test_acm.py | 217 +++++----- tests/unit/cfngin/hooks/test_aws_lambda.py | 174 ++++---- tests/unit/cfngin/hooks/test_base.py | 43 +- tests/unit/cfngin/hooks/test_cleanup_s3.py | 8 +- tests/unit/cfngin/hooks/test_cleanup_ssm.py | 6 +- tests/unit/cfngin/hooks/test_ecs.py | 11 +- tests/unit/cfngin/hooks/test_iam.py | 16 +- tests/unit/cfngin/hooks/test_keypair.py | 19 +- tests/unit/cfngin/hooks/test_route53.py | 4 +- tests/unit/cfngin/hooks/test_utils.py | 84 ++-- .../unit/cfngin/lookups/handlers/test_ami.py | 14 +- .../cfngin/lookups/handlers/test_awslambda.py | 172 +++----- .../cfngin/lookups/handlers/test_default.py | 9 +- .../cfngin/lookups/handlers/test_dynamodb.py | 84 ++-- 
.../cfngin/lookups/handlers/test_envvar.py | 7 +- .../unit/cfngin/lookups/handlers/test_file.py | 7 +- .../cfngin/lookups/handlers/test_hook_data.py | 10 +- .../unit/cfngin/lookups/handlers/test_kms.py | 6 +- .../cfngin/lookups/handlers/test_output.py | 13 +- .../cfngin/lookups/handlers/test_rxref.py | 9 +- .../cfngin/lookups/handlers/test_split.py | 5 +- .../unit/cfngin/lookups/handlers/test_xref.py | 11 +- .../unit/cfngin/providers/aws/test_default.py | 290 ++++++------- tests/unit/cfngin/test_cfngin.py | 4 +- tests/unit/cfngin/test_dag.py | 6 +- tests/unit/cfngin/test_environment.py | 24 +- tests/unit/cfngin/test_exceptions.py | 4 +- tests/unit/cfngin/test_plan.py | 175 ++++---- tests/unit/cfngin/test_stack.py | 332 +++++++++++---- tests/unit/cfngin/test_tokenize_userdata.py | 10 +- tests/unit/cfngin/test_utils.py | 145 ++++--- .../config/components/runway/test_base.py | 8 +- .../components/runway/test_deployment_def.py | 10 +- .../components/runway/test_module_def.py | 12 +- .../components/runway/test_variables_def.py | 2 +- .../unit/config/models/cfngin/test_cfngin.py | 23 +- .../models/cfngin/test_package_sources.py | 5 +- .../config/models/runway/options/test_cdk.py | 3 +- .../models/runway/options/test_serverless.py | 6 +- .../models/runway/options/test_terraform.py | 15 +- .../unit/config/models/runway/test_runway.py | 4 +- tests/unit/config/models/test_base.py | 9 +- tests/unit/config/models/test_utils.py | 2 +- tests/unit/config/test_config.py | 41 +- tests/unit/conftest.py | 73 ++-- tests/unit/context/test_base.py | 6 +- tests/unit/context/test_cfngin.py | 36 +- tests/unit/context/test_runway.py | 2 +- tests/unit/context/test_sys_info.py | 34 +- .../components/test_deploy_environment.py | 46 +- tests/unit/core/components/test_deployment.py | 49 +-- tests/unit/core/components/test_module.py | 27 +- .../unit/core/components/test_module_path.py | 29 +- .../unit/core/components/test_module_type.py | 12 +- .../providers/aws/s3/_helpers/conftest.py | 17 +- .../providers/aws/s3/_helpers/factories.py | 10 +- .../s3/_helpers/sync_strategy/test_base.py | 36 +- .../sync_strategy/test_exact_timestamps.py | 8 +- .../_helpers/sync_strategy/test_register.py | 3 +- .../s3/_helpers/test_action_architecture.py | 2 +- .../aws/s3/_helpers/test_comparator.py | 109 ++--- .../aws/s3/_helpers/test_file_generator.py | 7 +- .../aws/s3/_helpers/test_file_info.py | 2 +- .../aws/s3/_helpers/test_file_info_builder.py | 2 +- .../aws/s3/_helpers/test_format_path.py | 8 +- .../aws/s3/_helpers/test_parameters.py | 7 +- .../providers/aws/s3/_helpers/test_results.py | 97 ++--- .../aws/s3/_helpers/test_s3handler.py | 64 +-- .../aws/s3/_helpers/test_transfer_config.py | 6 +- .../providers/aws/s3/_helpers/test_utils.py | 86 ++-- .../unit/core/providers/aws/s3/test_bucket.py | 11 +- .../providers/aws/s3/test_sync_handler.py | 3 +- .../core/providers/aws/test_assume_role.py | 14 +- tests/unit/core/test_core.py | 23 +- tests/unit/dependency_managers/test__pip.py | 15 +- .../unit/dependency_managers/test__pipenv.py | 9 +- .../unit/dependency_managers/test__poetry.py | 8 +- .../dependency_managers/test_base_classes.py | 2 +- tests/unit/env_mgr/test_env_mgr.py | 18 +- tests/unit/env_mgr/test_tfenv.py | 19 +- tests/unit/factories.py | 399 ++++++++++++------ tests/unit/lookups/handlers/test_base.py | 16 +- tests/unit/lookups/handlers/test_cfn.py | 32 +- tests/unit/lookups/handlers/test_ecr.py | 13 +- tests/unit/lookups/handlers/test_env.py | 2 +- .../lookups/handlers/test_random_string.py | 4 +- 
tests/unit/lookups/handlers/test_ssm.py | 10 +- tests/unit/lookups/handlers/test_var.py | 4 +- tests/unit/lookups/test_registry.py | 2 +- tests/unit/mock_docker/fake_api.py | 245 +++++------ tests/unit/mock_docker/fake_api_client.py | 8 +- tests/unit/mock_docker/fake_stat.py | 4 +- tests/unit/module/conftest.py | 2 +- tests/unit/module/staticsite/conftest.py | 4 +- .../module/staticsite/options/test_models.py | 18 +- .../staticsite/parameters/test_models.py | 8 +- tests/unit/module/staticsite/test_handler.py | 11 +- tests/unit/module/test_base.py | 30 +- tests/unit/module/test_cdk.py | 44 +- tests/unit/module/test_cloudformation.py | 4 +- tests/unit/module/test_k8s.py | 19 +- tests/unit/module/test_serverless.py | 33 +- tests/unit/module/test_terraform.py | 75 ++-- tests/unit/module/test_utils.py | 17 +- tests/unit/test_mixins.py | 22 +- tests/unit/test_variables.py | 123 +++--- tests/unit/utils/test_utils.py | 47 +-- 327 files changed, 3954 insertions(+), 3638 deletions(-) create mode 100644 runway/templates/cdk-py/__init__.py create mode 100644 runway/templates/k8s-flux-repo/flux.tf/__init__.py create mode 100644 runway/templates/k8s-tf-repo/eks-base.tf/__init__.py create mode 100644 runway/templates/k8s-tf-repo/job-s3-echo.tf/__init__.py diff --git a/.github/scripts/urlshortener/test_update_urls.py b/.github/scripts/urlshortener/test_update_urls.py index 63e4c253d..58f85f8d7 100644 --- a/.github/scripts/urlshortener/test_update_urls.py +++ b/.github/scripts/urlshortener/test_update_urls.py @@ -46,22 +46,22 @@ def test_handler(mock_put_item: Mock): call( table=table, id_val="runway/latest/linux", - target="https://test-bucket.s3-us-west-2.amazonaws.com/" "runway/1.0.0/linux/runway", + target="https://test-bucket.s3-us-west-2.amazonaws.com/runway/1.0.0/linux/runway", ), call( table=table, id_val="runway/1.0.0/linux", - target="https://test-bucket.s3-us-west-2.amazonaws.com/" "runway/1.0.0/linux/runway", + target="https://test-bucket.s3-us-west-2.amazonaws.com/runway/1.0.0/linux/runway", ), call( table=table, id_val="runway/latest/osx", - target="https://test-bucket.s3-us-west-2.amazonaws.com/" "runway/1.0.0/osx/runway", + target="https://test-bucket.s3-us-west-2.amazonaws.com/runway/1.0.0/osx/runway", ), call( table=table, id_val="runway/1.0.0/osx", - target="https://test-bucket.s3-us-west-2.amazonaws.com/" "runway/1.0.0/osx/runway", + target="https://test-bucket.s3-us-west-2.amazonaws.com/runway/1.0.0/osx/runway", ), call( table=table, @@ -82,14 +82,14 @@ def test_handler(mock_put_item: Mock): call( table=table, id_val="runway/1.1.0/linux", - target="https://test-bucket.s3-us-east-1.amazonaws.com/" "runway/1.1.0/linux/runway", + target="https://test-bucket.s3-us-east-1.amazonaws.com/runway/1.1.0/linux/runway", ) ) calls.append( call( table=table, id_val="runway/1.1.0/osx", - target="https://test-bucket.s3-us-east-1.amazonaws.com/" "runway/1.1.0/osx/runway", + target="https://test-bucket.s3-us-east-1.amazonaws.com/runway/1.1.0/osx/runway", ) ) calls.append( diff --git a/.github/scripts/urlshortener/update_urls.py b/.github/scripts/urlshortener/update_urls.py index 3624312e6..a8988450b 100755 --- a/.github/scripts/urlshortener/update_urls.py +++ b/.github/scripts/urlshortener/update_urls.py @@ -119,7 +119,7 @@ def handler( "table_name", metavar="", required=True, - help="Name of the DynamoDB table containing entries for the URL " "shortener.", + help="Name of the DynamoDB table containing entries for the URL shortener.", ) @click.option( "--version", diff --git a/Makefile b/Makefile index 
7bbd06022..634fee8a3 100644 --- a/Makefile +++ b/Makefile @@ -30,14 +30,14 @@ build-pyinstaller-folder: clean create-tfenv-ver-file version ## build Pyinstall bash ./.github/scripts/cicd/build_pyinstaller.sh folder clean: ## remove generated file from the project directory - rm -rf build/ - rm -rf dist/ - rm -rf runway.egg-info/ - rm -rf tmp/ - rm -rf src/ - rm -rf postinstall.js preuninstall.js .coverage .npmignore - find . -name ".runway" -type d -prune -exec rm -rf '{}' + - @make -C docs clean + rm -rf ./build/ ./dist/ ./src/ ./tmp/ ./runway.egg-info/; + rm -rf ./.pytest_cache ./.venv; + find . -type d -name ".venv" -prune -exec rm -rf '{}' +; + find . -type d -name "node_modules" -prune -exec rm -rf '{}' +; + find . -type d -name ".runway" -prune -exec rm -rf '{}' +; + find . -type f -name "*.py[co]" -delete; + find . -type d -name "__pycache__" -prune -exec rm -rf '{}' +; + @$(MAKE) --no-print-directory -C docs clean; cov-report: ## display a report in the terminal of files missing coverage @poetry run coverage report \ @@ -59,15 +59,26 @@ create-tfenv-ver-file: ## create a tfenv version file using the latest version curl --silent https://releases.hashicorp.com/index.json | jq -r '.terraform.versions | to_entries | map(select(.key | contains ("-") | not)) | sort_by(.key | split(".") | map(tonumber))[-1].key' | egrep -o '^[0-9]*\.[0-9]*\.[0-9]*' > runway/templates/terraform/.terraform-version docs: ## delete current HTML docs & build fresh HTML docs - @make -C docs docs + @$(MAKE) --no-print-directory -C docs docs docs-changes: ## build HTML docs; only builds changes detected by Sphinx - @make -C docs html + @$(MAKE) --no-print-directory -C docs html + +fix: fix-ruff fix-black run-pre-commit ## run all automatic fixes fix-black: ## automatically fix all black errors @poetry run black . -lint: lint-black lint-pyright ## run all linters +fix-imports: ## automatically fix all import sorting errors + @poetry run ruff check . --fix-only --fixable I001 + +fix-ruff: ## automatically fix everything ruff can fix (implies fix-imports) + @poetry run ruff check . --fix-only + +fix-ruff-tests: + @poetry run ruff check ./tests --fix-only --unsafe-fixes + +lint: lint-black lint-ruff lint-pyright ## run all linters lint-black: ## run black @echo "Running black... If this fails, run 'make fix-black' to resolve." @@ -79,6 +90,11 @@ lint-pyright: ## run pyright @npm run-script py-type-check @echo "" +lint-ruff: ## run ruff + @echo "Running ruff... If this fails, run 'make fix-ruff' to resolve some errors automatically; others require manual action." + @poetry run ruff check .
+ @echo "" + npm-ci: ## run "npm ci" with the option to ignore scripts - required to succeed for this project @npm ci --ignore-scripts diff --git a/infrastructure/blueprints/prevent_privilege_escalation.py b/infrastructure/blueprints/prevent_privilege_escalation.py index 778d2b0c5..645b634e9 100644 --- a/infrastructure/blueprints/prevent_privilege_escalation.py +++ b/infrastructure/blueprints/prevent_privilege_escalation.py @@ -152,10 +152,8 @@ def statement_deny_onica_sso(self) -> Statement: Action=[Action("*")], Effect=Deny, Resource=[ - Sub("arn:${AWS::Partition}:cloudformation:*:${AWS::AccountId}:stack/" "onica-sso"), - Sub( - "arn:${AWS::Partition}:cloudformation:*:${AWS::AccountId}:stack/" "onica-sso-*" - ), + Sub("arn:${AWS::Partition}:cloudformation:*:${AWS::AccountId}:stack/onica-sso"), + Sub("arn:${AWS::Partition}:cloudformation:*:${AWS::AccountId}:stack/onica-sso-*"), Sub("arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/onica-sso"), Sub("arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/onica-sso-*"), Sub("arn:${AWS::Partition}:iam::${AWS::AccountId}:role/onica-sso"), diff --git a/poetry.lock b/poetry.lock index cb5180a43..d1951d950 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2625,6 +2625,33 @@ files = [ [package.dependencies] docutils = ">=0.11,<1.0" +[[package]] +name = "ruff" +version = "0.5.1" +description = "An extremely fast Python linter and code formatter, written in Rust." +optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.5.1-py3-none-linux_armv6l.whl", hash = "sha256:6ecf968fcf94d942d42b700af18ede94b07521bd188aaf2cd7bc898dd8cb63b6"}, + {file = "ruff-0.5.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:204fb0a472f00f2e6280a7c8c7c066e11e20e23a37557d63045bf27a616ba61c"}, + {file = "ruff-0.5.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d235968460e8758d1e1297e1de59a38d94102f60cafb4d5382033c324404ee9d"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38beace10b8d5f9b6bdc91619310af6d63dd2019f3fb2d17a2da26360d7962fa"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e478d2f09cf06add143cf8c4540ef77b6599191e0c50ed976582f06e588c994"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0368d765eec8247b8550251c49ebb20554cc4e812f383ff9f5bf0d5d94190b0"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:3a9a9a1b582e37669b0138b7c1d9d60b9edac880b80eb2baba6d0e566bdeca4d"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bdd9f723e16003623423affabcc0a807a66552ee6a29f90eddad87a40c750b78"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:be9fd62c1e99539da05fcdc1e90d20f74aec1b7a1613463ed77870057cd6bd96"}, + {file = "ruff-0.5.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e216fc75a80ea1fbd96af94a6233d90190d5b65cc3d5dfacf2bd48c3e067d3e1"}, + {file = "ruff-0.5.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c4c2112e9883a40967827d5c24803525145e7dab315497fae149764979ac7929"}, + {file = "ruff-0.5.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:dfaf11c8a116394da3b65cd4b36de30d8552fa45b8119b9ef5ca6638ab964fa3"}, + {file = "ruff-0.5.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d7ceb9b2fe700ee09a0c6b192c5ef03c56eb82a0514218d8ff700f6ade004108"}, + {file = "ruff-0.5.1-py3-none-musllinux_1_2_x86_64.whl", hash = 
"sha256:bac6288e82f6296f82ed5285f597713acb2a6ae26618ffc6b429c597b392535c"}, + {file = "ruff-0.5.1-py3-none-win32.whl", hash = "sha256:5c441d9c24ec09e1cb190a04535c5379b36b73c4bc20aa180c54812c27d1cca4"}, + {file = "ruff-0.5.1-py3-none-win_amd64.whl", hash = "sha256:b1789bf2cd3d1b5a7d38397cac1398ddf3ad7f73f4de01b1e913e2abc7dfc51d"}, + {file = "ruff-0.5.1-py3-none-win_arm64.whl", hash = "sha256:2875b7596a740cbbd492f32d24be73e545a4ce0a3daf51e4f4e609962bfd3cd2"}, + {file = "ruff-0.5.1.tar.gz", hash = "sha256:3164488aebd89b1745b47fd00604fb4358d774465f20d1fcd907f9c0fc1b0655"}, +] + [[package]] name = "s3transfer" version = "0.10.0" @@ -3256,4 +3283,4 @@ testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more [metadata] lock-version = "2.0" python-versions = ">=3.9, <3.13" -content-hash = "ea09ce1332236dd272f9e789778596feeb16d945e06fc6902cf47fcca3d9e1bf" +content-hash = "0af9b10afa127aaf410e513e348436bdfbabdea13a543a88350fbff719c35dd8" diff --git a/pyproject.toml b/pyproject.toml index 3efabc9cd..790f1a04c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,6 @@ repository = "https://github.com/onicagroup/runway" [tool.poetry.dependencies] python = ">=3.9, <3.13" awacs = "*" -"backports.cached_property" = {python = "<3.8", version = "*"} boto3 = "^1.16" cfn-lint = "*" cfn_flip = "^1.2" # only used in runway._cli.commands._gen_sample.utils @@ -43,7 +42,6 @@ docker = ">=3.0.0" # used in runway.cfngin.hooks formic2 = "*" # only used in runway.cfngin.hooks.aws_lambda gitpython = "*" igittigitt = ">=2.0.5" -importlib-metadata = {python = "<3.8", version = "*"} jinja2 = ">=2.7" # used in runway.cfngin.blueprints.raw moto = "3.0.5" packaging = "*" # component of setuptools needed for version compare @@ -95,6 +93,7 @@ sphinxcontrib-programoutput = "^0.17" [tool.poetry.group.lint.dependencies] black = "^24.4.2" +ruff = "^0.5.1" [tool.poetry.group.types.dependencies] mypy-boto3 = "^1.16" # importable boto3 type annotations @@ -230,6 +229,118 @@ python_files = ["test_*.py"] python_functions = ["test_*"] testpaths = ["tests"] +[tool.ruff] # https://docs.astral.sh/ruff/settings/#top-level +extend-exclude = [ + "runway/aws_sso_botocore", # NOTE (kyle): ignoring vendored code + "runway/cfngin/hooks/staticsite/auth_at_edge/templates", # TODO (kyle): resolve lint error + "typings", +] +force-exclude = true +line-length = 120 +show-fixes = true +target-version = "py39" # important to set before applying fixes + +[tool.ruff.lint] # https://docs.astral.sh/ruff/settings/#lint +extend-safe-fixes = [ + "UP007", + "UP038", + "UP040", +] +ignore = [ + "ANN101", # Missing type annotation for `self` in method + "ANN102", # Missing type annotation for `cls` in classmethod + "ANN401", # Dynamically typed expressions (typing.Any) are disallowed # TODO (kyle): improve type annotations + "COM812", # Trailing comma missing + "D203", # 1 blank line required before class docstring + "D213", # Multi-line docstring summary should start at the second line + "D215", # Section underline is over-indented + "D403", # First word of the first line should be capitalized + "D406", # Section name should end with a newline + "D407", # Missing dashed underline after section + "D408", # Section underline should be in the line following the section's name + "D409", # Section underline should match the length of its name + "DTZ", # flake8-datetimez # NOTE (kyle): this is fine here + "EM", # flake8-errmsg + "ERA001", # Found commented-out code # NOTE (kyle): incorrectly detects cspell + "FA100", # Missing `from 
__future__ import annotations`, but uses `typing.Optional` + "FBT001", # Boolean positional arg in function definition + "FBT002", # Boolean default value in function definition + "FBT003", # Boolean positional value in function call + "FIX002", # Line contains TODO + "N818", # Exception name should be named with an Error suffix # TODO (kyle): resolve in next major release + "PERF203", # `try`-`except` within a loop incurs performance overhead + "PGH003", # Use specific rule codes when ignoring type issues # TODO (kyle): resolve this eventually + "RUF012", # TODO (kyle): remove when resolved - https://github.com/astral-sh/ruff/issues/5243 + "S105", # (hardcoded-password-string) Possible hardcoded password + "S106", # (hardcoded-password-func-arg) Possible hardcoded password + "S107", # (hardcoded-password-default) Possible hardcoded password + "S108", # Probable insecure usage of temporary file or directory + "S301", # `pickle` and modules that wrap it can be unsafe when used to deserialize untrusted data + "S60", # flake8-bandit # NOTE (kyle): most of these are for subprocess which we don't care about right now + "S604", # Function call with `shell=True` parameter identified # NOTE (kyle): required for runway + "TD003", # Missing issue link on the line following this TODO + "TID252", # Relative imports from parent modules are banned + "TRY", # tryceratops +] +select = ["ALL"] + +[tool.ruff.lint.extend-per-file-ignores] # https://docs.astral.sh/ruff/settings/#lintextend-per-file-ignores +"*.py" = [ + "PYI024", # Use `typing.NamedTuple` instead of `collections.namedtuple` # NOTE (kyle): should only apply to pyi +] +"runway/templates/*" = [ + "N999", # Invalid module name # NOTE (kyle): these are fine here +] +"tests/*" = [ + "PT004", # Fixture does not return anything, add leading underscore + "S101", # Use of `assert` detected # NOTE (kyle): this is fine here + "SLF001", # Private member accessed # NOTE (kyle): fine in tests +] + +[tool.ruff.lint.flake8-annotations] # https://docs.astral.sh/ruff/settings/#lintflake8-annotations +allow-star-arg-any = true + +[tool.ruff.lint.flake8-pytest-style] # https://docs.astral.sh/ruff/settings/#lintflake8-pytest-style +parametrize-names-type = "csv" # TODO (kyle): update tests to remove the need for this + +[tool.ruff.lint.flake8-self] +ignore-names = [ + "_Environ", + "_Hash", + "_session", +] + +[tool.ruff.lint.flake8-type-checking] # https://docs.astral.sh/ruff/settings/#lint_flake8-type-checking_runtime-evaluated-base-classes +runtime-evaluated-base-classes = [ + "pydantic.BaseModel", + "pydantic.BeforeValidator", + "runway.cfngin.hooks.base.HookArgsBaseModel", + "runway.config.models.base.ConfigProperty", + "runway.utils.BaseModel", +] + +[tool.ruff.lint.isort] # https://docs.astral.sh/ruff/settings/#lintisort +known-local-folder = [ + "jwks_rsa", + "shared", + "update_urls", +] +known-third-party = [ + "docker", # NOTE (kyle): the `docker/` directory confuses isort +] + +[tool.ruff.lint.pydocstyle] # https://docs.astral.sh/ruff/settings/#lintpydocstyle +convention = "google" + +[tool.ruff.lint.pylint] # https://docs.astral.sh/ruff/settings/#lintpylint +allow-magic-value-types = ["bytes", "int", "str"] +max-args = 15 +max-returns = 10 +max-statements = 50 + +[tool.ruff.lint.pyupgrade] # https://docs.astral.sh/ruff/settings/#pyupgrade-keep-runtime-typing +keep-runtime-typing = true # TODO (kyle): remove when dropping support for python 3.9 + [tool.tomlsort] all = true in_place = true diff --git a/quickstarts/conduit/update_env_endpoint.py 
b/quickstarts/conduit/update_env_endpoint.py index 9118376af..03ccfb726 100755 --- a/quickstarts/conduit/update_env_endpoint.py +++ b/quickstarts/conduit/update_env_endpoint.py @@ -22,9 +22,9 @@ def update_api_endpoint(): stack = cloudformation.Stack(STACK_PREFIX + environment) endpoint = [i["OutputValue"] for i in stack.outputs if i["OutputKey"] == "ServiceEndpoint"][0] - with open(environment_file, "r") as stream: + with open(environment_file) as stream: content = stream.read() - content = re.sub(r"api_url: \'.*\'$", f"api_url: '{endpoint}/api'", content, flags=re.M) + content = re.sub(r"api_url: \'.*\'$", f"api_url: '{endpoint}/api'", content, flags=re.MULTILINE) with open(environment_file, "w") as stream: stream.write(content) diff --git a/runway/__init__.py b/runway/__init__.py index b0166c82f..1a381e765 100644 --- a/runway/__init__.py +++ b/runway/__init__.py @@ -1,18 +1,12 @@ """Set package version.""" import logging -import sys +from importlib.metadata import PackageNotFoundError, version # type: ignore -from ._logging import LogLevels, RunwayLogger +from ._logging import LogLevels, RunwayLogger # noqa: F401 logging.setLoggerClass(RunwayLogger) -if sys.version_info < (3, 8): - # importlib.metadata is standard lib for python>=3.8, use backport - from importlib_metadata import PackageNotFoundError, version # type: ignore -else: - from importlib.metadata import PackageNotFoundError, version # type: ignore - try: __version__ = version(__name__) except PackageNotFoundError: diff --git a/runway/_cli/options.py b/runway/_cli/options.py index 8501172fc..2844fa4f6 100644 --- a/runway/_cli/options.py +++ b/runway/_cli/options.py @@ -14,7 +14,7 @@ "--debug", count=True, envvar="DEBUG", - help="Supply once to display Runway debug logs. " "Supply twice to display all debug logs.", + help="Supply once to display Runway debug logs. Supply twice to display all debug logs.", ) deploy_environment = click.option( diff --git a/runway/_cli/utils.py b/runway/_cli/utils.py index 87c568117..ad476a7fc 100644 --- a/runway/_cli/utils.py +++ b/runway/_cli/utils.py @@ -5,8 +5,9 @@ import logging import os import sys +from collections.abc import Iterator from pathlib import Path -from typing import Any, Iterator, List, Optional, Tuple +from typing import Any, List, Optional, Tuple import click import yaml @@ -104,7 +105,7 @@ def get_runway_context( Args: deploy_environment: Object representing the current deploy environment. - Returns + Returns: RunwayContext """ @@ -229,7 +230,7 @@ def select_modules( LOGGER.debug("only one module detected; no selection necessary") if ctx.command.name == "destroy": LOGGER.info( - "Only one module detected; all modules " "automatically selected for deletion." + "Only one module detected; all modules automatically selected for deletion." 
) if not click.confirm("Proceed?"): ctx.exit(0) diff --git a/runway/_logging.py b/runway/_logging.py index 329171eb5..15ea7592f 100644 --- a/runway/_logging.py +++ b/runway/_logging.py @@ -1,8 +1,13 @@ """Runway logging.""" +from __future__ import annotations + import logging from enum import IntEnum -from typing import Any, MutableMapping, Tuple, Union +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from collections.abc import MutableMapping class LogLevels(IntEnum): @@ -54,18 +59,20 @@ def __init__( self.prefix = prefix self.prefix_template = prefix_template - def notice(self, msg: Union[Exception, str], *args: Any, **kwargs: Any) -> None: + def notice(self, msg: Exception | str, *args: Any, **kwargs: Any) -> None: """Delegate a notice call to the underlying logger. Args: msg: String template or exception to use for the log record. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. """ self.log(LogLevels.NOTICE, msg, *args, **kwargs) def process( - self, msg: Union[Exception, str], kwargs: MutableMapping[str, Any] - ) -> Tuple[str, MutableMapping[str, Any]]: + self, msg: Exception | str, kwargs: MutableMapping[str, Any] + ) -> tuple[str, MutableMapping[str, Any]]: """Process the message to append the prefix. Args: @@ -75,7 +82,7 @@ def process( """ return self.prefix_template.format(prefix=self.prefix, msg=msg), kwargs - def setLevel(self, level: Union[int, str]) -> None: + def setLevel(self, level: int | str) -> None: # noqa: N802 """Set the specified level on the underlying logger. Python 2 backport. @@ -83,20 +90,24 @@ def setLevel(self, level: Union[int, str]) -> None: """ self.logger.setLevel(level) - def success(self, msg: Union[Exception, str], *args: Any, **kwargs: Any) -> None: + def success(self, msg: Exception | str, *args: Any, **kwargs: Any) -> None: """Delegate a success call to the underlying logger. Args: msg: String template or exception to use for the log record. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. """ self.log(LogLevels.SUCCESS, msg, *args, **kwargs) - def verbose(self, msg: Union[Exception, str], *args: Any, **kwargs: Any) -> None: + def verbose(self, msg: Exception | str, *args: Any, **kwargs: Any) -> None: """Delegate a verbose call to the underlying logger. Args: msg: String template or exception to use for the log record. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. """ self.log(LogLevels.VERBOSE, msg, *args, **kwargs) @@ -105,7 +116,7 @@ def verbose(self, msg: Union[Exception, str], *args: Any, **kwargs: Any) -> None class RunwayLogger(logging.Logger): """Extend built-in logger with additional levels.""" - def __init__(self, name: str, level: Union[int, str] = logging.NOTSET) -> None: + def __init__(self, name: str, level: int | str = logging.NOTSET) -> None: """Instantiate the class. Args: @@ -118,31 +129,37 @@ def __init__(self, name: str, level: Union[int, str] = logging.NOTSET) -> None: logging.addLevelName(LogLevels.NOTICE, LogLevels.NOTICE.name) logging.addLevelName(LogLevels.SUCCESS, LogLevels.SUCCESS.name) - def notice(self, msg: Union[Exception, str], *args: Any, **kwargs: Any) -> None: + def notice(self, msg: Exception | str, *args: Any, **kwargs: Any) -> None: """Log 'msg % args' with severity `NOTICE`. Args: msg: String template or exception to use for the log record. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. 
""" if self.isEnabledFor(LogLevels.NOTICE): self._log(LogLevels.NOTICE, msg, args, **kwargs) - def success(self, msg: Union[Exception, str], *args: Any, **kwargs: Any) -> None: + def success(self, msg: Exception | str, *args: Any, **kwargs: Any) -> None: """Log 'msg % args' with severity `SUCCESS`. Args: msg: String template or exception to use for the log record. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. """ if self.isEnabledFor(LogLevels.SUCCESS): self._log(LogLevels.SUCCESS, msg, args, **kwargs) - def verbose(self, msg: Union[Exception, str], *args: Any, **kwargs: Any) -> None: + def verbose(self, msg: Exception | str, *args: Any, **kwargs: Any) -> None: """Log 'msg % args' with severity `VERBOSE`. Args: msg: String template or exception to use for the log record. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. """ if self.isEnabledFor(LogLevels.VERBOSE): diff --git a/runway/blueprints/k8s/k8s_workers.py b/runway/blueprints/k8s/k8s_workers.py index c7cfc0c51..0ff146b9c 100755 --- a/runway/blueprints/k8s/k8s_workers.py +++ b/runway/blueprints/k8s/k8s_workers.py @@ -31,7 +31,7 @@ def get_valid_instance_types() -> Any: with gzip.open(gzip_path, "rt", encoding="utf-8") as stream: data = json.load(stream) elif os.path.exists(json_path): - with open(json_path, "r", encoding="utf-8") as stream: + with open(json_path, encoding="utf-8") as stream: data = json.load(stream) else: raise FileNotFoundError("Neither JSON nor gzipped JSON file found.") @@ -45,7 +45,7 @@ class NodeGroup(Blueprint): VARIABLES = { "KeyName": { "type": CFNString, # string to allow it to be unset - "description": "(Optional) EC2 Key Pair to allow SSH " "access to the instances", + "description": "(Optional) EC2 Key Pair to allow SSH access to the instances", "default": "", }, "NodeImageId": { @@ -54,10 +54,10 @@ class NodeGroup(Blueprint): }, "NodeInstanceType": { "type": CFNString, - "description": "EC2 instance type for the node " "instances", + "description": "EC2 instance type for the node instances", "default": "t2.medium", "allowed_values": get_valid_instance_types(), - "constraint_description": "Must be a valid EC2 " "instance type", + "constraint_description": "Must be a valid EC2 instance type", }, "NodeInstanceProfile": { "type": CFNString, @@ -65,12 +65,12 @@ class NodeGroup(Blueprint): }, "NodeAutoScalingGroupMinSize": { "type": CFNNumber, - "description": "Minimum size of Node " "Group ASG.", + "description": "Minimum size of Node Group ASG.", "default": 1, }, "NodeAutoScalingGroupMaxSize": { "type": CFNNumber, - "description": "Maximum size of Node " "Group ASG.", + "description": "Maximum size of Node Group ASG.", "default": 3, }, "NodeVolumeSize": { @@ -95,16 +95,16 @@ class NodeGroup(Blueprint): }, "NodeGroupName": { "type": CFNString, - "description": "Unique identifier for the Node " "Group.", + "description": "Unique identifier for the Node Group.", }, "ClusterControlPlaneSecurityGroup": { "type": EC2SecurityGroupId, - "description": "The security " "group of the " "cluster control " "plane.", + "description": "The security group of the cluster control plane.", }, "VpcId": {"type": EC2VPCId, "description": "The VPC of the worker instances"}, "Subnets": { "type": EC2SubnetIdList, - "description": "The subnets where workers can be " "created.", + "description": "The subnets where workers can be created.", }, "UseDesiredInstanceCount": { "type": CFNString, @@ -117,7 +117,7 @@ def create_template(self) -> None: template = self.template 
template.set_version("2010-09-09") template.set_description( - "Kubernetes workers via EKS - V1.0.0 " "- compatible with amazon-eks-node-v23+" + "Kubernetes workers via EKS - V1.0.0 - compatible with amazon-eks-node-v23+" ) # Metadata @@ -254,7 +254,7 @@ def create_template(self) -> None: template.add_resource( ec2.SecurityGroupIngress( "ClusterControlPlaneSecurityGroupIngress", - Description="Allow pods to communicate with the cluster API " "Server", + Description="Allow pods to communicate with the cluster API Server", GroupId=self.variables["ClusterControlPlaneSecurityGroup"].ref, SourceSecurityGroupId=nodesecuritygroup.ref(), IpProtocol="tcp", diff --git a/runway/blueprints/staticsite/auth_at_edge.py b/runway/blueprints/staticsite/auth_at_edge.py index ee4d442d4..e0fbfa146 100644 --- a/runway/blueprints/staticsite/auth_at_edge.py +++ b/runway/blueprints/staticsite/auth_at_edge.py @@ -44,7 +44,7 @@ class AuthAtEdge(StaticSite): "RedirectPathAuthRefresh": { "type": str, "default": "/refreshauth", - "description": "The URL path that should " "handle the JWT refresh request.", + "description": "The URL path that should handle the JWT refresh request.", }, "NonSPAMode": { "type": bool, diff --git a/runway/blueprints/staticsite/staticsite.py b/runway/blueprints/staticsite/staticsite.py index a49673f10..f7a6f7d29 100755 --- a/runway/blueprints/staticsite/staticsite.py +++ b/runway/blueprints/staticsite/staticsite.py @@ -61,7 +61,7 @@ class StaticSite(Blueprint): "Aliases": { "type": list, "default": [], - "description": "(Optional) Domain aliases the " "distribution", + "description": "(Optional) Domain aliases the distribution", }, "Compress": { "type": bool, @@ -87,7 +87,7 @@ class StaticSite(Blueprint): "RewriteDirectoryIndex": { "type": str, "default": "", - "description": "(Optional) File name to " "append to directory " "requests.", + "description": "(Optional) File name to append to directory requests.", }, "RoleBoundaryArn": { "type": str, @@ -99,17 +99,17 @@ class StaticSite(Blueprint): "WAFWebACL": { "type": str, "default": "", - "description": "(Optional) WAF id to associate with the " "distribution.", + "description": "(Optional) WAF id to associate with the distribution.", }, "custom_error_responses": { "type": list, "default": [], - "description": "(Optional) Custom error " "responses.", + "description": "(Optional) Custom error responses.", }, "lambda_function_associations": { "type": list, "default": [], - "description": "(Optional) Lambda " "function " "associations.", + "description": "(Optional) Lambda function associations.", }, } diff --git a/runway/cfngin/actions/diff.py b/runway/cfngin/actions/diff.py index 523b6fc6d..a9d3bc2e3 100644 --- a/runway/cfngin/actions/diff.py +++ b/runway/cfngin/actions/diff.py @@ -240,7 +240,7 @@ def _diff_stack(self, stack: Stack, **_: Any) -> Status: and "length less than or equal to" in err.response["Error"]["Message"] ): LOGGER.error( - "%s:template is too large to provide directly to the API; " "S3 must be used", + "%s:template is too large to provide directly to the API; S3 must be used", stack.name, ) return SkippedStatus("cfngin_bucket: existing bucket required") diff --git a/runway/cfngin/actions/graph.py b/runway/cfngin/actions/graph.py index 73334faba..ca41cce51 100644 --- a/runway/cfngin/actions/graph.py +++ b/runway/cfngin/actions/graph.py @@ -5,7 +5,8 @@ import json import logging import sys -from typing import TYPE_CHECKING, Any, Iterable, List, TextIO, Tuple, Union +from collections.abc import Iterable +from typing import 
TYPE_CHECKING, Any, List, TextIO, Tuple, Union from ..plan import merge_graphs from .base import BaseAction diff --git a/runway/cfngin/awscli_yamlhelper.py b/runway/cfngin/awscli_yamlhelper.py index 36de18761..a0010a8bc 100644 --- a/runway/cfngin/awscli_yamlhelper.py +++ b/runway/cfngin/awscli_yamlhelper.py @@ -15,7 +15,8 @@ from __future__ import annotations import json -from typing import Any, Dict, MutableMapping, MutableSequence, cast +from collections.abc import MutableMapping, MutableSequence +from typing import Any, Dict, cast import yaml diff --git a/runway/cfngin/blueprints/base.py b/runway/cfngin/blueprints/base.py index bbf9315a6..0e86e3aa4 100644 --- a/runway/cfngin/blueprints/base.py +++ b/runway/cfngin/blueprints/base.py @@ -144,12 +144,11 @@ def validate_variable_type( raise ValidatorError(var_name, f"{var_type.resource_name}.create", value, exc) from exc elif issubclass(var_type, CFNType): value = CFNParameter(name=var_name, value=value) - else: - if not isinstance(value, var_type): - raise TypeError( - f"Value for variable {var_name} must be of type {var_type}. Actual " - f"type: {type(value)}" - ) + elif not isinstance(value, var_type): + raise TypeError( + f"Value for variable {var_name} must be of type {var_type}. Actual " + f"type: {type(value)}" + ) return value diff --git a/runway/cfngin/blueprints/raw.py b/runway/cfngin/blueprints/raw.py index 0fe4d00e9..cb1f2da65 100644 --- a/runway/cfngin/blueprints/raw.py +++ b/runway/cfngin/blueprints/raw.py @@ -176,7 +176,7 @@ def rendered(self) -> str: ) ) else: - with open(template_path, "r", encoding="utf-8") as template: + with open(template_path, encoding="utf-8") as template: self._rendered = template.read() else: raise InvalidConfig(f"Could not find template {self.raw_template_path}") diff --git a/runway/cfngin/blueprints/testutil.py b/runway/cfngin/blueprints/testutil.py index 7fdac8574..301967660 100644 --- a/runway/cfngin/blueprints/testutil.py +++ b/runway/cfngin/blueprints/testutil.py @@ -6,9 +6,10 @@ import json import os.path import unittest +from collections.abc import Iterator from glob import glob from pathlib import Path -from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Type, cast +from typing import TYPE_CHECKING, Any, List, Optional, Type, cast from ...config import CfnginConfig from ...context import CfnginContext diff --git a/runway/cfngin/blueprints/variables/types.py b/runway/cfngin/blueprints/variables/types.py index b4af01130..ed1a4ce68 100644 --- a/runway/cfngin/blueprints/variables/types.py +++ b/runway/cfngin/blueprints/variables/types.py @@ -121,23 +121,20 @@ def create( raise ValueError("Resources must be specified as a dict of title to parameters") if not self._many and len(value) > 1: raise ValueError( - "Only one resource can be provided for this " "TroposphereType variable" + "Only one resource can be provided for this TroposphereType variable" ) result = [self._type.from_dict(title, v) for title, v in value.items()] + elif self._many and isinstance(value, list): + result = [self._type.from_dict(None, v) for v in value] + elif not isinstance(value, dict): + raise ValueError( + "TroposphereType for a single non-resource" + "type must be specified as a dict of " + "parameters" + ) else: - # Our type is for properties, not a resource, so don't use - # titles - if self._many and isinstance(value, list): - result = [self._type.from_dict(None, v) for v in value] - elif not isinstance(value, dict): - raise ValueError( - "TroposphereType for a single non-resource" - "type must be 
specified as a dict of " - "parameters" - ) - else: - result = [self._type.from_dict(None, value)] + result = [self._type.from_dict(None, value)] if self._validate: for v in result: diff --git a/runway/cfngin/cfngin.py b/runway/cfngin/cfngin.py index 0ac2b9a81..045c01949 100644 --- a/runway/cfngin/cfngin.py +++ b/runway/cfngin/cfngin.py @@ -94,7 +94,7 @@ def env_file(self) -> MutableMap: if os.path.isfile(file_path): LOGGER.info("found environment file: %s", file_path) self._env_file_name = file_path - with open(file_path, "r", encoding="utf-8") as file_: + with open(file_path, encoding="utf-8") as file_: result.update(parse_environment(file_.read())) return MutableMap(**result) diff --git a/runway/cfngin/dag/__init__.py b/runway/cfngin/dag/__init__.py index aad5eed46..2e1b897a6 100644 --- a/runway/cfngin/dag/__init__.py +++ b/runway/cfngin/dag/__init__.py @@ -5,6 +5,8 @@ import collections import collections.abc import logging +from collections import OrderedDict +from collections.abc import Iterable from copy import copy, deepcopy from threading import Thread from typing import ( @@ -12,9 +14,7 @@ Any, Callable, Dict, - Iterable, List, - OrderedDict, Set, Tuple, Union, @@ -221,10 +221,9 @@ def rename_edges(self, old_node_name: str, new_node_name: str) -> None: graph[new_node_name] = copy(edges) del graph[old_node_name] - else: - if old_node_name in edges: - edges.remove(old_node_name) - edges.add(new_node_name) + elif old_node_name in edges: + edges.remove(old_node_name) + edges.add(new_node_name) def predecessors(self, node: str) -> List[str]: """Return a list of all immediate predecessors of the given node. @@ -359,7 +358,7 @@ def topological_sort(self) -> List[str]: for val in graph[node]: in_degree[val] += 1 - queue: "collections.deque[str]" = collections.deque() + queue: collections.deque[str] = collections.deque() for node, value in in_degree.items(): if value == 0: queue.appendleft(node) diff --git a/runway/cfngin/exceptions.py b/runway/cfngin/exceptions.py index bfad0b60d..10d7e889f 100644 --- a/runway/cfngin/exceptions.py +++ b/runway/cfngin/exceptions.py @@ -619,7 +619,7 @@ def __init__( ) if self.exception: - self.message += f": {self.exception.__class__.__name__}: {str(self.exception)}" + self.message += f": {self.exception.__class__.__name__}: {self.exception!s}" super().__init__() def __str__(self): diff --git a/runway/cfngin/hooks/acm.py b/runway/cfngin/hooks/acm.py index 9be493ef5..b4edeb0a1 100644 --- a/runway/cfngin/hooks/acm.py +++ b/runway/cfngin/hooks/acm.py @@ -103,12 +103,10 @@ def __init__(self, context: CfnginContext, provider: Provider, **kwargs: Any) -> self.stack_name = self.args.stack_name or self.args.domain.replace(".", "-") self.properties = MutableMap( - **{ - "DomainName": self.args.domain, - "SubjectAlternativeNames": self.args.alt_names, - "Tags": self.tags, - "ValidationMethod": "DNS", - } + DomainName=self.args.domain, + SubjectAlternativeNames=self.args.alt_names, + Tags=self.tags, + ValidationMethod="DNS", ) self.blueprint = self._create_blueprint() @@ -165,7 +163,7 @@ def domain_changed(self) -> bool: return False if self.args.domain != self.provider.get_outputs(self.stack.fqn)["DomainName"]: LOGGER.error( - '"domain" can\'t be changed for existing ' 'certificate in stack "%s"', + '"domain" can\'t be changed for existing certificate in stack "%s"', self.stack.fqn, ) return True @@ -228,7 +226,7 @@ def get_validation_record( ] except KeyError: LOGGER.debug( - "waiting for DomainValidationOptions to become " "available for the certificate..." 
+ "waiting for DomainValidationOptions to become available for the certificate..." ) time.sleep(interval) return self.get_validation_record(cert_arn=cert_arn, interval=interval, status=status) diff --git a/runway/cfngin/hooks/aws_lambda.py b/runway/cfngin/hooks/aws_lambda.py index 8624a5a0b..9b7d9a7ff 100644 --- a/runway/cfngin/hooks/aws_lambda.py +++ b/runway/cfngin/hooks/aws_lambda.py @@ -12,6 +12,7 @@ import subprocess import sys import tempfile +from collections.abc import Iterable, Iterator from io import BytesIO as StringIO from pathlib import Path from shutil import copyfile @@ -19,8 +20,6 @@ TYPE_CHECKING, Any, Dict, - Iterable, - Iterator, List, Optional, Tuple, @@ -209,7 +208,7 @@ def _calculate_hash(files: Iterable[str], root: str) -> str: if not chunk: break file_hash.update(chunk) - file_hash.update("\0".encode()) + file_hash.update(b"\0") return file_hash.hexdigest() @@ -288,9 +287,9 @@ def _zip_from_file_patterns( def handle_requirements( package_root: str, dest_path: str, - requirements: Dict[str, bool], + requirements: dict[str, bool], pipenv_timeout: int = 300, - python_path: Optional[str] = None, + python_path: str | None = None, use_pipenv: bool = False, ) -> str: """Use the correct requirements file. @@ -322,7 +321,7 @@ def handle_requirements( ) if requirements["requirements.txt"]: LOGGER.info("using requirements.txt for dependencies") - return os.path.join(dest_path, "requirements.txt") + return os.path.join(dest_path, "requirements.txt") # noqa: PTH118 if requirements["Pipfile"] or requirements["Pipfile.lock"]: LOGGER.info("using pipenv for dependencies") return _handle_use_pipenv( @@ -409,7 +408,7 @@ def dockerized_pip( # exactly one of these is needed. converting to bool will give us a # 'False' (0) for 'None' and 'True' (1) for anything else. 
raise InvalidDockerizePipConfiguration( - "exactly only one of [docker_file, docker_file, runtime] must be " "provided" + "exactly only one of [docker_file, docker_file, runtime] must be provided" ) if not client: @@ -495,7 +494,7 @@ def _pip_has_no_color_option(python_path: str) -> bool: [ python_path, "-c", - "from __future__ import print_function;" "import pip;" "print(pip.__version__)", + "from __future__ import print_function;import pip;print(pip.__version__)", ] ) if isinstance(pip_version_string, bytes): # type: ignore diff --git a/runway/cfngin/hooks/awslambda/base_classes.py b/runway/cfngin/hooks/awslambda/base_classes.py index 6cf217d74..25451638c 100644 --- a/runway/cfngin/hooks/awslambda/base_classes.py +++ b/runway/cfngin/hooks/awslambda/base_classes.py @@ -232,7 +232,7 @@ def project_root(self) -> Path: parents = list(self.args.source_code.parents) if top_lvl_dir not in parents: LOGGER.info( - "ignoring project directory; " "source code located outside of project directory" + "ignoring project directory; source code located outside of project directory" ) return self.args.source_code diff --git a/runway/cfngin/hooks/awslambda/deployment_package.py b/runway/cfngin/hooks/awslambda/deployment_package.py index f467a1158..3aad41c28 100644 --- a/runway/cfngin/hooks/awslambda/deployment_package.py +++ b/runway/cfngin/hooks/awslambda/deployment_package.py @@ -9,13 +9,14 @@ import stat import sys import zipfile +from collections.abc import Iterator from contextlib import suppress from typing import ( TYPE_CHECKING, ClassVar, Dict, + Final, Generic, - Iterator, List, Optional, TypeVar, @@ -25,7 +26,7 @@ ) from urllib.parse import urlencode -from typing_extensions import Final, Literal +from typing_extensions import Literal from ....compat import cached_property from ....core.providers.aws.s3 import Bucket diff --git a/runway/cfngin/hooks/awslambda/docker.py b/runway/cfngin/hooks/awslambda/docker.py index 964ef2f10..0fa31bc7c 100644 --- a/runway/cfngin/hooks/awslambda/docker.py +++ b/runway/cfngin/hooks/awslambda/docker.py @@ -5,12 +5,12 @@ import logging import os import platform +from collections.abc import Iterator from typing import ( TYPE_CHECKING, Any, ClassVar, Dict, - Iterator, List, Optional, Type, diff --git a/runway/cfngin/hooks/awslambda/source_code.py b/runway/cfngin/hooks/awslambda/source_code.py index e815c79ca..0dc1d7c43 100644 --- a/runway/cfngin/hooks/awslambda/source_code.py +++ b/runway/cfngin/hooks/awslambda/source_code.py @@ -4,8 +4,9 @@ import hashlib import logging +from collections.abc import Iterator, Sequence from pathlib import Path -from typing import TYPE_CHECKING, Iterator, List, Optional, Sequence, Union +from typing import TYPE_CHECKING, List, Optional, Union import igittigitt diff --git a/runway/cfngin/hooks/docker/hook_data.py b/runway/cfngin/hooks/docker/hook_data.py index 345c5a623..7c8e45908 100644 --- a/runway/cfngin/hooks/docker/hook_data.py +++ b/runway/cfngin/hooks/docker/hook_data.py @@ -17,7 +17,7 @@ class DockerHookData(MutableMap): """Docker hook_data object.""" - image: Optional["DockerImage"] = None + image: Optional[DockerImage] = None @cached_property def client(self) -> DockerClient: diff --git a/runway/cfngin/hooks/docker/image/_build.py b/runway/cfngin/hooks/docker/image/_build.py index 8b8aacfcf..daa62c751 100644 --- a/runway/cfngin/hooks/docker/image/_build.py +++ b/runway/cfngin/hooks/docker/image/_build.py @@ -7,12 +7,12 @@ from __future__ import annotations import logging +from collections.abc import Iterator from pathlib import 
Path from typing import ( TYPE_CHECKING, Any, Dict, - Iterator, List, Optional, Tuple, diff --git a/runway/cfngin/hooks/ecr/_purge_repository.py b/runway/cfngin/hooks/ecr/_purge_repository.py index 513416b36..ff13e9a74 100644 --- a/runway/cfngin/hooks/ecr/_purge_repository.py +++ b/runway/cfngin/hooks/ecr/_purge_repository.py @@ -30,7 +30,7 @@ def delete_ecr_images( ) -> None: """Delete images from an ECR repository.""" response = client.batch_delete_image(repositoryName=repository_name, imageIds=image_ids) - if "failures" in response and response["failures"]: + if response.get("failures"): for msg in response["failures"]: LOGGER.info( "failed to delete image %s: (%s) %s", diff --git a/runway/cfngin/hooks/keypair.py b/runway/cfngin/hooks/keypair.py index 12fdc7213..18d6acdc7 100644 --- a/runway/cfngin/hooks/keypair.py +++ b/runway/cfngin/hooks/keypair.py @@ -255,9 +255,7 @@ def ensure_keypair_exists(context: CfnginContext, *__args: Any, **kwargs: Any) - args = EnsureKeypairExistsHookArgs.parse_obj(kwargs) if args.public_key_path and args.ssm_parameter_name: - LOGGER.error( - "public_key_path and ssm_parameter_name cannot be " "specified at the same time" - ) + LOGGER.error("public_key_path and ssm_parameter_name cannot be specified at the same time") return {} session = context.get_session() diff --git a/runway/cfngin/hooks/ssm/parameter.py b/runway/cfngin/hooks/ssm/parameter.py index ae02a8455..d837285e4 100644 --- a/runway/cfngin/hooks/ssm/parameter.py +++ b/runway/cfngin/hooks/ssm/parameter.py @@ -26,10 +26,11 @@ LOGGER = cast("RunwayLogger", logging.getLogger(__name__)) + # PutParameterResultTypeDef but without metadata -_PutParameterResultTypeDef = TypedDict( - "_PutParameterResultTypeDef", {"Tier": ParameterTierType, "Version": int} -) +class _PutParameterResultTypeDef(TypedDict): + Tier: ParameterTierType + Version: int class ArgsDataModel(BaseModel): diff --git a/runway/cfngin/hooks/staticsite/upload_staticsite.py b/runway/cfngin/hooks/staticsite/upload_staticsite.py index fda541029..cdede12d2 100644 --- a/runway/cfngin/hooks/staticsite/upload_staticsite.py +++ b/runway/cfngin/hooks/staticsite/upload_staticsite.py @@ -312,7 +312,7 @@ def calculate_hash_of_extra_files( if not chunk: break file_hash.update(chunk) - file_hash.update("\0".encode()) + file_hash.update(b"\0") return file_hash.hexdigest() diff --git a/runway/cfngin/hooks/staticsite/utils.py b/runway/cfngin/hooks/staticsite/utils.py index 1f8ea2e22..c966234da 100644 --- a/runway/cfngin/hooks/staticsite/utils.py +++ b/runway/cfngin/hooks/staticsite/utils.py @@ -5,8 +5,9 @@ import hashlib import logging import os +from collections.abc import Iterable from pathlib import Path -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Union, cast +from typing import TYPE_CHECKING, Dict, List, Optional, Union, cast import igittigitt diff --git a/runway/cfngin/hooks/utils.py b/runway/cfngin/hooks/utils.py index 3fe396aba..b064032fe 100644 --- a/runway/cfngin/hooks/utils.py +++ b/runway/cfngin/hooks/utils.py @@ -131,17 +131,16 @@ def handle_hooks( LOGGER.error("required hook %s failed; return value: %s", hook.path, result) sys.exit(1) LOGGER.warning("non-required hook %s failed; return value: %s", hook.path, result) - else: - if isinstance(result, (collections.abc.Mapping, pydantic.BaseModel)): - if hook.data_key: - LOGGER.debug( - "adding result for hook %s to context in data_key %s", - hook.path, - hook.data_key, - ) - context.set_hook_data(hook.data_key, result) - else: - LOGGER.debug( - "hook %s returned result data 
but no data key set; ignoring", - hook.path, - ) + elif isinstance(result, (collections.abc.Mapping, pydantic.BaseModel)): + if hook.data_key: + LOGGER.debug( + "adding result for hook %s to context in data_key %s", + hook.path, + hook.data_key, + ) + context.set_hook_data(hook.data_key, result) + else: + LOGGER.debug( + "hook %s returned result data but no data key set; ignoring", + hook.path, + ) diff --git a/runway/cfngin/logger/__init__.py b/runway/cfngin/logger/__init__.py index 81277f541..4606bde13 100644 --- a/runway/cfngin/logger/__init__.py +++ b/runway/cfngin/logger/__init__.py @@ -5,7 +5,7 @@ from typing import Any, Dict, Optional DEBUG_FORMAT = ( - "[%(asctime)s] %(levelname)s %(threadName)s " "%(name)s:%(lineno)d(%(funcName)s): %(message)s" + "[%(asctime)s] %(levelname)s %(threadName)s %(name)s:%(lineno)d(%(funcName)s): %(message)s" ) INFO_FORMAT = "[%(asctime)s] %(message)s" COLOR_FORMAT = "[%(asctime)s] \033[%(color)sm%(message)s\033[39m" diff --git a/runway/cfngin/lookups/handlers/ami.py b/runway/cfngin/lookups/handlers/ami.py index c00ddc8d3..506684511 100644 --- a/runway/cfngin/lookups/handlers/ami.py +++ b/runway/cfngin/lookups/handlers/ami.py @@ -5,10 +5,10 @@ import operator import re -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Dict, Final, List, Optional, Tuple, Union from pydantic import validator -from typing_extensions import Final, Literal +from typing_extensions import Literal from ....lookups.handlers.base import LookupHandler from ....utils import BaseModel diff --git a/runway/cfngin/lookups/handlers/awslambda.py b/runway/cfngin/lookups/handlers/awslambda.py index c8890dd15..7ac1705d1 100644 --- a/runway/cfngin/lookups/handlers/awslambda.py +++ b/runway/cfngin/lookups/handlers/awslambda.py @@ -13,11 +13,11 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING, Any, List, Optional, Union, cast +from typing import TYPE_CHECKING, Any, Final, List, Optional, Union, cast from pydantic import ValidationError from troposphere.awslambda import Code, Content -from typing_extensions import Final, Literal +from typing_extensions import Literal from ....lookups.handlers.base import LookupHandler from ....utils import load_object_from_string diff --git a/runway/cfngin/lookups/handlers/default.py b/runway/cfngin/lookups/handlers/default.py index d9e495952..2cd1a39ee 100644 --- a/runway/cfngin/lookups/handlers/default.py +++ b/runway/cfngin/lookups/handlers/default.py @@ -3,9 +3,9 @@ # pyright: reportIncompatibleMethodOverride=none from __future__ import annotations -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any, Final, Optional -from typing_extensions import Final, Literal +from typing_extensions import Literal from ....lookups.handlers.base import LookupHandler diff --git a/runway/cfngin/lookups/handlers/dynamodb.py b/runway/cfngin/lookups/handlers/dynamodb.py index 3a1f6af5d..096087681 100644 --- a/runway/cfngin/lookups/handlers/dynamodb.py +++ b/runway/cfngin/lookups/handlers/dynamodb.py @@ -4,10 +4,10 @@ from __future__ import annotations import re -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, cast +from typing import TYPE_CHECKING, Any, Dict, Final, List, Optional, Tuple, cast from botocore.exceptions import ClientError -from typing_extensions import Final, Literal, TypedDict +from typing_extensions import Literal, TypedDict from ....lookups.handlers.base import LookupHandler from ....utils import 
BaseModel diff --git a/runway/cfngin/lookups/handlers/envvar.py b/runway/cfngin/lookups/handlers/envvar.py index a359b17a2..3c93ef447 100644 --- a/runway/cfngin/lookups/handlers/envvar.py +++ b/runway/cfngin/lookups/handlers/envvar.py @@ -3,9 +3,9 @@ # pyright: reportIncompatibleMethodOverride=none import logging import os -from typing import Any +from typing import Any, Final -from typing_extensions import Final, Literal +from typing_extensions import Literal from ....lookups.handlers.base import LookupHandler from ...utils import read_value_from_path diff --git a/runway/cfngin/lookups/handlers/file.py b/runway/cfngin/lookups/handlers/file.py index 28f2f1f9d..c847fa090 100644 --- a/runway/cfngin/lookups/handlers/file.py +++ b/runway/cfngin/lookups/handlers/file.py @@ -7,12 +7,13 @@ import collections.abc import json import re -from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union, overload +from collections.abc import Mapping, Sequence +from typing import Any, Callable, Dict, Final, List, Tuple, Union, overload import yaml from pydantic import validator from troposphere import Base64, GenericHelperFn -from typing_extensions import Final, Literal +from typing_extensions import Literal from ....lookups.handlers.base import LookupHandler from ....utils import BaseModel diff --git a/runway/cfngin/lookups/handlers/hook_data.py b/runway/cfngin/lookups/handlers/hook_data.py index 0b86047b6..57b8dbb02 100644 --- a/runway/cfngin/lookups/handlers/hook_data.py +++ b/runway/cfngin/lookups/handlers/hook_data.py @@ -4,10 +4,10 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, Final from troposphere import BaseAWSObject -from typing_extensions import Final, Literal +from typing_extensions import Literal from ....lookups.handlers.base import LookupHandler from ....utils import MutableMap diff --git a/runway/cfngin/lookups/handlers/kms.py b/runway/cfngin/lookups/handlers/kms.py index e8d6fac10..f49f00a46 100644 --- a/runway/cfngin/lookups/handlers/kms.py +++ b/runway/cfngin/lookups/handlers/kms.py @@ -5,9 +5,9 @@ import codecs import logging -from typing import TYPE_CHECKING, Any, BinaryIO, Dict, Tuple, Union, cast +from typing import TYPE_CHECKING, Any, BinaryIO, Dict, Final, Tuple, Union, cast -from typing_extensions import Final, Literal +from typing_extensions import Literal from ....lookups.handlers.base import LookupHandler from ....utils import DOC_SITE diff --git a/runway/cfngin/lookups/handlers/output.py b/runway/cfngin/lookups/handlers/output.py index db2b49167..ed8cbaf40 100644 --- a/runway/cfngin/lookups/handlers/output.py +++ b/runway/cfngin/lookups/handlers/output.py @@ -5,9 +5,9 @@ import logging import re -from typing import TYPE_CHECKING, Any, Dict, NamedTuple, Set, Tuple +from typing import TYPE_CHECKING, Any, Dict, Final, NamedTuple, Set, Tuple -from typing_extensions import Final, Literal +from typing_extensions import Literal from ....exceptions import OutputDoesNotExist from ....lookups.handlers.base import LookupHandler diff --git a/runway/cfngin/lookups/handlers/rxref.py b/runway/cfngin/lookups/handlers/rxref.py index 4d85c64a6..b3e79947e 100644 --- a/runway/cfngin/lookups/handlers/rxref.py +++ b/runway/cfngin/lookups/handlers/rxref.py @@ -4,9 +4,9 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING, Any, Dict, Tuple +from typing import TYPE_CHECKING, Any, Dict, Final, Tuple -from typing_extensions import Final, Literal +from typing_extensions 
import Literal from ....lookups.handlers.base import LookupHandler from ....lookups.handlers.cfn import CfnLookup diff --git a/runway/cfngin/lookups/handlers/split.py b/runway/cfngin/lookups/handlers/split.py index 98c0637fb..c9d08f161 100644 --- a/runway/cfngin/lookups/handlers/split.py +++ b/runway/cfngin/lookups/handlers/split.py @@ -1,9 +1,9 @@ """Split lookup.""" # pyright: reportIncompatibleMethodOverride=none -from typing import Any, List +from typing import Any, Final, List -from typing_extensions import Final, Literal +from typing_extensions import Literal from ....lookups.handlers.base import LookupHandler diff --git a/runway/cfngin/lookups/handlers/xref.py b/runway/cfngin/lookups/handlers/xref.py index 70f06f71a..f7c6db192 100644 --- a/runway/cfngin/lookups/handlers/xref.py +++ b/runway/cfngin/lookups/handlers/xref.py @@ -4,9 +4,9 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, Final -from typing_extensions import Final, Literal +from typing_extensions import Literal from ....lookups.handlers.base import LookupHandler from .output import deconstruct diff --git a/runway/cfngin/plan.py b/runway/cfngin/plan.py index 88f9e848d..ec8b256fc 100644 --- a/runway/cfngin/plan.py +++ b/runway/cfngin/plan.py @@ -8,6 +8,7 @@ import threading import time import uuid +from collections import OrderedDict from typing import ( TYPE_CHECKING, Any, @@ -16,7 +17,6 @@ List, NoReturn, Optional, - OrderedDict, Set, TypeVar, Union, diff --git a/runway/cfngin/providers/aws/default.py b/runway/cfngin/providers/aws/default.py index de6ac14a4..93c4ad71d 100644 --- a/runway/cfngin/providers/aws/default.py +++ b/runway/cfngin/providers/aws/default.py @@ -7,12 +7,12 @@ import sys import threading import time +from collections.abc import Iterable from typing import ( TYPE_CHECKING, Any, Callable, Dict, - Iterable, List, Optional, Set, @@ -950,7 +950,7 @@ def create_stack( self.cloudformation.create_stack(**args) except botocore.exceptions.ClientError as err: if err.response["Error"]["Message"] == ( - "TemplateURL must reference a valid S3 object to which you " "have access." + "TemplateURL must reference a valid S3 object to which you have access." 
): s3_fallback( fqn, @@ -1035,7 +1035,7 @@ def prepare_stack_for_update(self, stack: StackTypeDef, tags: List[TagTypeDef]) raise exceptions.StackUpdateBadStatus( stack_name, stack_status, - "Tags differ from current configuration, possibly not created " "with CFNgin", + "Tags differ from current configuration, possibly not created with CFNgin", ) if self.interactive: diff --git a/runway/cfngin/status.py b/runway/cfngin/status.py index 456a6c153..c7f00dc9b 100644 --- a/runway/cfngin/status.py +++ b/runway/cfngin/status.py @@ -46,11 +46,11 @@ def _comparison(self, operator_: Callable[[Any, Any], bool], other: Any) -> bool return operator_(self.code, other.code) return NotImplemented - def __eq__(self, other: Any) -> bool: + def __eq__(self, other: object) -> bool: """Compare if self is equal to another object.""" return self._comparison(operator.eq, other) - def __ne__(self, other: Any) -> bool: + def __ne__(self, other: object) -> bool: """Compare if self is not equal to another object.""" return self._comparison(operator.ne, other) diff --git a/runway/cfngin/utils.py b/runway/cfngin/utils.py index 0eb02d417..3865a6f06 100644 --- a/runway/cfngin/utils.py +++ b/runway/cfngin/utils.py @@ -14,16 +14,16 @@ import tempfile import uuid import zipfile +from collections import OrderedDict +from collections.abc import Iterator from pathlib import Path from typing import ( TYPE_CHECKING, Any, ClassVar, Dict, - Iterator, List, Optional, - OrderedDict, Type, Union, cast, @@ -492,7 +492,7 @@ def ensure_s3_bucket( return if err.response["Error"]["Message"] == "Forbidden": LOGGER.exception( - "Access denied for bucket %s. Did you remember " "to use a globally unique name?", + "Access denied for bucket %s. Did you remember to use a globally unique name?", bucket_name, ) elif err.response["Error"]["Message"] != "Not Found": @@ -760,7 +760,7 @@ def fetch_s3_package(self, config: S3CfnginPackageSourceDefinitionModel) -> None ) extractor.extract(tmp_package_path) LOGGER.debug( - "moving extracted package directory %s to the " "CFNgin cache at %s", + "moving extracted package directory %s to the CFNgin cache at %s", dir_name, self.package_cache_dir, ) @@ -811,7 +811,7 @@ def fetch_git_package(self, config: GitCfnginPackageSourceDefinitionModel) -> No shutil.rmtree(tmp_dir) else: LOGGER.debug( - "remote repo %s appears to have been previously " "cloned to %s; download skipped", + "remote repo %s appears to have been previously cloned to %s; download skipped", config.uri, cached_dir_path, ) diff --git a/runway/compat.py b/runway/compat.py index 038e76ed2..360443eff 100644 --- a/runway/compat.py +++ b/runway/compat.py @@ -1,26 +1,19 @@ """Python dependency compatibility handling.""" import sys -from typing import Iterable - -if sys.version_info < (3, 8): # 3.7 - import shlex - - from backports.cached_property import cached_property - from importlib_metadata import PackageNotFoundError, version - - def shlex_join(split_command: Iterable[str]) -> str: - """Backport of :meth:`shlex.join`.""" - return " ".join(shlex.quote(arg) for arg in split_command) +from functools import cached_property +from importlib.metadata import PackageNotFoundError, version +from shlex import join as shlex_join +if sys.version_info < (3, 11): + from typing_extensions import Self else: - from functools import cached_property - from importlib.metadata import PackageNotFoundError, version - from shlex import join as shlex_join + from typing import Self __all__ = [ - "PackageNotFoundError", - "cached_property", - "shlex_join", - "version", + 
"PackageNotFoundError", # TODO (kyle): remove in next major release + "Self", + "cached_property", # TODO (kyle): remove in next major release + "shlex_join", # TODO (kyle): remove in next major release + "version", # TODO (kyle): remove in next major release ] diff --git a/runway/config/__init__.py b/runway/config/__init__.py index c3fe0c980..802f636e1 100644 --- a/runway/config/__init__.py +++ b/runway/config/__init__.py @@ -5,6 +5,7 @@ import logging import re import sys +from collections.abc import Mapping, MutableMapping from pathlib import Path from string import Template from typing import ( @@ -13,8 +14,6 @@ Any, Dict, List, - Mapping, - MutableMapping, Optional, Union, cast, diff --git a/runway/config/models/runway/__init__.py b/runway/config/models/runway/__init__.py index b9a4a08e7..fd5fa9e94 100644 --- a/runway/config/models/runway/__init__.py +++ b/runway/config/models/runway/__init__.py @@ -4,13 +4,13 @@ import locale import logging +from collections.abc import Generator from pathlib import Path from typing import ( TYPE_CHECKING, Any, Callable, Dict, - Generator, List, Optional, Type, diff --git a/runway/constants.py b/runway/constants.py index 12c53db5d..a8a4c7693 100644 --- a/runway/constants.py +++ b/runway/constants.py @@ -1,8 +1,10 @@ """Runway constants.""" -from typing import Any, Dict +from __future__ import annotations -BOTO3_CREDENTIAL_CACHE: Dict[str, Any] = {} +from typing import Any + +BOTO3_CREDENTIAL_CACHE: dict[str, Any] = {} """A global credential cache that can be shared among boto3 sessions. This is inherently threadsafe thanks to the GIL. (https://docs.python.org/3/glossary.html#term-global-interpreter-lock) diff --git a/runway/context/_cfngin.py b/runway/context/_cfngin.py index d642ffdc2..1e2322299 100644 --- a/runway/context/_cfngin.py +++ b/runway/context/_cfngin.py @@ -5,8 +5,9 @@ import collections.abc import json import logging +from collections.abc import MutableMapping from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, MutableMapping, Optional, Union, cast +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast from pydantic import BaseModel @@ -233,7 +234,7 @@ def persistent_graph(self) -> Optional[Graph]: ) except self.s3_client.exceptions.NoSuchKey: self.logger.info( - "persistent graph object does not exist in s3; " "creating one now..." + "persistent graph object does not exist in s3; creating one now..." ) self.s3_client.put_object( Body=content.encode(), @@ -316,7 +317,7 @@ def upload_to_s3(self) -> bool: # explicitly set to an empty string. 
if self.config.cfngin_bucket == "": self.logger.debug( - "not uploading to s3; cfngin_bucket " "is explicitly set to an empty string" + "not uploading to s3; cfngin_bucket is explicitly set to an empty string" ) return False diff --git a/runway/core/providers/aws/_assume_role.py b/runway/core/providers/aws/_assume_role.py index a8b5c02d2..1c89a0eb9 100644 --- a/runway/core/providers/aws/_assume_role.py +++ b/runway/core/providers/aws/_assume_role.py @@ -19,7 +19,11 @@ LOGGER = cast("RunwayLogger", logging.getLogger(__name__.replace("._", "."))) -_KwargsTypeDef = TypedDict("_KwargsTypeDef", DurationSeconds=int, RoleArn=str, RoleSessionName=str) + +class _KwargsTypeDef(TypedDict): + DurationSeconds: int + RoleArn: str + RoleSessionName: str class AssumeRole(ContextManager["AssumeRole"]): diff --git a/runway/core/providers/aws/s3/_helpers/action_architecture.py b/runway/core/providers/aws/s3/_helpers/action_architecture.py index a556789fd..0c388933e 100644 --- a/runway/core/providers/aws/s3/_helpers/action_architecture.py +++ b/runway/core/providers/aws/s3/_helpers/action_architecture.py @@ -170,7 +170,7 @@ def run(self): "s3local": "download", "s3": "delete", } - result_queue: "Queue[Any]" = Queue() + result_queue: Queue[Any] = Queue() operation_name = action_translation[paths_type] file_generator = FileGenerator( diff --git a/runway/core/providers/aws/s3/_helpers/comparator.py b/runway/core/providers/aws/s3/_helpers/comparator.py index 3cc2c820a..909668f8d 100644 --- a/runway/core/providers/aws/s3/_helpers/comparator.py +++ b/runway/core/providers/aws/s3/_helpers/comparator.py @@ -8,7 +8,8 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING, Generator, Iterator, Optional, cast +from collections.abc import Generator, Iterator +from typing import TYPE_CHECKING, Optional, cast if TYPE_CHECKING: from .file_generator import FileStats diff --git a/runway/core/providers/aws/s3/_helpers/file_generator.py b/runway/core/providers/aws/s3/_helpers/file_generator.py index 6eb3c7cd2..3afa4b08c 100644 --- a/runway/core/providers/aws/s3/_helpers/file_generator.py +++ b/runway/core/providers/aws/s3/_helpers/file_generator.py @@ -10,6 +10,7 @@ import datetime import os import stat +from collections.abc import Generator from copy import deepcopy from dataclasses import dataclass from pathlib import Path @@ -18,7 +19,6 @@ TYPE_CHECKING, Any, Dict, - Generator, List, Optional, Tuple, @@ -62,7 +62,7 @@ def is_readable(path: Path) -> bool: return False else: try: - with open(path, "r", encoding="utf-8"): + with open(path, encoding="utf-8"): pass except OSError: return False @@ -92,18 +92,16 @@ def is_special_file(path: Path) -> bool: return False -FileStatsDict = TypedDict( - "FileStatsDict", - src="AnyPath", - compare_key=Optional[str], - dest_type=Optional["SupportedPathType"], - dest=Optional[str], - last_update=datetime.datetime, - operation_name=Optional[str], - response_data=Optional[Union["HeadObjectOutputTypeDef", "ObjectTypeDef"]], - size=Optional[int], - src_type=Optional["SupportedPathType"], -) +class FileStatsDict(TypedDict): + src: AnyPath + compare_key: Optional[str] + dest_type: Optional[SupportedPathType] + dest: Optional[str] + last_update: datetime.datetime + operation_name: Optional[str] + response_data: Optional[Union[HeadObjectOutputTypeDef, ObjectTypeDef]] + size: Optional[int] + src_type: Optional[SupportedPathType] @dataclass @@ -140,7 +138,9 @@ def dict(self) -> FileStatsDict: return deepcopy(cast(FileStatsDict, self.__dict__)) -_LastModifiedAndSize = 
TypedDict("_LastModifiedAndSize", Size=int, LastModified=datetime.datetime) +class _LastModifiedAndSize(TypedDict): + Size: int + LastModified: datetime.datetime class FileGenerator: @@ -151,7 +151,7 @@ class FileGenerator: """ - result_queue: "Queue[Any]" + result_queue: Queue[Any] def __init__( self, @@ -159,7 +159,7 @@ def __init__( operation_name: str, follow_symlinks: bool = True, page_size: Optional[int] = None, - result_queue: Optional["Queue[Any]"] = None, + result_queue: Optional[Queue[Any]] = None, request_parameters: Any = None, ): """Instantiate class. @@ -282,7 +282,7 @@ def _validate_update_time( if update_time is None: warning = create_warning( path=path, - error_message="File has an invalid timestamp. Passing epoch " "time as timestamp.", + error_message="File has an invalid timestamp. Passing epoch time as timestamp.", skip_file=False, ) self.result_queue.put(warning) @@ -323,7 +323,7 @@ def triggers_warning(self, path: Path) -> bool: if is_special_file(path): warning = create_warning( path, - ("File is character special device, " "block special device, FIFO, or socket."), + ("File is character special device, block special device, FIFO, or socket."), ) self.result_queue.put(warning) return True diff --git a/runway/core/providers/aws/s3/_helpers/file_info_builder.py b/runway/core/providers/aws/s3/_helpers/file_info_builder.py index e60907d00..3858955aa 100644 --- a/runway/core/providers/aws/s3/_helpers/file_info_builder.py +++ b/runway/core/providers/aws/s3/_helpers/file_info_builder.py @@ -7,7 +7,8 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Generator, Iterable, Optional +from collections.abc import Generator, Iterable +from typing import TYPE_CHECKING, Any, Optional from .file_info import FileInfo diff --git a/runway/core/providers/aws/s3/_helpers/filters.py b/runway/core/providers/aws/s3/_helpers/filters.py index 6ba5dfe59..efb569455 100644 --- a/runway/core/providers/aws/s3/_helpers/filters.py +++ b/runway/core/providers/aws/s3/_helpers/filters.py @@ -10,12 +10,10 @@ import fnmatch import logging import os +from collections.abc import Generator, Iterable, Iterator from typing import ( TYPE_CHECKING, ClassVar, - Generator, - Iterable, - Iterator, List, NamedTuple, Optional, @@ -36,8 +34,16 @@ _FilterType = Literal["exclude", "include"] -FileStatus = NamedTuple("FileStatus", [("file_stats", "FileStats"), ("include", bool)]) -FilterPattern = NamedTuple("FilterPattern", [("type", _FilterType), ("pattern", str)]) + + +class FileStatus(NamedTuple): + file_stats: FileStats + include: bool + + +class FilterPattern(NamedTuple): + type: _FilterType + pattern: str class Filter: diff --git a/runway/core/providers/aws/s3/_helpers/format_path.py b/runway/core/providers/aws/s3/_helpers/format_path.py index 0a7196ab2..26025caff 100644 --- a/runway/core/providers/aws/s3/_helpers/format_path.py +++ b/runway/core/providers/aws/s3/_helpers/format_path.py @@ -14,14 +14,18 @@ from typing_extensions import Literal, TypedDict SupportedPathType = Literal["local", "s3"] -FormattedPathDetails = TypedDict("FormattedPathDetails", path=str, type=SupportedPathType) -FormatPathResult = TypedDict( - "FormattedPaths", - dest=FormattedPathDetails, - dir_op=bool, - src=FormattedPathDetails, - use_src_name=bool, -) + + +class FormattedPathDetails(TypedDict): + path: str + type: SupportedPathType + + +class FormatPathResult(TypedDict): + dest: FormattedPathDetails + dir_op: bool + src: FormattedPathDetails + use_src_name: bool class FormatPath: diff --git 
a/runway/core/providers/aws/s3/_helpers/parameters.py b/runway/core/providers/aws/s3/_helpers/parameters.py index fd2e54556..95ec3aa27 100644 --- a/runway/core/providers/aws/s3/_helpers/parameters.py +++ b/runway/core/providers/aws/s3/_helpers/parameters.py @@ -137,7 +137,7 @@ def _validate_path_args(self) -> None: def _same_path(self) -> bool: """Evaluate if the src and dest are the same path.""" - if not self.data.paths_type == "s3s3": + if self.data.paths_type != "s3s3": return False if self.data.src == self.data.dest: return True diff --git a/runway/core/providers/aws/s3/_helpers/results.py b/runway/core/providers/aws/s3/_helpers/results.py index a0063c5f5..0b6b0df8b 100644 --- a/runway/core/providers/aws/s3/_helpers/results.py +++ b/runway/core/providers/aws/s3/_helpers/results.py @@ -169,7 +169,7 @@ class BaseResultSubscriber(OnDoneFilteredSubscriber): TRANSFER_TYPE: ClassVar[Optional[str]] = None - def __init__(self, result_queue: "queue.Queue[Any]", transfer_type: Optional[str] = None): + def __init__(self, result_queue: queue.Queue[Any], transfer_type: Optional[str] = None): """Send result notifications during transfer process. Args: @@ -672,7 +672,7 @@ class ResultProcessor(threading.Thread): def __init__( self, - result_queue: "queue.Queue[Any]", + result_queue: queue.Queue[Any], result_handlers: Optional[List[Callable[..., Any]]] = None, ) -> None: """Instantiate class. @@ -735,7 +735,7 @@ class CommandResultRecorder: def __init__( self, - result_queue: "queue.Queue[Any]", + result_queue: queue.Queue[Any], result_recorder: ResultRecorder, result_processor: ResultProcessor, ) -> None: diff --git a/runway/core/providers/aws/s3/_helpers/s3handler.py b/runway/core/providers/aws/s3/_helpers/s3handler.py index 4f0b0dbd0..d56e0e109 100644 --- a/runway/core/providers/aws/s3/_helpers/s3handler.py +++ b/runway/core/providers/aws/s3/_helpers/s3handler.py @@ -10,13 +10,13 @@ import logging import os import sys +from collections.abc import Iterator from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Dict, - Iterator, List, Optional, Tuple, @@ -109,7 +109,7 @@ def __init__( self._config_params = config_params self._runtime_config = runtime_config - def __call__(self, client: S3Client, result_queue: "Queue[Any]") -> S3TransferHandler: + def __call__(self, client: S3Client, result_queue: Queue[Any]) -> S3TransferHandler: """Create a S3TransferHandler instance. Args: @@ -161,9 +161,7 @@ def _add_result_printer( ) -> None: if self._config_params.quiet: return - if self._config_params.only_show_errors: - result_printer = OnlyShowErrorsResultPrinter(result_recorder) - elif self._config_params.is_stream: + if self._config_params.only_show_errors or self._config_params.is_stream: result_printer = OnlyShowErrorsResultPrinter(result_recorder) elif self._config_params.no_progress: result_printer = NoProgressResultPrinter(result_recorder) @@ -251,7 +249,7 @@ class BaseTransferRequestSubmitter: def __init__( self, transfer_manager: TransferManager, - result_queue: "Queue[Any]", + result_queue: Queue[Any], config_params: ParametersDataModel, ): """Instantiate class. @@ -376,7 +374,7 @@ def _warn_glacier(self, fileinfo: FileInfo) -> bool: if not self._config_params.force_glacier_transfer: if not fileinfo.is_glacier_compatible: LOGGER.debug( - "Encountered glacier object s3://%s. Not performing " "%s on object.", + "Encountered glacier object s3://%s. 
Not performing %s on object.", fileinfo.src, fileinfo.operation_name, ) @@ -667,7 +665,7 @@ def _add_additional_subscribers( def _get_filein(fileinfo: FileInfo) -> NonSeekableStream: # type: ignore """Get file in.""" if sys.stdin is None: - raise StdinMissingError() + raise StdinMissingError return NonSeekableStream(sys.stdin.buffer) def _format_local_path(self, path: Optional[AnyPath]) -> str: diff --git a/runway/core/providers/aws/s3/_helpers/transfer_config.py b/runway/core/providers/aws/s3/_helpers/transfer_config.py index 74df87fd8..4672b1868 100644 --- a/runway/core/providers/aws/s3/_helpers/transfer_config.py +++ b/runway/core/providers/aws/s3/_helpers/transfer_config.py @@ -14,17 +14,17 @@ from .utils import human_readable_to_bytes + # If the user does not specify any overrides, # these are the default values we use for the s3 transfer # commands. -TransferConfigDict = TypedDict( - "TransferConfigDict", - max_bandwidth=Optional[Union[int, str]], - max_concurrent_requests=int, - max_queue_size=int, - multipart_chunksize=Union[int, str], - multipart_threshold=Union[int, str], -) +class TransferConfigDict(TypedDict): + max_bandwidth: Optional[Union[int, str]] + max_concurrent_requests: int + max_queue_size: int + multipart_chunksize: Union[int, str] + multipart_threshold: Union[int, str] + DEFAULTS: TransferConfigDict = { "max_bandwidth": None, diff --git a/runway/core/providers/aws/s3/_helpers/utils.py b/runway/core/providers/aws/s3/_helpers/utils.py index 54b981370..70c007250 100644 --- a/runway/core/providers/aws/s3/_helpers/utils.py +++ b/runway/core/providers/aws/s3/_helpers/utils.py @@ -16,6 +16,7 @@ import re import sys import time +from collections.abc import Generator from datetime import datetime from pathlib import Path from typing import ( @@ -24,7 +25,6 @@ BinaryIO, Callable, Dict, - Generator, NamedTuple, Optional, TextIO, @@ -67,7 +67,7 @@ } _S3_ACCESSPOINT_TO_BUCKET_KEY_REGEX = re.compile( - r"^(?Parn:(aws).*:s3:[a-z\-0-9]+:[0-9]{12}:accesspoint[:/][^/]+)/?" 
r"(?P.*)$" + r"^(?Parn:(aws).*:s3:[a-z\-0-9]+:[0-9]{12}:accesspoint[:/][^/]+)/?(?P.*)$" ) _S3_OUTPOST_TO_BUCKET_KEY_REGEX = re.compile( r"^(?Parn:(aws).*:s3-outposts:[a-z\-0-9]+:[0-9]{12}:outpost[/:]" @@ -311,7 +311,7 @@ def _get_filename(self, future: TransferFuture) -> str: class ProvideLastModifiedTimeSubscriber(OnDoneFilteredSubscriber): """Sets utime for a downloaded file.""" - def __init__(self, last_modified_time: datetime, result_queue: "Queue[Any]") -> None: + def __init__(self, last_modified_time: datetime, result_queue: Queue[Any]) -> None: """Instantiate class.""" self._last_modified_time = last_modified_time self._result_queue = result_queue @@ -753,7 +753,7 @@ def get_file_stat(path: Path) -> Tuple[int, Optional[datetime]]: """Get size of file in bytes and last modified time stamp.""" try: stats = path.stat() - except IOError as exc: + except OSError as exc: raise ValueError(f"Could not retrieve file stat of {path}: {exc}") from exc try: diff --git a/runway/dependency_managers/_pip.py b/runway/dependency_managers/_pip.py index 62611ad99..1885d9edc 100644 --- a/runway/dependency_managers/_pip.py +++ b/runway/dependency_managers/_pip.py @@ -5,10 +5,11 @@ import logging import re import subprocess +from collections.abc import Iterable from pathlib import Path -from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, Any, Final, List, Optional, Tuple, Union, cast -from typing_extensions import Final, Literal +from typing_extensions import Literal from ..compat import cached_property, shlex_join from ..exceptions import RunwayError @@ -30,7 +31,7 @@ class PipInstallFailedError(RunwayError): def __init__(self, *args: Any, **kwargs: Any) -> None: """Instantiate class. All args/kwargs are passed to parent method.""" self.message = ( - "pip failed to install dependencies; " "review pip's output above to troubleshoot" + "pip failed to install dependencies; review pip's output above to troubleshoot" ) super().__init__(*args, **kwargs) diff --git a/runway/dependency_managers/_pipenv.py b/runway/dependency_managers/_pipenv.py index e0ea5149c..91170551c 100644 --- a/runway/dependency_managers/_pipenv.py +++ b/runway/dependency_managers/_pipenv.py @@ -7,9 +7,9 @@ import re import subprocess from pathlib import Path -from typing import TYPE_CHECKING, Any, Tuple +from typing import TYPE_CHECKING, Any, Final, Tuple -from typing_extensions import Final, Literal +from typing_extensions import Literal from ..compat import cached_property from ..exceptions import RunwayError diff --git a/runway/dependency_managers/_poetry.py b/runway/dependency_managers/_poetry.py index 0bdf3e94f..e26dd5b7c 100644 --- a/runway/dependency_managers/_poetry.py +++ b/runway/dependency_managers/_poetry.py @@ -6,10 +6,10 @@ import re import subprocess from pathlib import Path -from typing import TYPE_CHECKING, Any, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Final, List, Optional, Tuple import tomli -from typing_extensions import Final, Literal +from typing_extensions import Literal from ..compat import cached_property from ..exceptions import RunwayError diff --git a/runway/env_mgr/__init__.py b/runway/env_mgr/__init__.py index 4625562e6..30f9f0068 100644 --- a/runway/env_mgr/__init__.py +++ b/runway/env_mgr/__init__.py @@ -7,8 +7,9 @@ import platform import shutil import sys +from collections.abc import Generator from pathlib import Path -from typing import TYPE_CHECKING, Generator, Optional, Union, cast +from typing import TYPE_CHECKING, 
Optional, Union, cast from ..compat import cached_property from ..mixins import DelCachedPropMixin diff --git a/runway/env_mgr/kbenv.py b/runway/env_mgr/kbenv.py index b51677710..211b988ff 100644 --- a/runway/env_mgr/kbenv.py +++ b/runway/env_mgr/kbenv.py @@ -11,12 +11,12 @@ import shutil import sys import tempfile -from typing import TYPE_CHECKING, Generator, Optional, cast +from collections.abc import Generator +from typing import TYPE_CHECKING, Final, Optional, cast from urllib.error import URLError from urllib.request import urlretrieve import requests -from typing_extensions import Final from ..compat import cached_property from ..exceptions import KubectlVersionNotSpecified @@ -50,7 +50,7 @@ def verify_kb_release(kb_url: str, download_dir: str, filename: str) -> None: # the ridiculousness should be short-lived as md5 & sha1 support won't last # long. try: - hash_alg: "hashlib._Hash" = hashlib.sha512() + hash_alg: hashlib._Hash = hashlib.sha512() checksum_filename = filename + "." + hash_alg.name LOGGER.debug("attempting download of kubectl %s checksum...", hash_alg.name) download_request = requests.get(kb_url + "/" + checksum_filename, allow_redirects=True) diff --git a/runway/env_mgr/tfenv.py b/runway/env_mgr/tfenv.py index 3b0b0d7b7..a98c6faa6 100644 --- a/runway/env_mgr/tfenv.py +++ b/runway/env_mgr/tfenv.py @@ -14,17 +14,8 @@ import sys import tempfile import zipfile -from typing import ( - TYPE_CHECKING, - Any, - Dict, - Generator, - List, - Optional, - Union, - cast, - overload, -) +from collections.abc import Generator +from typing import TYPE_CHECKING, Any, Final, cast, overload from urllib.error import URLError from urllib.request import urlretrieve @@ -32,7 +23,6 @@ import hcl2 import requests from packaging.version import InvalidVersion -from typing_extensions import Final from ..compat import cached_property from ..exceptions import HclParserError @@ -53,8 +43,8 @@ def download_tf_release( version: str, versions_dir: Path, command_suffix: str, - tf_platform: Optional[str] = None, - arch: Optional[str] = None, + tf_platform: str | None = None, + arch: str | None = None, ) -> None: """Download Terraform archive and return path to it.""" version_dir = versions_dir / version @@ -64,18 +54,15 @@ def download_tf_release( if tf_platform: tfver_os = tf_platform + "_" + arch + elif platform.system().startswith("Darwin"): + tfver_os = f"darwin_{arch}" + elif platform.system().startswith("Windows") or ( + platform.system().startswith("MINGW64") + or (platform.system().startswith("MSYS_NT") or platform.system().startswith("CYGWIN_NT")) + ): + tfver_os = f"windows_{arch}" else: - if platform.system().startswith("Darwin"): - tfver_os = f"darwin_{arch}" - elif platform.system().startswith("Windows") or ( - platform.system().startswith("MINGW64") - or ( - platform.system().startswith("MSYS_NT") or platform.system().startswith("CYGWIN_NT") - ) - ): - tfver_os = f"windows_{arch}" - else: - tfver_os = f"linux_{arch}" + tfver_os = f"linux_{arch}" download_dir = tempfile.mkdtemp() filename = f"terraform_{version}_{tfver_os}.zip" @@ -105,7 +92,7 @@ def download_tf_release( result.chmod(result.stat().st_mode | 0o0111) # ensure it is executable -def get_available_tf_versions(include_prerelease: bool = False) -> List[str]: +def get_available_tf_versions(include_prerelease: bool = False) -> list[str]: """Return available Terraform versions.""" tf_releases = json.loads(requests.get("https://releases.hashicorp.com/index.json").text)[ "terraform" @@ -136,21 +123,21 @@ def 
get_latest_tf_version(include_prerelease: bool = False) -> str: return get_available_tf_versions(include_prerelease)[0] -def load_terraform_module(parser: ModuleType, path: Path) -> Dict[str, Any]: +def load_terraform_module(parser: ModuleType, path: Path) -> dict[str, Any]: """Load all Terraform files in a module into one dict. Args: - parser (Union[hcl, hcl2]): Parser to use when loading files. + parser: Parser to use when loading files. path: Terraform module path. All Terraform files in the path will be loaded. """ - result: Dict[str, Any] = {} + result: dict[str, Any] = {} LOGGER.debug("using %s parser to load module: %s", parser.__name__.upper(), path) for tf_file in path.glob("*.tf"): try: tf_config = parser.loads(tf_file.read_text()) # type: ignore - result = merge_dicts(result, cast(Dict[str, Any], tf_config)) + result = merge_dicts(result, cast("dict[str, Any]", tf_config)) except Exception as exc: raise HclParserError(exc, tf_file, parser) from None return result @@ -168,38 +155,36 @@ class TFEnvManager(EnvManager): r"^Terraform v(?P[0-9]*\.[0-9]*\.[0-9]*)(?P-.*)?" ) - def __init__(self, path: Optional[Path] = None) -> None: + def __init__(self, path: Path | None = None) -> None: """Initialize class.""" super().__init__("terraform", "tfenv", path) @cached_property - def backend(self) -> Dict[str, Any]: + def backend(self) -> dict[str, Any]: """Backend config of the Terraform module.""" # Terraform can only have one backend configured; this formats the # data to make it easier to work with return [ {"type": k, "config": v} for k, v in self.terraform_block.get( - "backend", {None: cast(Dict[str, str], {})} + "backend", {None: cast("dict[str, str]", {})} ).items() ][0] @cached_property - def terraform_block(self) -> Dict[str, Any]: + def terraform_block(self) -> dict[str, Any]: """Collect Terraform configuration blocks from a Terraform module.""" @overload - def _flatten_lists(data: Dict[str, Any]) -> Dict[str, Any]: ... + def _flatten_lists(data: dict[str, Any]) -> dict[str, Any]: ... @overload - def _flatten_lists(data: List[Any]) -> List[Any]: ... + def _flatten_lists(data: list[Any]) -> list[Any]: ... @overload def _flatten_lists(data: str) -> str: ... - def _flatten_lists( - data: Union[Dict[str, Any], List[Any], Any] - ) -> Union[Dict[str, Any], Any]: + def _flatten_lists(data: dict[str, Any] | list[Any] | Any) -> dict[str, Any] | Any: """Flatten HCL2 list attributes until its fixed. 
python-hcl2 incorrectly turns all attributes into lists so we need @@ -213,28 +198,28 @@ def _flatten_lists( """ if not isinstance(data, dict): return data - copy_data = cast(Dict[str, Any], data.copy()) + copy_data = cast("dict[str, Any]", data.copy()) for attr, val in copy_data.items(): if isinstance(val, list): - if len(cast(List[Any], val)) == 1: + if len(cast("list[Any]", val)) == 1: # pull single values out of lists data[attr] = _flatten_lists(cast(Any, val[0])) else: - data[attr] = [_flatten_lists(v) for v in cast(List[Any], val)] + data[attr] = [_flatten_lists(v) for v in cast("list[Any]", val)] elif isinstance(val, dict): - data[attr] = _flatten_lists(cast(Dict[str, Any], val)) + data[attr] = _flatten_lists(cast("dict[str, Any]", val)) return data try: - result: Union[Dict[str, Any], List[Dict[str, Any]]] = load_terraform_module( + result: dict[str, Any] | list[dict[str, Any]] = load_terraform_module( hcl2, self.path - ).get("terraform", cast(Dict[str, Any], {})) + ).get("terraform", cast("dict[str, Any]", {})) except HclParserError as exc: LOGGER.warning(exc) LOGGER.warning("failed to parse as HCL2; trying HCL...") try: result = load_terraform_module(hcl, self.path).get( - "terraform", cast(Dict[str, Any], {}) + "terraform", cast("dict[str, Any]", {}) ) except HclParserError as exc2: LOGGER.warning(exc2) @@ -248,7 +233,7 @@ def _flatten_lists( return _flatten_lists(result) @cached_property - def version(self) -> Optional[Version]: + def version(self) -> Version | None: """Terraform version.""" version_requested = self.current_version or self.get_version_from_file() @@ -287,7 +272,7 @@ def version(self) -> Optional[Version]: return self.parse_version_string(self.current_version) @cached_property - def version_file(self) -> Optional[Path]: + def version_file(self) -> Path | None: """Find and return a ".terraform-version" file if one is present. Returns: @@ -329,7 +314,7 @@ def get_min_required(self) -> str: ) sys.exit(1) - def get_version_from_file(self, file_path: Optional[Path] = None) -> Optional[str]: + def get_version_from_file(self, file_path: Path | None = None) -> str | None: """Get Terraform version from a file. Args: @@ -344,7 +329,7 @@ def get_version_from_file(self, file_path: Optional[Path] = None) -> Optional[st LOGGER.debug("file path not provided and version file could not be found") return None - def install(self, version_requested: Optional[str] = None) -> str: + def install(self, version_requested: str | None = None) -> str: """Ensure Terraform is available.""" if version_requested: self.set_version(version_requested) @@ -393,11 +378,11 @@ def set_version(self, version: str) -> None: @classmethod def get_version_from_executable( cls, - bin_path: Union[Path, str], + bin_path: Path | str, *, - cwd: Optional[Union[Path, str]] = None, - env: Optional[Dict[str, str]] = None, - ) -> Optional[Version]: + cwd: Path | str | None = None, + env: dict[str, str] | None = None, + ) -> Version | None: """Get Terraform version from an executable. 
Args: diff --git a/runway/exceptions.py b/runway/exceptions.py index 0cd50b209..ea674e2d4 100644 --- a/runway/exceptions.py +++ b/runway/exceptions.py @@ -2,12 +2,12 @@ from __future__ import annotations -from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any from .utils import DOC_SITE if TYPE_CHECKING: + from pathlib import Path from types import ModuleType from .variables import ( @@ -35,11 +35,11 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class ConfigNotFound(RunwayError): """Configuration file could not be found.""" - looking_for: List[str] + looking_for: list[str] message: str path: Path - def __init__(self, *, looking_for: Optional[List[str]] = None, path: Path) -> None: + def __init__(self, *, looking_for: list[str] | None = None, path: Path) -> None: """Instantiate class. Args: @@ -51,9 +51,7 @@ def __init__(self, *, looking_for: Optional[List[str]] = None, path: Path) -> No self.path = path if looking_for: - self.message = ( - f"config file not found at path {path}; " f"looking for one of {looking_for}" - ) + self.message = f"config file not found at path {path}; looking for one of {looking_for}" else: self.message = f"config file not found at path {path}" super().__init__(self.path, self.looking_for) @@ -91,7 +89,7 @@ class DockerExecFailedError(RunwayError): exit_code: int """The ``StatusCode`` returned by Docker.""" - def __init__(self, response: Dict[str, Any]) -> None: + def __init__(self, response: dict[str, Any]) -> None: """Instantiate class. Args: @@ -102,7 +100,7 @@ def __init__(self, response: Dict[str, Any]) -> None: """ self.exit_code = response.get("StatusCode", 1) # we can assume this will be > 0 - error = response.get("Error") or {} # value from dict could be NoneType + error: dict[Any, Any] = response.get("Error") or {} # value from dict could be NoneType self.message = error.get("Message", "error message undefined") super().__init__() @@ -129,6 +127,8 @@ def __init__( lookup: The variable value lookup that was attempted and resulted in an exception being raised. cause: The exception that was raised. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. """ self.cause = cause @@ -155,12 +155,14 @@ def __init__( Args: variable: The variable containing the failed lookup. lookup_error: The exception that was raised directly before this one. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. """ self.cause = lookup_error self.variable = variable self.message = ( - f'Could not resolve lookup "{lookup_error.lookup}" ' f'for variable "{variable.name}"' + f'Could not resolve lookup "{lookup_error.lookup}" for variable "{variable.name}"' ) super().__init__(*args, **kwargs) @@ -173,8 +175,8 @@ class HclParserError(RunwayError): def __init__( self, exc: Exception, - file_path: Union[Path, str], - parser: Optional[ModuleType] = None, + file_path: Path | str, + parser: ModuleType | None = None, ) -> None: """Instantiate class. @@ -269,6 +271,8 @@ def __init__(self, stack_name: str, output: str, *args: Any, **kwargs: Any) -> N Args: stack_name: Name of the stack. output: The output that does not exist. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. """ self.stack_name = stack_name @@ -317,6 +321,8 @@ def __init__(self, lookup: VariableValueLookup, *args: Any, **kwargs: Any) -> No Args: lookup: Variable value lookup that could not find a handler. + *args: Variable length argument list. 
+ **kwargs: Arbitrary keyword arguments. """ self.message = f'Unknown lookup type "{lookup.lookup_name.value}" in "{lookup}"' @@ -333,6 +339,8 @@ def __init__(self, variable: Variable, *args: Any, **kwargs: Any) -> None: Args: variable: The unresolved variable. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. """ self.message = f'Attempted to use variable "{variable.name}" before it was resolved' @@ -357,6 +365,8 @@ def __init__(self, lookup: VariableValueLookup, *args: Any, **kwargs: Any) -> No Args: lookup: The variable value lookup that is not resolved. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. """ self.lookup = lookup diff --git a/runway/lookups/handlers/base.py b/runway/lookups/handlers/base.py index bd1ca7192..5e8f72339 100644 --- a/runway/lookups/handlers/base.py +++ b/runway/lookups/handlers/base.py @@ -4,18 +4,8 @@ import json import logging -from typing import ( - TYPE_CHECKING, - Any, - ClassVar, - Dict, - Optional, - Sequence, - Set, - Tuple, - Union, - cast, -) +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any, ClassVar, cast import yaml from troposphere import BaseAWSObject @@ -34,7 +24,7 @@ TransformToTypeLiteral = Literal["bool", "str"] -def str2bool(v: str): +def str2bool(v: str) -> bool: """Return boolean value of string.""" return v.lower() in ("yes", "true", "t", "1", "on", "y") @@ -46,7 +36,7 @@ class LookupHandler: """Name that the Lookup is registered as.""" @classmethod - def dependencies(cls, __lookup_query: VariableValue) -> Set[str]: + def dependencies(cls, __lookup_query: VariableValue) -> set[str]: """Calculate any dependencies required to perform this lookup. Note that lookup_query may not be (completely) resolved at this time. @@ -58,9 +48,9 @@ def dependencies(cls, __lookup_query: VariableValue) -> Set[str]: def format_results( cls, value: Any, - get: Optional[str] = None, - load: Optional[str] = None, - transform: Optional[TransformToTypeLiteral] = None, + get: str | None = None, + load: str | None = None, + transform: TransformToTypeLiteral | None = None, **kwargs: Any, ) -> Any: """Format results to be returned by a lookup. @@ -72,6 +62,7 @@ def format_results( and ``transform`` method. transform: Convert the final value to a different data type before returning it. + **kwargs: Arbitrary keyword arguments. Raises: TypeError: If ``get`` is provided but the value value is not a @@ -113,9 +104,9 @@ def format_results( def handle( cls, __value: str, - context: Union[CfnginContext, RunwayContext], + context: CfnginContext | RunwayContext, *__args: Any, - provider: Optional[Provider] = None, + provider: Provider | None = None, **__kwargs: Any, ) -> Any: """Perform the lookup. @@ -129,7 +120,7 @@ def handle( raise NotImplementedError @classmethod - def parse(cls, value: str) -> Tuple[str, Dict[str, str]]: + def parse(cls, value: str) -> tuple[str, dict[str, str]]: """Parse the value passed to a lookup in a standardized way. Args: @@ -144,12 +135,12 @@ def parse(cls, value: str) -> Tuple[str, Dict[str, str]]: colon_split = raw_value.split("::", 1) query = colon_split.pop(0) - args: Dict[str, str] = cls._parse_args(colon_split[0]) if colon_split else {} + args: dict[str, str] = cls._parse_args(colon_split[0]) if colon_split else {} return query, args @classmethod - def _parse_args(cls, args: str) -> Dict[str, str]: + def _parse_args(cls, args: str) -> dict[str, str]: """Convert a string into an args dict. Each arg should be separated by ``,``. 
The key and value should @@ -169,7 +160,7 @@ def _parse_args(cls, args: str) -> Dict[str, str]: } @classmethod - def load(cls, value: Any, parser: Optional[str] = None, **kwargs: Any) -> Any: + def load(cls, value: Any, parser: str | None = None, **kwargs: Any) -> Any: """Load a formatted string or object into a python data type. First action taken in :meth:`format_results`. @@ -180,6 +171,7 @@ def load(cls, value: Any, parser: Optional[str] = None, **kwargs: Any) -> Any: Args: value: What is being loaded. parser: Name of the parser to use. + **kwargs: Arbitrary keyword arguments. Returns: The loaded value. @@ -254,7 +246,7 @@ def transform( cls, value: Any, *, - to_type: Optional[TransformToTypeLiteral] = "str", + to_type: TransformToTypeLiteral | None = "str", **kwargs: Any, ) -> Any: """Transform the result of a lookup into another datatype. @@ -267,6 +259,7 @@ def transform( Args: value: What is to be transformed. to_type: The type the value will be transformed into. + **kwargs: Arbitrary keyword arguments. Returns: The transformed value. @@ -322,7 +315,7 @@ def _transform_to_string( value = value.data if isinstance(value, dict): # dumped twice for an escaped json dict - return json.dumps(json.dumps(cast(Dict[str, Any], value), indent=int(indent))) + return json.dumps(json.dumps(cast("dict[str, Any]", value), indent=int(indent))) if isinstance(value, bool): return json.dumps(str(value)) return str(value) diff --git a/runway/lookups/handlers/cfn.py b/runway/lookups/handlers/cfn.py index 24939b3ef..4f038925c 100644 --- a/runway/lookups/handlers/cfn.py +++ b/runway/lookups/handlers/cfn.py @@ -11,10 +11,9 @@ import json import logging -from typing import TYPE_CHECKING, Any, Dict, NamedTuple, Optional, Union, cast +from typing import TYPE_CHECKING, Any, Final, NamedTuple, cast from botocore.exceptions import ClientError -from typing_extensions import Final, Literal from ...cfngin.exceptions import StackDoesNotExist from ...exceptions import OutputDoesNotExist @@ -22,6 +21,7 @@ if TYPE_CHECKING: from mypy_boto3_cloudformation.client import CloudFormationClient + from typing_extensions import Literal from ...cfngin.providers.aws.default import Provider from ...context import CfnginContext, RunwayContext @@ -43,7 +43,7 @@ class CfnLookup(LookupHandler): """Name that the Lookup is registered as.""" @staticmethod - def should_use_provider(args: Dict[str, str], provider: Optional[Provider]) -> bool: + def should_use_provider(args: dict[str, str], provider: Provider | None) -> bool: """Determine if the provider should be used for the lookup. This will open happen when the lookup is used with CFNgin. @@ -84,9 +84,9 @@ def get_stack_output(client: CloudFormationClient, query: OutputQuery) -> str: def handle( cls, value: str, - context: Union[CfnginContext, RunwayContext], + context: CfnginContext | RunwayContext, *, - provider: Optional[Provider] = None, + provider: Provider | None = None, **_: Any, ) -> Any: """Retrieve a value from CloudFormation Stack outputs. 
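The lookup-handler hunks on either side of this point repeat a single pattern: Literal moves into the TYPE_CHECKING block, Final comes from typing instead of typing_extensions, and Optional[X] / Union[X, Y] annotations become X | None / X | Y, which stays compatible with older interpreters because "from __future__ import annotations" defers annotation evaluation. A minimal sketch of the resulting shape, assuming a hypothetical handler (ExampleLookup and its members are illustrative, not taken from the patch):

from __future__ import annotations  # annotations are stored as strings, so "X | None" parses on Python 3.8+

from typing import TYPE_CHECKING, Any, Final

if TYPE_CHECKING:  # needed only by the type checker; no runtime import
    from typing_extensions import Literal


class ExampleLookup:
    """Hypothetical lookup handler used only to illustrate the annotation style."""

    TYPE_NAME: Final[Literal["example"]] = "example"

    @classmethod
    def handle(cls, value: str, *, default: str | None = None, **_kwargs: Any) -> str | None:
        """Return the raw value, falling back to the default when it is empty."""
        return value or default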
diff --git a/runway/lookups/handlers/ecr.py b/runway/lookups/handlers/ecr.py index 445ed3c22..f7d7bff27 100644 --- a/runway/lookups/handlers/ecr.py +++ b/runway/lookups/handlers/ecr.py @@ -4,14 +4,13 @@ import base64 import logging -from typing import TYPE_CHECKING, Any, Union - -from typing_extensions import Final, Literal +from typing import TYPE_CHECKING, Any, Final from ...lookups.handlers.base import LookupHandler if TYPE_CHECKING: from mypy_boto3_ecr.client import ECRClient + from typing_extensions import Literal from ...context import CfnginContext, RunwayContext @@ -38,7 +37,7 @@ def get_login_password(client: ECRClient) -> str: def handle( cls, value: str, - context: Union[CfnginContext, RunwayContext], + context: CfnginContext | RunwayContext, *__args: Any, **__kwargs: Any, ) -> Any: diff --git a/runway/lookups/handlers/env.py b/runway/lookups/handlers/env.py index d5b1f0a7a..8398abb19 100644 --- a/runway/lookups/handlers/env.py +++ b/runway/lookups/handlers/env.py @@ -3,13 +3,13 @@ # pyright: reportIncompatibleMethodOverride=none from __future__ import annotations -from typing import TYPE_CHECKING, Any, Union - -from typing_extensions import Final, Literal +from typing import TYPE_CHECKING, Any, Final from .base import LookupHandler if TYPE_CHECKING: + from typing_extensions import Literal + from ...context import CfnginContext, RunwayContext @@ -23,7 +23,7 @@ class EnvLookup(LookupHandler): def handle( cls, value: str, - context: Union[CfnginContext, RunwayContext], + context: CfnginContext | RunwayContext, *__args: Any, **__kwargs: Any, ) -> Any: diff --git a/runway/lookups/handlers/random_string.py b/runway/lookups/handlers/random_string.py index 3afa4f387..b9811a44b 100644 --- a/runway/lookups/handlers/random_string.py +++ b/runway/lookups/handlers/random_string.py @@ -6,14 +6,16 @@ import logging import secrets import string -from typing import TYPE_CHECKING, Any, Callable, List, Sequence, Union - -from typing_extensions import Final, Literal +from typing import TYPE_CHECKING, Any, Callable, Final from ...utils import BaseModel from .base import LookupHandler if TYPE_CHECKING: + from collections.abc import Sequence + + from typing_extensions import Literal + from ...context import CfnginContext, RunwayContext LOGGER = logging.getLogger(__name__) @@ -83,7 +85,7 @@ def ensure_has_one_of(cls, args: ArgsDataModel, value: str) -> bool: value: Value to check. 
""" - checks: List[Callable[[str], bool]] = [] + checks: list[Callable[[str], bool]] = [] if args.digits: checks.append(cls.has_digit) if args.lowercase: @@ -98,7 +100,7 @@ def ensure_has_one_of(cls, args: ArgsDataModel, value: str) -> bool: def handle( cls, value: str, - context: Union[CfnginContext, RunwayContext], + context: CfnginContext | RunwayContext, # noqa: ARG003 *__args: Any, **__kwargs: Any, ) -> Any: diff --git a/runway/lookups/handlers/ssm.py b/runway/lookups/handlers/ssm.py index 6f7401d0d..d34edd27f 100644 --- a/runway/lookups/handlers/ssm.py +++ b/runway/lookups/handlers/ssm.py @@ -3,13 +3,13 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING, Any, Union - -from typing_extensions import Final, Literal +from typing import TYPE_CHECKING, Any, Final, Union from ...lookups.handlers.base import LookupHandler if TYPE_CHECKING: + from typing_extensions import Literal + from ...context import CfnginContext, RunwayContext LOGGER = logging.getLogger(__name__) diff --git a/runway/lookups/handlers/var.py b/runway/lookups/handlers/var.py index 6b83055df..c33499dee 100644 --- a/runway/lookups/handlers/var.py +++ b/runway/lookups/handlers/var.py @@ -4,13 +4,13 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING, Any - -from typing_extensions import Final, Literal +from typing import TYPE_CHECKING, Any, Final from .base import LookupHandler if TYPE_CHECKING: + from typing_extensions import Literal + from ...utils import MutableMap diff --git a/runway/lookups/registry.py b/runway/lookups/registry.py index 139a35787..2c69015f5 100644 --- a/runway/lookups/registry.py +++ b/runway/lookups/registry.py @@ -3,7 +3,7 @@ from __future__ import annotations import logging -from typing import Dict, Type, Union, cast +from typing import cast from ..utils import load_object_from_string from .handlers.base import LookupHandler @@ -14,13 +14,11 @@ from .handlers.ssm import SsmLookup from .handlers.var import VarLookup -RUNWAY_LOOKUP_HANDLERS: Dict[str, Type[LookupHandler]] = {} +RUNWAY_LOOKUP_HANDLERS: dict[str, type[LookupHandler]] = {} LOGGER = logging.getLogger(__name__) -def register_lookup_handler( - lookup_type: str, handler_or_path: Union[str, Type[LookupHandler]] -) -> None: +def register_lookup_handler(lookup_type: str, handler_or_path: str | type[LookupHandler]) -> None: """Register a lookup handler. 
Args: @@ -39,7 +37,7 @@ def register_lookup_handler( if issubclass(handler, LookupHandler): RUNWAY_LOOKUP_HANDLERS[lookup_type] = handler return - except Exception: + except Exception: # noqa: BLE001 LOGGER.debug("failed to validate lookup handler", exc_info=True) raise TypeError( f"lookup {handler_or_path} must be a subclass of " diff --git a/runway/mixins.py b/runway/mixins.py index 2cb3a855d..79958ff74 100644 --- a/runway/mixins.py +++ b/runway/mixins.py @@ -6,26 +6,17 @@ import platform import shutil import subprocess +from collections.abc import Iterable from contextlib import suppress -from typing import ( - TYPE_CHECKING, - ClassVar, - Dict, - Iterable, - List, - Optional, - Union, - cast, - overload, -) - -from typing_extensions import Literal +from typing import TYPE_CHECKING, ClassVar, cast, overload from .compat import shlex_join if TYPE_CHECKING: from pathlib import Path + from typing_extensions import Literal + from ._logging import RunwayLogger from .context import CfnginContext, RunwayContext @@ -38,7 +29,7 @@ class CliInterfaceMixin: EXECUTABLE: ClassVar[str] """CLI executable.""" - ctx: Union[CfnginContext, RunwayContext] + ctx: CfnginContext | RunwayContext """CFNgin or Runway context object.""" cwd: Path @@ -52,21 +43,20 @@ def convert_to_cli_arg(arg_name: str, *, prefix: str = "--") -> str: @classmethod def found_in_path(cls) -> bool: """Determine if executable is found in $PATH.""" - if shutil.which(cls.EXECUTABLE): - return True - return False + return bool(shutil.which(cls.EXECUTABLE)) @classmethod def generate_command( cls, - command: Union[List[str], str], - **kwargs: Optional[Union[bool, Iterable[str], str]], - ) -> List[str]: + command: list[str] | str, + **kwargs: bool | Iterable[str] | str | None, + ) -> list[str]: """Generate command to be executed and log it. Args: command: Command to run. args: Additional args to pass to the command. + **kwargs: Arbitrary keyword arguments. Returns: The full command to be passed into a subprocess. @@ -79,10 +69,10 @@ def generate_command( @classmethod def _generate_command_handle_kwargs( - cls, **kwargs: Optional[Union[bool, Iterable[str], str]] - ) -> List[str]: + cls, **kwargs: bool | Iterable[str] | str | None + ) -> list[str]: """Handle kwargs passed to generate_command.""" - result: List[str] = [] + result: list[str] = [] for k, v in kwargs.items(): if isinstance(v, str): result.extend([cls.convert_to_cli_arg(k), v]) @@ -107,28 +97,28 @@ def list2cmdline(split_command: Iterable[str]) -> str: @overload def _run_command( self, - command: Union[Iterable[str], str], + command: Iterable[str] | str, *, - env: Optional[Dict[str, str]] = ..., + env: dict[str, str] | None = ..., suppress_output: Literal[True] = ..., ) -> str: ... @overload def _run_command( self, - command: Union[Iterable[str], str], + command: Iterable[str] | str, *, - env: Optional[Dict[str, str]] = ..., + env: dict[str, str] | None = ..., suppress_output: Literal[False] = ..., ) -> None: ... def _run_command( self, - command: Union[Iterable[str], str], + command: Iterable[str] | str, *, - env: Optional[Dict[str, str]] = None, + env: dict[str, str] | None = None, suppress_output: bool = True, - ) -> Optional[str]: + ) -> str | None: """Run command. 
Args: diff --git a/runway/module/base.py b/runway/module/base.py index a57cb38ec..97350e4c8 100644 --- a/runway/module/base.py +++ b/runway/module/base.py @@ -4,14 +4,15 @@ import logging import subprocess -from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast +from typing import TYPE_CHECKING, Any, cast from ..exceptions import NpmNotFound from ..utils import which from .utils import NPM_BIN, format_npm_command_for_logging, use_npm_ci if TYPE_CHECKING: + from pathlib import Path + from .._logging import PrefixAdaptor, RunwayLogger from ..context import RunwayContext @@ -22,22 +23,22 @@ class RunwayModule: """Base class for Runway modules.""" ctx: RunwayContext - explicitly_enabled: Optional[bool] - logger: Union[PrefixAdaptor, RunwayLogger] + explicitly_enabled: bool | None + logger: PrefixAdaptor | RunwayLogger name: str - options: Union[Dict[str, Any], ModuleOptions] + options: dict[str, Any] | ModuleOptions region: str def __init__( self, context: RunwayContext, *, - explicitly_enabled: Optional[bool] = False, + explicitly_enabled: bool | None = False, logger: RunwayLogger = LOGGER, module_root: Path, - name: Optional[str] = None, - options: Optional[Union[Dict[str, Any], ModuleOptions]] = None, - parameters: Optional[Dict[str, Any]] = None, + name: str | None = None, + options: dict[str, Any] | ModuleOptions | None = None, + parameters: dict[str, Any] | None = None, **_: Any, ) -> None: """Instantiate class. @@ -99,12 +100,12 @@ def __init__( self, context: RunwayContext, *, - explicitly_enabled: Optional[bool] = False, + explicitly_enabled: bool | None = False, logger: RunwayLogger = LOGGER, module_root: Path, - name: Optional[str] = None, - options: Optional[Union[Dict[str, Any], ModuleOptions]] = None, - parameters: Optional[Dict[str, Any]] = None, + name: str | None = None, + options: dict[str, Any] | ModuleOptions | None = None, + parameters: dict[str, Any] | None = None, **_: Any, ) -> None: """Instantiate class. @@ -136,7 +137,7 @@ def __init__( self.check_for_npm(logger=self.logger) # fail fast self.warn_on_boto_env_vars(self.ctx.env.vars, logger=logger) - def log_npm_command(self, command: List[str]) -> None: + def log_npm_command(self, command: list[str]) -> None: """Log an npm command that is going to be run. Args: @@ -174,9 +175,7 @@ def package_json_missing(self) -> bool: return False @staticmethod - def check_for_npm( - *, logger: Union[logging.Logger, PrefixAdaptor, RunwayLogger] = LOGGER - ) -> None: + def check_for_npm(*, logger: logging.Logger | PrefixAdaptor | RunwayLogger = LOGGER) -> None: """Ensure npm is installed and in the current path. Args: @@ -192,9 +191,9 @@ def check_for_npm( @staticmethod def warn_on_boto_env_vars( - env_vars: Dict[str, str], + env_vars: dict[str, str], *, - logger: Union[logging.Logger, PrefixAdaptor, RunwayLogger] = LOGGER, + logger: logging.Logger | PrefixAdaptor | RunwayLogger = LOGGER, ) -> None: """Inform user if boto-specific environment variables are in use. 
@@ -219,7 +218,7 @@ def get(self, name: str, default: Any = None) -> Any: """Get a value or return the default.""" return getattr(self, name, default) - def __eq__(self, other: Any) -> bool: + def __eq__(self, other: object) -> bool: """Assess equality.""" if isinstance(other, self.__class__): return self.__dict__ == other.__dict__ diff --git a/runway/module/cdk.py b/runway/module/cdk.py index 35f1300d3..df3ea2c0a 100644 --- a/runway/module/cdk.py +++ b/runway/module/cdk.py @@ -6,8 +6,7 @@ import platform import subprocess import sys -from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast +from typing import TYPE_CHECKING, Any, cast from typing_extensions import Literal @@ -19,6 +18,8 @@ from .utils import generate_node_command, run_module_command if TYPE_CHECKING: + from pathlib import Path + from .._logging import RunwayLogger from ..context import RunwayContext @@ -53,12 +54,12 @@ def __init__( self, context: RunwayContext, *, - explicitly_enabled: Optional[bool] = False, + explicitly_enabled: bool | None = False, logger: RunwayLogger = LOGGER, module_root: Path, - name: Optional[str] = None, - options: Optional[Union[Dict[str, Any], ModuleOptions]] = None, - parameters: Optional[Dict[str, Any]] = None, + name: str | None = None, + options: dict[str, Any] | ModuleOptions | None = None, + parameters: dict[str, Any] | None = None, **_: Any, ) -> None: """Instantiate class. @@ -92,9 +93,9 @@ def __init__( LOGGER.warning("%s:%s", self.name, self.DEPRECATION_MSG) @cached_property - def cli_args(self) -> List[str]: + def cli_args(self) -> list[str]: """Generate CLI args from self used in all CDK commands.""" - result: List[str] = [] + result: list[str] = [] if self.ctx.no_color: result.append("--no-color") if self.ctx.env.debug: @@ -104,9 +105,9 @@ def cli_args(self) -> List[str]: return result @cached_property - def cli_args_context(self) -> List[str]: + def cli_args_context(self) -> list[str]: """Generate CLI args from self passed to CDK commands as ``--context``.""" - result: List[str] = [] + result: list[str] = [] args = {"environment": self.ctx.env.name} args.update(self.parameters) for key, val in args.items(): @@ -157,7 +158,7 @@ def cdk_destroy(self) -> None: ) self.logger.info("destroy (complete)") - def cdk_diff(self, stack_name: Optional[str] = None) -> None: + def cdk_diff(self, stack_name: str | None = None) -> None: """Execute ``cdk diff`` command.""" self.logger.info("plan (in progress)") try: @@ -179,11 +180,11 @@ def cdk_diff(self, stack_name: Optional[str] = None) -> None: "is not enabled", stack_name, ) - # TODO raise error instead of sys.exit() when refactoring cli error handling + # TODO (kyle): raise error instead of sys.exit() when refactoring cli error handling sys.exit(exc.returncode) self.logger.info("plan (complete)") - def cdk_list(self) -> List[str]: + def cdk_list(self) -> list[str]: """Execute ``cdk list`` command.""" result = subprocess.check_output( self.gen_cmd("list", include_context=True), @@ -213,10 +214,10 @@ def destroy(self) -> None: def gen_cmd( self, command: CdkCommandTypeDef, - args_list: Optional[List[str]] = None, + args_list: list[str] | None = None, *, include_context: bool = False, - ) -> List[str]: + ) -> list[str]: """Generate and log a CDK command. This does not execute the command, only prepares it for use. @@ -231,7 +232,7 @@ def gen_cmd( The full command to be passed into a subprocess. 
""" - args = [command] + self.cli_args + args = [command, *self.cli_args] args.extend(args_list or []) if include_context: args.extend(self.cli_args_context) diff --git a/runway/module/cloudformation.py b/runway/module/cloudformation.py index cf74037ac..e46ef24e0 100644 --- a/runway/module/cloudformation.py +++ b/runway/module/cloudformation.py @@ -3,14 +3,15 @@ from __future__ import annotations import logging -from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, Optional, Union, cast +from typing import TYPE_CHECKING, Any, cast from .._logging import PrefixAdaptor from ..cfngin.cfngin import CFNgin from .base import RunwayModule if TYPE_CHECKING: + from pathlib import Path + from .._logging import RunwayLogger from ..context import RunwayContext from .base import ModuleOptions @@ -25,12 +26,12 @@ def __init__( self, context: RunwayContext, *, - explicitly_enabled: Optional[bool] = False, + explicitly_enabled: bool | None = False, logger: RunwayLogger = LOGGER, module_root: Path, - name: Optional[str] = None, - options: Optional[Union[Dict[str, Any], ModuleOptions]] = None, - parameters: Optional[Dict[str, Any]] = None, + name: str | None = None, + options: dict[str, Any] | ModuleOptions | None = None, + parameters: dict[str, Any] | None = None, **_: Any, ) -> None: """Instantiate class. diff --git a/runway/module/k8s.py b/runway/module/k8s.py index 285afcb2e..901692e4c 100644 --- a/runway/module/k8s.py +++ b/runway/module/k8s.py @@ -6,14 +6,13 @@ import subprocess import sys from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast +from typing import TYPE_CHECKING, Any, cast from typing_extensions import Literal from .._logging import PrefixAdaptor from ..compat import cached_property from ..config.models.runway.options.k8s import RunwayK8sModuleOptionsDataModel -from ..core.components import DeployEnvironment from ..env_mgr.kbenv import KBEnvManager from ..exceptions import KubectlVersionNotSpecified from ..utils import which @@ -23,6 +22,7 @@ if TYPE_CHECKING: from .._logging import RunwayLogger from ..context import RunwayContext + from ..core.components import DeployEnvironment LOGGER = cast("RunwayLogger", logging.getLogger(__name__)) @@ -63,12 +63,12 @@ def __init__( self, context: RunwayContext, *, - explicitly_enabled: Optional[bool] = False, + explicitly_enabled: bool | None = False, logger: RunwayLogger = LOGGER, module_root: Path, - name: Optional[str] = None, - options: Optional[Union[Dict[str, Any], ModuleOptions]] = None, - parameters: Optional[Dict[str, Any]] = None, + name: str | None = None, + options: dict[str, Any] | ModuleOptions | None = None, + parameters: dict[str, Any] | None = None, **_: Any, ) -> None: """Instantiate class. @@ -154,8 +154,8 @@ def destroy(self) -> None: def gen_cmd( self, command: KubectlCommandTypeDef, - args_list: Optional[List[str]] = None, - ) -> List[str]: + args_list: list[str] | None = None, + ) -> list[str]: """Generate and log a kubectl command. This does not execute the command, only prepares it for use. @@ -243,7 +243,7 @@ class K8sOptions(ModuleOptions): data: RunwayK8sModuleOptionsDataModel deploy_environment: DeployEnvironment - kubectl_version: Optional[str] + kubectl_version: str | None path: Path def __init__( @@ -282,7 +282,7 @@ def overlay_path(self) -> Path: ) @staticmethod - def gen_overlay_dirs(environment: str, region: str) -> List[str]: + def gen_overlay_dirs(environment: str, region: str) -> list[str]: """Generate possible overlay directories. 
Prefers more explicit directory name but falls back to environment name only. @@ -309,7 +309,7 @@ def parse_obj( cls, deploy_environment: DeployEnvironment, obj: object, - path: Optional[Path] = None, + path: Path | None = None, ) -> K8sOptions: """Parse options definition and return an options object. diff --git a/runway/module/serverless.py b/runway/module/serverless.py index d2855fb72..8630e91cc 100644 --- a/runway/module/serverless.py +++ b/runway/module/serverless.py @@ -11,7 +11,7 @@ import tempfile import uuid from pathlib import Path -from typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union, cast +from typing import IO, TYPE_CHECKING, Any, Callable, cast import yaml @@ -34,15 +34,15 @@ LOGGER = cast("RunwayLogger", logging.getLogger(__name__)) -def gen_sls_config_files(stage: str, region: str) -> List[str]: +def gen_sls_config_files(stage: str, region: str) -> list[str]: """Generate possible SLS config files names.""" - names: List[str] = [] + names: list[str] = [] for ext in ["yml", "json"]: # Give preference to explicit stage-region files - names.append(os.path.join("env", f"{stage}-{region}.{ext}")) + names.append(os.path.join("env", f"{stage}-{region}.{ext}")) # noqa: PTH118 names.append(f"config-{stage}-{region}.{ext}") # Fallback to stage name only - names.append(os.path.join("env", f"{stage}.{ext}")) + names.append(os.path.join("env", f"{stage}.{ext}")) # noqa: PTH118 names.append(f"config-{stage}.{ext}") return names @@ -56,12 +56,12 @@ def __init__( self, context: RunwayContext, *, - explicitly_enabled: Optional[bool] = False, + explicitly_enabled: bool | None = False, logger: RunwayLogger = LOGGER, module_root: Path, - name: Optional[str] = None, - options: Optional[Union[Dict[str, Any], ModuleOptions]] = None, - parameters: Optional[Dict[str, Any]] = None, + name: str | None = None, + options: dict[str, Any] | ModuleOptions | None = None, + parameters: dict[str, Any] | None = None, **_: Any, ) -> None: """Instantiate class. @@ -94,7 +94,7 @@ def __init__( self.stage = self.ctx.env.name @property - def cli_args(self) -> List[str]: + def cli_args(self) -> list[str]: """Generate CLI args from self used in all Serverless commands.""" result = ["--region", self.region, "--stage", self.stage] if "DEBUG" in self.ctx.env.vars: @@ -102,7 +102,7 @@ def cli_args(self) -> List[str]: return result @cached_property - def env_file(self) -> Optional[Path]: + def env_file(self) -> Path | None: """Find the environment file for the module.""" for name in gen_sls_config_files(self.stage, self.region): test_path = self.path / name @@ -117,7 +117,7 @@ def skip(self) -> bool: if self.parameters or self.explicitly_enabled or self.env_file: return False self.logger.info( - "skipped; config file for this stage/region not found" " -- looking for one of: %s", + "skipped; config file for this stage/region not found -- looking for one of: %s", ", ".join(gen_sls_config_files(self.stage, self.region)), ) else: @@ -157,11 +157,11 @@ def extend_serverless_yml(self, func: Callable[..., None]) -> None: self.logger.debug("removed temporary Serverless config") except OSError: self.logger.debug( - "encountered an error when trying to delete the " "temporary Serverless config", + "encountered an error when trying to delete the temporary Serverless config", exc_info=True, ) - def gen_cmd(self, command: str, args_list: Optional[List[str]] = None) -> List[str]: + def gen_cmd(self, command: str, args_list: list[str] | None = None) -> list[str]: """Generate and log a Serverless command. 
This does not execute the command, only prepares it for use. @@ -174,7 +174,7 @@ def gen_cmd(self, command: str, args_list: Optional[List[str]] = None) -> List[s The full command to be passed into a subprocess. """ - args = [command] + self.cli_args + self.options.args + args = [command, *self.cli_args, *self.options.args] args.extend(args_list or []) if command not in ["remove", "package", "print"] and self.ctx.is_noninteractive: args.append("--conceal") # hide secrets from serverless output @@ -198,7 +198,7 @@ def gen_cmd(self, command: str, args_list: Optional[List[str]] = None) -> List[s command="sls", command_opts=args, path=self.path, logger=self.logger ) - def sls_deploy(self, *, package: Optional[AnyPath] = None, skip_install: bool = False) -> None: + def sls_deploy(self, *, package: AnyPath | None = None, skip_install: bool = False) -> None: """Execute ``sls deploy`` command. Args: @@ -220,9 +220,9 @@ def sls_deploy(self, *, package: Optional[AnyPath] = None, skip_install: bool = def sls_package( self, *, - output_path: Optional[AnyPathConstrained] = None, + output_path: AnyPathConstrained | None = None, skip_install: bool = False, - ) -> Optional[AnyPathConstrained]: + ) -> AnyPathConstrained | None: """Execute ``sls package`` command. Args: @@ -244,8 +244,8 @@ def sls_package( return output_path def sls_print( - self, *, item_path: Optional[str] = None, skip_install: bool = False - ) -> Dict[str, Any]: + self, *, item_path: str | None = None, skip_install: bool = False + ) -> dict[str, Any]: """Execute ``sls print`` command. Keyword Args: @@ -290,7 +290,7 @@ def sls_remove(self, *, skip_install: bool = False) -> None: self.npm_install() stack_missing = False # track output for acceptable error self.logger.info("destroy (in progress)") - with subprocess.Popen( + with subprocess.Popen( # noqa: SIM117 self.gen_cmd("remove"), bufsize=1, env=self.ctx.env.vars, @@ -299,7 +299,7 @@ def sls_remove(self, *, skip_install: bool = False) -> None: ) as proc: with cast(IO[str], proc.stdout): for line in cast(IO[str], proc.stdout): - print(line, end="") + print(line, end="") # noqa: T201 if re.search(r"Stack '.*' does not exist", line): stack_missing = True if proc.wait() != 0 and not stack_missing: @@ -362,9 +362,9 @@ class ServerlessArtifact: def __init__( self, context: RunwayContext, - config: Dict[str, Any], + config: dict[str, Any], *, - logger: Union[PrefixAdaptor, RunwayLogger] = LOGGER, + logger: PrefixAdaptor | RunwayLogger = LOGGER, package_path: AnyPath, path: AnyPath, ) -> None: @@ -386,16 +386,18 @@ def __init__( self.path = Path(path) if isinstance(path, str) else path @cached_property - def source_hash(self) -> Dict[str, str]: + def source_hash(self) -> dict[str, str]: """File hash(es) of each service's source code.""" if self.config.get("package", {"": ""}).get("individually"): return { - name: get_hash_of_files(self.path / os.path.dirname(detail.get("handler"))) + name: get_hash_of_files( + self.path / os.path.dirname(detail.get("handler")) # noqa: PTH120 + ) for name, detail in self.config.get("functions", {}).items() } - directories: List[Dict[str, Union[List[str], str]]] = [] - for _name, detail in self.config.get("functions", {}).items(): - func_path = {"path": os.path.dirname(detail.get("handler"))} + directories: list[dict[str, list[str] | str]] = [] + for detail in self.config.get("functions", {}).values(): + func_path = {"path": os.path.dirname(detail.get("handler"))} # noqa: PTH120 if func_path not in directories: directories.append(func_path) if 
isinstance(self.config["service"], dict): @@ -471,9 +473,9 @@ def __init__(self, data: RunwayServerlessModuleOptionsDataModel) -> None: self.skip_npm_ci = data.skip_npm_ci @property - def args(self) -> List[str]: + def args(self) -> list[str]: """List of CLI arguments/options to pass to the Serverless Framework CLI.""" - known_args: List[str] = [] + known_args: list[str] = [] for key, val in self._cli_args.items(): if isinstance(val, str): known_args.extend([f"--{key}", val]) diff --git a/runway/module/staticsite/handler.py b/runway/module/staticsite/handler.py index d6cd01389..2af2a6e78 100644 --- a/runway/module/staticsite/handler.py +++ b/runway/module/staticsite/handler.py @@ -8,7 +8,7 @@ import sys import tempfile from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast +from typing import TYPE_CHECKING, Any, cast import yaml @@ -44,12 +44,12 @@ def __init__( self, context: RunwayContext, *, - explicitly_enabled: Optional[bool] = False, + explicitly_enabled: bool | None = False, logger: RunwayLogger = LOGGER, module_root: Path, - name: Optional[str] = None, - options: Optional[Union[Dict[str, Any], ModuleOptions]] = None, - parameters: Optional[Dict[str, Any]] = None, + name: str | None = None, + options: dict[str, Any] | ModuleOptions | None = None, + parameters: dict[str, Any] | None = None, **_: Any, ) -> None: """Instantiate class. @@ -197,7 +197,7 @@ def _create_dependencies_yaml(self, module_dir: Path) -> Path: Path to the file that was created. """ - pre_deploy: List[Any] = [] + pre_deploy: list[Any] = [] pre_destroy = [ { @@ -260,7 +260,7 @@ def _create_dependencies_yaml(self, module_dir: Path) -> Path: } ) - content: Dict[str, Any] = { + content: dict[str, Any] = { "cfngin_bucket": "", "namespace": "${namespace}", "pre_deploy": pre_deploy, @@ -275,8 +275,9 @@ def _create_dependencies_yaml(self, module_dir: Path) -> Path: } out_file = module_dir / "01-dependencies.yaml" - with open(out_file, "w", encoding="utf-8") as output_stream: - yaml.dump(content, output_stream, default_flow_style=False, sort_keys=True) + out_file.write_text( + yaml.dump(content, default_flow_style=False, sort_keys=True), encoding="utf-8" + ) self.logger.debug("created %s:\n%s", out_file.name, yaml.dump(content, Dumper=YamlDumper)) return out_file @@ -297,7 +298,7 @@ def _create_staticsite_yaml(self, module_dir: Path) -> Path: self.options.source_hashing.parameter = f"${{namespace}}-{self.sanitized_name}-hash" nonce_secret_param = f"${{namespace}}-{self.sanitized_name}-nonce-secret" - build_staticsite_args: Dict[str, Any] = { + build_staticsite_args: dict[str, Any] = { # ensures yaml.safe_load will work by using JSON to convert objects "options": json.loads(self.options.data.json(by_alias=True)) } @@ -306,9 +307,7 @@ def _create_staticsite_yaml(self, module_dir: Path) -> Path: ) build_staticsite_args["options"]["namespace"] = "${namespace}" build_staticsite_args["options"]["name"] = self.sanitized_name - build_staticsite_args["options"]["path"] = os.path.join( - os.path.realpath(self.ctx.env.root_dir), self.path - ) + build_staticsite_args["options"]["path"] = self.ctx.env.root_dir.resolve() / self.path site_stack_variables = self._get_site_stack_variables() @@ -443,8 +442,9 @@ def _create_staticsite_yaml(self, module_dir: Path) -> Path: } out_file = module_dir / "02-staticsite.yaml" - with open(out_file, "w", encoding="utf-8") as output_stream: - yaml.dump(content, output_stream, default_flow_style=False, sort_keys=True) + out_file.write_text( + 
yaml.dump(content, default_flow_style=False, sort_keys=True), encoding="utf-8" + ) self.logger.debug("created 02-staticsite.yaml:\n%s", yaml.dump(content, Dumper=YamlDumper)) return out_file @@ -466,19 +466,23 @@ def _create_cleanup_yaml(self, module_dir: Path) -> Path: "service_role": self.parameters.service_role, "stacks": { f"{self.sanitized_name}-cleanup": { - "template_path": os.path.join(tempfile.gettempdir(), "thisfileisnotused.yaml"), + "template_path": os.path.join( # noqa: PTH118 + tempfile.gettempdir(), + "thisfileisnotused.yaml", # cspell: disable-line + ), } }, } out_file = module_dir / "03-cleanup.yaml" - with open(out_file, "w", encoding="utf-8") as output_stream: - yaml.dump(content, output_stream, default_flow_style=False, sort_keys=True) + out_file.write_text( + yaml.dump(content, default_flow_style=False, sort_keys=True), encoding="utf-8" + ) self.logger.debug("created %s:\n%s", out_file.name, yaml.dump(content, Dumper=YamlDumper)) return out_file - def _get_site_stack_variables(self) -> Dict[str, Any]: - site_stack_variables: Dict[str, Any] = { + def _get_site_stack_variables(self) -> dict[str, Any]: + site_stack_variables: dict[str, Any] = { "Aliases": [], "Compress": self.parameters.compress, "DisableCloudFront": self.parameters.cf_disable, @@ -519,8 +523,8 @@ def _get_site_stack_variables(self) -> Dict[str, Any]: return site_stack_variables - def _get_dependencies_variables(self) -> Dict[str, Any]: - variables: Dict[str, Any] = {"OAuthScopes": self.parameters.oauth_scopes} + def _get_dependencies_variables(self) -> dict[str, Any]: + variables: dict[str, Any] = {"OAuthScopes": self.parameters.oauth_scopes} if self.parameters.auth_at_edge: self._ensure_auth_at_edge_requirements() @@ -546,8 +550,8 @@ def _get_dependencies_variables(self) -> Dict[str, Any]: return variables - def _get_user_pool_id_retriever_variables(self) -> Dict[str, Any]: - args: Dict[str, Any] = { + def _get_user_pool_id_retriever_variables(self) -> dict[str, Any]: + args: dict[str, Any] = { "user_pool_arn": self.parameters.user_pool_arn, } @@ -558,7 +562,7 @@ def _get_user_pool_id_retriever_variables(self) -> Dict[str, Any]: return args - def _get_domain_updater_variables(self) -> Dict[str, str]: + def _get_domain_updater_variables(self) -> dict[str, str]: return { "client_id_output_lookup": f"{self.sanitized_name}-dependencies::AuthAtEdgeClient", "client_id": f"${{rxref {self.sanitized_name}-dependencies::AuthAtEdgeClient}}", @@ -566,10 +570,10 @@ def _get_domain_updater_variables(self) -> Dict[str, str]: def _get_lambda_config_variables( self, - site_stack_variables: Dict[str, Any], + site_stack_variables: dict[str, Any], nonce_secret_param: str, - required_group: Optional[str] = None, - ) -> Dict[str, Any]: + required_group: str | None = None, + ) -> dict[str, Any]: return { "client_id": f"${{rxref {self.sanitized_name}-dependencies::AuthAtEdgeClient}}", "bucket": f"${{rxref {self.sanitized_name}-dependencies::ArtifactsBucketName}}", @@ -584,8 +588,8 @@ def _get_lambda_config_variables( } def _get_client_updater_variables( - self, name: str, site_stack_variables: Dict[str, Any] - ) -> Dict[str, Any]: + self, name: str, site_stack_variables: dict[str, Any] + ) -> dict[str, Any]: return { "alternate_domains": [add_url_scheme(x) for x in site_stack_variables["Aliases"]], "client_id": f"${{rxref {self.sanitized_name}-dependencies::AuthAtEdgeClient}}", @@ -618,7 +622,7 @@ def _ensure_cloudfront_with_auth_at_edge(self) -> None: """Exit if both the Auth@Edge and CloudFront disablement are true.""" if 
self.parameters.cf_disable and self.parameters.auth_at_edge: self.logger.error( - 'staticsite_cf_disable must be "false" if ' 'staticsite_auth_at_edge is "true"' + 'staticsite_cf_disable must be "false" if staticsite_auth_at_edge is "true"' ) sys.exit(1) diff --git a/runway/module/staticsite/options/models.py b/runway/module/staticsite/options/models.py index f2ba776a9..5ddea7c7c 100644 --- a/runway/module/staticsite/options/models.py +++ b/runway/module/staticsite/options/models.py @@ -1,5 +1,6 @@ """Runway static site Module options.""" +# ruff: noqa: UP006, UP035 from __future__ import annotations from pathlib import Path @@ -37,19 +38,19 @@ class Config(ConfigProperty.Config): title = "Runway static site Module extra_files option item." @root_validator - def _autofill_content_type(cls, values: Dict[str, Any]) -> Dict[str, Any]: + def _autofill_content_type(cls, values: Dict[str, Any]) -> Dict[str, Any]: # noqa: N805 """Attempt to fill content_type if not provided.""" if values.get("content_type"): return values name = cast(str, values.get("name", "")) if name.endswith(".json"): values["content_type"] = "application/json" - elif name.endswith(".yaml") or name.endswith(".yml"): + elif name.endswith((".yaml", ".yml")): values["content_type"] = "text/yaml" return values @root_validator(pre=True) - def _validate_content_or_file(cls, values: Dict[str, Any]) -> Dict[str, Any]: + def _validate_content_or_file(cls, values: Dict[str, Any]) -> Dict[str, Any]: # noqa: N805 """Validate that content or file is provided.""" if all(i in values and values[i] for i in ["content", "file"]): raise ValueError("only one of content or file can be provided") diff --git a/runway/module/staticsite/parameters/models.py b/runway/module/staticsite/parameters/models.py index dfd636abc..cbd700359 100644 --- a/runway/module/staticsite/parameters/models.py +++ b/runway/module/staticsite/parameters/models.py @@ -1,8 +1,9 @@ """Runway static site Module parameters.""" +# ruff: noqa: UP006, UP035 from __future__ import annotations -from typing import Dict, List, Optional, Union +from typing import Dict, List, Optional from pydantic import Extra, Field, validator @@ -128,7 +129,7 @@ class RunwayStaticSiteModuleParametersDataModel(ConfigProperty): "font-src 'self' 'unsafe-inline' 'unsafe-eval' data: https:; " "object-src 'none'; " "connect-src 'self' https://*.amazonaws.com https://*.amazoncognito.com", - "Strict-Transport-Security": "max-age=31536000; " "includeSubdomains; " "preload", + "Strict-Transport-Security": "max-age=31536000; includeSubdomains; preload", "Referrer-Policy": "same-origin", "X-XSS-Protection": "1; mode=block", "X-Frame-Options": "DENY", @@ -183,7 +184,7 @@ class Config(ConfigProperty.Config): "supported_identity_providers", pre=True, ) - def _convert_comma_delimited_list(cls, v: Union[List[str], str]) -> List[str]: + def _convert_comma_delimited_list(cls, v: list[str] | str) -> list[str]: # noqa: N805 """Convert comma delimited lists to a string.""" if isinstance(v, str): return [i.strip() for i in v.split(",")] diff --git a/runway/module/staticsite/utils.py b/runway/module/staticsite/utils.py index c157384a8..86c81cfe3 100644 --- a/runway/module/staticsite/utils.py +++ b/runway/module/staticsite/utils.py @@ -8,6 +8,6 @@ def add_url_scheme(url: str) -> str: url (str): The current url. 
""" - if url.startswith("https://") or url.startswith("http://"): + if url.startswith(("https://", "http://")): return url return f"https://{url}" diff --git a/runway/module/terraform.py b/runway/module/terraform.py index 8492b0e3f..63a17ca17 100644 --- a/runway/module/terraform.py +++ b/runway/module/terraform.py @@ -8,7 +8,7 @@ import subprocess import sys from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, Any, cast import hcl from send2trash import send2trash @@ -34,7 +34,7 @@ LOGGER = cast("RunwayLogger", logging.getLogger(__name__)) -def gen_workspace_tfvars_files(environment: str, region: str) -> List[str]: +def gen_workspace_tfvars_files(environment: str, region: str) -> list[str]: """Generate possible Terraform workspace tfvars filenames.""" return [ # Give preference to explicit environment-region files @@ -45,9 +45,9 @@ def gen_workspace_tfvars_files(environment: str, region: str) -> List[str]: def update_env_vars_with_tf_var_values( - os_env_vars: Dict[str, str], - tf_vars: Dict[str, Union[Dict[str, Any], List[Any], str]], -) -> Dict[str, str]: + os_env_vars: dict[str, str], + tf_vars: dict[str, dict[str, Any] | list[Any] | str], +) -> dict[str, str]: """Return os_env_vars with TF_VAR values for each tf_var.""" # https://www.terraform.io/docs/commands/environment-variables.html#tf_var_name for key, val in tf_vars.items(): @@ -85,12 +85,12 @@ def __init__( self, context: RunwayContext, *, - explicitly_enabled: Optional[bool] = False, + explicitly_enabled: bool | None = False, logger: RunwayLogger = LOGGER, module_root: Path, - name: Optional[str] = None, - options: Optional[Union[Dict[str, Any], ModuleOptions]] = None, - parameters: Optional[Dict[str, Any]] = None, + name: str | None = None, + options: dict[str, Any] | ModuleOptions | None = None, + parameters: dict[str, Any] | None = None, **_: Any, ) -> None: """Instantiate class. 
@@ -149,9 +149,9 @@ def current_workspace(self) -> str: return self.terraform_workspace_show() @cached_property - def env_file(self) -> List[str]: + def env_file(self) -> list[str]: """Find the environment file for the module.""" - result: List[str] = [] + result: list[str] = [] for name in gen_workspace_tfvars_files(self.ctx.env.name, self.ctx.env.aws_region): test_path = self.path / name if test_path.is_file(): @@ -222,7 +222,7 @@ def cleanup_dot_terraform(self) -> None: return self.logger.verbose( - ".terraform directory exists from a previous run; " "removing some of its contents" + ".terraform directory exists from a previous run; removing some of its contents" ) for child in dot_terraform.iterdir(): if child.name == "plugins" and child.is_dir(): @@ -241,14 +241,15 @@ def destroy(self) -> None: def gen_command( self, - command: Union[List[str], str, Tuple[str, ...]], - args_list: Optional[List[str]] = None, - ) -> List[str]: + command: list[str] | str | tuple[str, ...], + args_list: list[str] | None = None, + ) -> list[str]: """Generate Terraform command.""" - if isinstance(command, (list, tuple)): - cmd = [self.tf_bin, *command] - else: - cmd = [self.tf_bin, command] + cmd = ( + [self.tf_bin, *command] + if isinstance(command, (list, tuple)) + else [self.tf_bin, command] + ) cmd.extend(args_list or []) if self.ctx.no_color: cmd.append("-no-color") @@ -263,7 +264,7 @@ def handle_backend(self) -> None: """ if not self.tfenv.backend["type"]: self.logger.info( - "unable to determine backend for module; no special handling " "will be applied" + "unable to determine backend for module; no special handling will be applied" ) return handler = f"_{self.tfenv.backend['type']}_backend_handler" @@ -370,7 +371,7 @@ def _terraform_destroy_12(self) -> None: """ return run_module_command( - self.gen_command("destroy", ["-auto-approve"] + self.env_file), + self.gen_command("destroy", ["-auto-approve", *self.env_file]), env_vars=self.ctx.env.vars, logger=self.logger, ) @@ -382,7 +383,7 @@ def _terraform_destroy_15_2(self) -> None: """ return run_module_command( - self.gen_command("apply", ["-destroy", "-auto-approve"] + self.env_file), + self.gen_command("apply", ["-destroy", "-auto-approve", *self.env_file]), env_vars=self.ctx.env.vars, logger=self.logger, ) @@ -394,7 +395,7 @@ def _terraform_destroy_legacy(self) -> None: """ return run_module_command( - self.gen_command("destroy", ["-force"] + self.env_file), + self.gen_command("destroy", ["-force", *self.env_file]), env_vars=self.ctx.env.vars, logger=self.logger, ) @@ -420,7 +421,7 @@ def terraform_init(self) -> None: """ cmd = self.gen_command( "init", - ["-reconfigure"] + self.options.backend_config.init_args + self.options.args.init, + ["-reconfigure", *self.options.backend_config.init_args, *self.options.args.init], ) try: run_module_command( @@ -528,7 +529,7 @@ def run(self, action: TerraformActionTypeDef) -> None: self.logger.info("init (in progress)") self.terraform_init() if self.current_workspace != self.required_workspace: - if re.compile(f"^[*\\s]\\s{self.required_workspace}$", re.M).search( + if re.compile(f"^[*\\s]\\s{self.required_workspace}$", re.MULTILINE).search( self.terraform_workspace_list() ): self.terraform_workspace_select(self.required_workspace) @@ -565,7 +566,7 @@ def __init__( self, data: RunwayTerraformModuleOptionsDataModel, deploy_environment: DeployEnvironment, - path: Optional[Path] = None, + path: Path | None = None, ) -> None: """Instantiate class. 
@@ -597,7 +598,7 @@ def parse_obj( cls, deploy_environment: DeployEnvironment, obj: object, - path: Optional[Path] = None, + path: Path | None = None, ) -> TerraformOptions: """Parse options definition and return an options object. @@ -641,14 +642,14 @@ def __init__( self.region = data.region @cached_property - def config_file(self) -> Optional[Path]: + def config_file(self) -> Path | None: """Backend configuration file.""" return self.get_backend_file(self.path, self.env.name, self.env.aws_region) @cached_property - def init_args(self) -> List[str]: + def init_args(self) -> list[str]: """Return command line arguments for init.""" - result: List[str] = [] + result: list[str] = [] for k, v in self.data.dict(exclude_none=True).items(): result.extend(["-backend-config", f"{k}={v}"]) if not result: @@ -664,16 +665,16 @@ def init_args(self) -> List[str]: LOGGER.debug("provided backend values: %s", json.dumps(result)) return result - def get_full_configuration(self) -> Dict[str, str]: + def get_full_configuration(self) -> dict[str, str]: """Get full backend configuration.""" if not self.config_file: return self.data.dict(exclude_none=True) - result = cast(Dict[str, str], hcl.loads(self.config_file.read_text())) + result = cast(dict[str, str], hcl.loads(self.config_file.read_text())) result.update(self.data.dict(exclude_none=True)) return result @classmethod - def get_backend_file(cls, path: Path, environment: str, region: str) -> Optional[Path]: + def get_backend_file(cls, path: Path, environment: str, region: str) -> Path | None: """Determine Terraform backend file. Args: @@ -690,7 +691,7 @@ def get_backend_file(cls, path: Path, environment: str, region: str) -> Optional return None @staticmethod - def gen_backend_filenames(environment: str, region: str) -> List[str]: + def gen_backend_filenames(environment: str, region: str) -> list[str]: """Generate possible Terraform backend filenames. Args: @@ -704,10 +705,12 @@ def gen_backend_filenames(environment: str, region: str) -> List[str]: "backend-{region}.{extension}", "backend.{extension}", ] - result: List[str] = [] + result: list[str] = [] for fmt in formats: for ext in ["hcl", "tfvars"]: - result.append(fmt.format(environment=environment, extension=ext, region=region)) + result.append( # noqa: PERF401 + fmt.format(environment=environment, extension=ext, region=region) + ) return result @classmethod @@ -715,7 +718,7 @@ def parse_obj( cls, deploy_environment: DeployEnvironment, obj: object, - path: Optional[Path] = None, + path: Path | None = None, ) -> TerraformBackendConfig: """Parse options definition and return an options object. 
diff --git a/runway/module/utils.py b/runway/module/utils.py index 305bfc589..d686c5217 100644 --- a/runway/module/utils.py +++ b/runway/module/utils.py @@ -7,12 +7,12 @@ import platform import subprocess import sys -from pathlib import Path -from typing import TYPE_CHECKING, Dict, List, Optional, Union, cast +from typing import TYPE_CHECKING, cast from ..utils import which if TYPE_CHECKING: + from pathlib import Path from typing import Any from .._logging import RunwayLogger @@ -22,7 +22,7 @@ NPX_BIN = "npx.cmd" if platform.system().lower() == "windows" else "npx" -def format_npm_command_for_logging(command: List[str]) -> str: +def format_npm_command_for_logging(command: list[str]) -> str: """Convert npm command list to string for display to user.""" if platform.system().lower() == "windows" and (command[0] == "npx.cmd" and command[1] == "-c"): return f'npx.cmd -c "{" ".join(command[2:])}"' @@ -31,12 +31,12 @@ def format_npm_command_for_logging(command: List[str]) -> str: def generate_node_command( command: str, - command_opts: List[str], + command_opts: list[str], path: Path, *, - logger: Union[logging.Logger, "logging.LoggerAdapter[Any]"] = LOGGER, - package: Optional[str] = None, -) -> List[str]: + logger: logging.Logger | logging.LoggerAdapter[Any] = LOGGER, + package: str | None = None, +) -> list[str]: """Return node bin command list for subprocess execution. Args: @@ -73,10 +73,10 @@ def generate_node_command( def run_module_command( - cmd_list: List[str], - env_vars: Dict[str, str], + cmd_list: list[str], + env_vars: dict[str, str], exit_on_error: bool = True, - logger: Union[logging.Logger, "logging.LoggerAdapter[Any]"] = LOGGER, + logger: logging.Logger | logging.LoggerAdapter[Any] = LOGGER, ) -> None: """Shell out to provisioner command. 
@@ -103,7 +103,7 @@ def run_module_command( def use_npm_ci(path: Path) -> bool: """Return true if npm ci should be used in lieu of npm install.""" # https://docs.npmjs.com/cli/ci#description - with open(os.devnull, "w", encoding="utf-8") as fnull: + with open(os.devnull, "w", encoding="utf-8") as fnull: # noqa: PTH123 if ( (path / "package-lock.json").is_file() or (path / "npm-shrinkwrap.json").is_file() ) and subprocess.call([NPM_BIN, "ci", "-h"], stdout=fnull, stderr=subprocess.STDOUT) == 0: diff --git a/runway/s3_utils.py b/runway/s3_utils.py index 83dfd912e..36f36b8fa 100644 --- a/runway/s3_utils.py +++ b/runway/s3_utils.py @@ -6,12 +6,15 @@ import os import tempfile import zipfile -from typing import TYPE_CHECKING, Any, Dict, Iterator, Optional, Sequence, cast +from pathlib import Path +from typing import TYPE_CHECKING, Any, cast import boto3 from botocore.exceptions import ClientError if TYPE_CHECKING: + from collections.abc import Iterator, Sequence + from mypy_boto3_s3.client import S3Client from mypy_boto3_s3.service_resource import S3ServiceResource from mypy_boto3_s3.type_defs import ObjectTypeDef @@ -21,20 +24,20 @@ LOGGER = cast("RunwayLogger", logging.getLogger(__name__)) -def _get_client(session: Optional[boto3.Session] = None, region: Optional[str] = None) -> S3Client: +def _get_client(session: boto3.Session | None = None, region: str | None = None) -> S3Client: """Get S3 boto client.""" return session.client("s3") if session else boto3.client("s3", region_name=region) def _get_resource( - session: Optional[boto3.Session] = None, region: Optional[str] = None + session: boto3.Session | None = None, region: str | None = None ) -> S3ServiceResource: """Get S3 boto resource.""" return session.resource("s3") if session else boto3.resource("s3", region_name=region) def purge_and_delete_bucket( - bucket_name: str, region: str = "us-east-1", session: Optional[boto3.Session] = None + bucket_name: str, region: str = "us-east-1", session: boto3.Session | None = None ) -> None: """Delete all objects and versions in bucket, then delete bucket.""" purge_bucket(bucket_name, region, session) @@ -42,7 +45,7 @@ def purge_and_delete_bucket( def purge_bucket( - bucket_name: str, region: str = "us-east-1", session: Optional[boto3.Session] = None + bucket_name: str, region: str = "us-east-1", session: boto3.Session | None = None ) -> None: """Delete all objects and versions in bucket.""" if does_bucket_exist(bucket_name, region, session): @@ -54,7 +57,7 @@ def purge_bucket( def delete_bucket( - bucket_name: str, region: str = "us-east-1", session: Optional[boto3.Session] = None + bucket_name: str, region: str = "us-east-1", session: boto3.Session | None = None ) -> None: """Delete bucket.""" if does_bucket_exist(bucket_name, region, session): @@ -68,7 +71,7 @@ def delete_bucket( def does_bucket_exist( - bucket_name: str, region: str = "us-east-1", session: Optional[boto3.Session] = None + bucket_name: str, region: str = "us-east-1", session: boto3.Session | None = None ) -> bool: """Check if bucket exists in S3.""" s3_resource = _get_resource(session, region) @@ -86,14 +89,14 @@ def does_bucket_exist( def ensure_bucket_exists( - bucket_name: str, region: str = "us-east-1", session: Optional[boto3.Session] = None + bucket_name: str, region: str = "us-east-1", session: boto3.Session | None = None ) -> None: """Ensure S3 bucket exists.""" if not does_bucket_exist(bucket_name, region, session): LOGGER.info('creating bucket "%s" (in progress)', bucket_name) s3_client = _get_client(session, region) if 
region == "us-east-1": - create_bucket_opts: Dict[str, Any] = {} + create_bucket_opts: dict[str, Any] = {} else: create_bucket_opts = {"CreateBucketConfiguration": {"LocationConstraint": region}} s3_client.create_bucket(Bucket=bucket_name, **create_bucket_opts) @@ -117,7 +120,7 @@ def ensure_bucket_exists( def does_s3_object_exist( bucket: str, key: str, - session: Optional[boto3.Session] = None, + session: boto3.Session | None = None, region: str = "us-east-1", ) -> bool: """Determine if object exists on s3.""" @@ -133,14 +136,14 @@ def does_s3_object_exist( return True -def upload(bucket: str, key: str, filename: str, session: Optional[boto3.Session] = None) -> None: +def upload(bucket: str, key: str, filename: str, session: boto3.Session | None = None) -> None: """Upload file to S3 bucket.""" s3_client = _get_client(session) LOGGER.info("uploading %s to s3://%s/%s...", filename, bucket, key) s3_client.upload_file(Filename=filename, Bucket=bucket, Key=key) -def download(bucket: str, key: str, file_path: str, session: Optional[boto3.Session] = None) -> str: +def download(bucket: str, key: str, file_path: str, session: boto3.Session | None = None) -> str: """Download a file from S3 to the given path.""" s3_client = _get_client(session) @@ -150,7 +153,7 @@ def download(bucket: str, key: str, file_path: str, session: Optional[boto3.Sess def download_and_extract_to_mkdtemp( - bucket: str, key: str, session: Optional[boto3.Session] = None + bucket: str, key: str, session: boto3.Session | None = None ) -> str: """Download zip archive and extract it to temporary directory.""" filedes, temp_file = tempfile.mkstemp() @@ -160,7 +163,7 @@ def download_and_extract_to_mkdtemp( output_dir = tempfile.mkdtemp() with zipfile.ZipFile(temp_file, "r") as zip_ref: zip_ref.extractall(output_dir) - os.remove(temp_file) + Path(temp_file).unlink() LOGGER.verbose("extracted %s to %s", temp_file, output_dir) return output_dir @@ -169,7 +172,7 @@ def get_matching_s3_objects( bucket: str, prefix: Sequence[str] = "", suffix: str = "", - session: Optional[boto3.Session] = None, + session: boto3.Session | None = None, ) -> Iterator[ObjectTypeDef]: """Generate objects in an S3 bucket. @@ -208,7 +211,7 @@ def get_matching_s3_keys( bucket: str, prefix: str = "", suffix: str = "", - session: Optional[boto3.Session] = None, + session: boto3.Session | None = None, ) -> Iterator[str]: """Generate the keys in an S3 bucket. diff --git a/runway/sources/git.py b/runway/sources/git.py index 245ad51cc..c39b69a2e 100644 --- a/runway/sources/git.py +++ b/runway/sources/git.py @@ -1,11 +1,13 @@ """'Git type Path Source.""" +from __future__ import annotations + import logging import shutil import subprocess import tempfile from pathlib import Path -from typing import Any, Dict, Optional +from typing import Any from .source import Source @@ -23,7 +25,7 @@ class Git(Source): def __init__( self, *, - arguments: Optional[Dict[str, str]] = None, + arguments: dict[str, str] | None = None, location: str = "", uri: str = "", **kwargs: Any, @@ -38,6 +40,7 @@ def __init__( module resides. Leaving this as an empty string, ``/``, or ``./`` will have runway look in the root folder. uri: The uniform resource identifier that targets the remote git repository + **kwargs: Arbitrary keyword arguments. 
""" self.args = arguments or {} diff --git a/runway/sources/source.py b/runway/sources/source.py index 06daf757a..4e5c520b4 100644 --- a/runway/sources/source.py +++ b/runway/sources/source.py @@ -5,9 +5,11 @@ """ +from __future__ import annotations + import logging from pathlib import Path -from typing import Any, Union +from typing import Any LOGGER = logging.getLogger(__name__) @@ -29,12 +31,13 @@ class Source: cache_dir: Path - def __init__(self, *, cache_dir: Union[Path, str], **_: Any): + def __init__(self, *, cache_dir: Path | str, **_: Any) -> None: """Source. Args: cache_dir: The directory where the given remote resource should be cached. + **kwargs: Arbitrary keyword arguments. """ self.cache_dir = cache_dir if isinstance(cache_dir, Path) else Path(cache_dir) diff --git a/runway/templates/cdk-py/__init__.py b/runway/templates/cdk-py/__init__.py new file mode 100644 index 000000000..0349622bd --- /dev/null +++ b/runway/templates/cdk-py/__init__.py @@ -0,0 +1 @@ +"""PLACEHOLDER.""" diff --git a/runway/templates/cdk-py/app.py b/runway/templates/cdk-py/app.py index febe08d8e..e4582376b 100644 --- a/runway/templates/cdk-py/app.py +++ b/runway/templates/cdk-py/app.py @@ -1,5 +1,6 @@ -#!/usr/bin/env python3 """Sample app.""" + +# ruff: noqa from aws_cdk import core from hello.hello_stack import MyStack diff --git a/runway/templates/cdk-py/hello/hello_construct.py b/runway/templates/cdk-py/hello/hello_construct.py index cf2e5ff49..709d59696 100644 --- a/runway/templates/cdk-py/hello/hello_construct.py +++ b/runway/templates/cdk-py/hello/hello_construct.py @@ -1,5 +1,6 @@ """Sample app.""" +# ruff: noqa from aws_cdk import aws_iam as iam from aws_cdk import aws_s3 as s3 from aws_cdk import core @@ -17,7 +18,7 @@ def __init__(self, scope: core.Construct, id: str, num_buckets: int) -> None: """Instantiate class.""" super().__init__(scope, id) self._buckets = [] - for i in range(0, num_buckets): + for i in range(num_buckets): self._buckets.append(s3.Bucket(self, f"Bucket-{i}")) def grant_read(self, principal: iam.IPrincipal): diff --git a/runway/templates/cdk-py/hello/hello_stack.py b/runway/templates/cdk-py/hello/hello_stack.py index 5a9f814a2..d970a8000 100644 --- a/runway/templates/cdk-py/hello/hello_stack.py +++ b/runway/templates/cdk-py/hello/hello_stack.py @@ -1,5 +1,6 @@ """Hello stack.""" +# ruff: noqa from aws_cdk import aws_iam as iam from aws_cdk import aws_sns as sns from aws_cdk import aws_sns_subscriptions as subs diff --git a/runway/templates/k8s-cfn-repo/k8s-master.cfn/k8s_hooks/auth_map.py b/runway/templates/k8s-cfn-repo/k8s-master.cfn/k8s_hooks/auth_map.py index 6fc2aa774..24880132c 100644 --- a/runway/templates/k8s-cfn-repo/k8s-master.cfn/k8s_hooks/auth_map.py +++ b/runway/templates/k8s-cfn-repo/k8s-master.cfn/k8s_hooks/auth_map.py @@ -5,7 +5,7 @@ import logging import os from pathlib import Path -from typing import TYPE_CHECKING, Any, List +from typing import TYPE_CHECKING, Any from runway.cfngin.lookups.handlers.output import OutputLookup @@ -36,7 +36,9 @@ def get_principal_arn(context: CfnginContext) -> str: return assumed_role_to_principle(caller_identity_arn) -def generate(context: CfnginContext, *, filename: str, path: List[str], stack: str, **_: Any): +def generate( + context: CfnginContext, *, filename: str, path: list[str], stack: str, **_: Any +) -> bool: """Generate an EKS auth_map for worker connection. 
Args: @@ -51,7 +53,7 @@ def generate(context: CfnginContext, *, filename: str, path: List[str], stack: s """ overlay_path = Path(*path) file_path = overlay_path / filename - if os.path.exists(filename): + if os.path.exists(filename): # noqa: PTH110 LOGGER.info("%s file present; skipping initial creation", file_path) return True LOGGER.info("Creating auth_map at %s", file_path) @@ -67,7 +69,7 @@ def generate(context: CfnginContext, *, filename: str, path: List[str], stack: s return True -def remove(*, path: List[str], filename: str, **_: Any) -> bool: +def remove(*, path: list[str], filename: str, **_: Any) -> bool: """Remove an EKS auth_map for worker connection. For use after destroying a cluster. diff --git a/runway/templates/k8s-cfn-repo/k8s-master.cfn/k8s_hooks/awscli.py b/runway/templates/k8s-cfn-repo/k8s-master.cfn/k8s_hooks/awscli.py index f3c2e5c10..b01b74ec6 100644 --- a/runway/templates/k8s-cfn-repo/k8s-master.cfn/k8s_hooks/awscli.py +++ b/runway/templates/k8s-cfn-repo/k8s-master.cfn/k8s_hooks/awscli.py @@ -22,6 +22,7 @@ def aws_eks_update_kubeconfig(context: CfnginContext, **kwargs: Any) -> bool: Args: context: Context object. + **kwargs: Arbitrary keyword arguments. Returns: boolean for whether or not the hook succeeded @@ -43,8 +44,8 @@ def aws_eks_update_kubeconfig(context: CfnginContext, **kwargs: Any) -> bool: if not os.environ.get("PIPENV_ACTIVE") and ( not os.environ.get("VIRTUAL_ENV") and not which("aws") ): - print("", file=sys.stderr) - print( + print(file=sys.stderr) # noqa: T201 + print( # noqa: T201 "Warning: the generated kubeconfig uses the aws-cli for " "authentication, but it is not found in your environment. ", file=sys.stderr, diff --git a/runway/templates/k8s-cfn-repo/k8s-master.cfn/k8s_hooks/bootstrap.py b/runway/templates/k8s-cfn-repo/k8s-master.cfn/k8s_hooks/bootstrap.py index e27b3ed99..90b5ddbe3 100644 --- a/runway/templates/k8s-cfn-repo/k8s-master.cfn/k8s_hooks/bootstrap.py +++ b/runway/templates/k8s-cfn-repo/k8s-master.cfn/k8s_hooks/bootstrap.py @@ -3,7 +3,6 @@ from __future__ import annotations import logging -import os import shutil from pathlib import Path from typing import TYPE_CHECKING, Any @@ -16,13 +15,13 @@ LOGGER = logging.getLogger(__name__) -def copy_template_to_env(path: Path, env: str, region: str): +def copy_template_to_env(path: Path, env: str, region: str) -> None: """Copy k8s module template into new environment directory.""" overlays_dir = path / "overlays" template_dir = overlays_dir / "template" env_dir = overlays_dir / env if template_dir.is_dir(): - if env_dir.is_dir() or (os.path.isdir(f"{env_dir}-{region}")): + if env_dir.is_dir() or (Path(f"{env_dir}-{region}").is_dir()): LOGGER.info( 'Bootstrap of k8s module at "%s" skipped; module ' "already has a config for this environment", @@ -30,7 +29,7 @@ def copy_template_to_env(path: Path, env: str, region: str): ) else: LOGGER.info( - 'Copying overlay template at "%s" to new ' 'environment directory "%s"', + 'Copying overlay template at "%s" to new environment directory "%s"', template_dir, env_dir, ) @@ -44,8 +43,8 @@ def copy_template_to_env(path: Path, env: str, region: str): templated_file_path = env_dir / i if templated_file_path.is_file(): filedata = templated_file_path.read_text() - if "REPLACEMEENV" in filedata: - templated_file_path.write_text(filedata.replace("REPLACEMEENV", env)) + if "REPLACE_ME_ENV" in filedata: + templated_file_path.write_text(filedata.replace("REPLACE_ME_ENV", env)) else: LOGGER.info( 'Skipping bootstrap of k8s module at "%s"; no template directory 
present', @@ -53,7 +52,7 @@ def copy_template_to_env(path: Path, env: str, region: str): ) -def create_runway_environments(*, context: CfnginContext, namespace: str, **_: Any): +def create_runway_environments(*, context: CfnginContext, namespace: str, **_: Any) -> bool: """Copy k8s module templates into new environment directories. Args: diff --git a/runway/templates/k8s-flux-repo/flux.tf/__init__.py b/runway/templates/k8s-flux-repo/flux.tf/__init__.py new file mode 100644 index 000000000..fe2653a60 --- /dev/null +++ b/runway/templates/k8s-flux-repo/flux.tf/__init__.py @@ -0,0 +1 @@ +"""Empty init file for python import traversal.""" diff --git a/runway/templates/k8s-tf-repo/eks-base.tf/__init__.py b/runway/templates/k8s-tf-repo/eks-base.tf/__init__.py new file mode 100644 index 000000000..fe2653a60 --- /dev/null +++ b/runway/templates/k8s-tf-repo/eks-base.tf/__init__.py @@ -0,0 +1 @@ +"""Empty init file for python import traversal.""" diff --git a/runway/templates/k8s-tf-repo/job-s3-echo.tf/__init__.py b/runway/templates/k8s-tf-repo/job-s3-echo.tf/__init__.py new file mode 100644 index 000000000..fe2653a60 --- /dev/null +++ b/runway/templates/k8s-tf-repo/job-s3-echo.tf/__init__.py @@ -0,0 +1 @@ +"""Empty init file for python import traversal.""" diff --git a/runway/templates/sls-py/hello_world/__init__.py b/runway/templates/sls-py/hello_world/__init__.py index d12b49328..9359fcc30 100644 --- a/runway/templates/sls-py/hello_world/__init__.py +++ b/runway/templates/sls-py/hello_world/__init__.py @@ -3,19 +3,16 @@ from __future__ import annotations import json -from typing import Any, Dict, Union +from typing import Any -def handler(event: Any, context: Any) -> Dict[str, Union[int, str]]: +def handler(event: Any, context: Any) -> dict[str, int | str]: # noqa: ARG001 """Return Serverless Hello World.""" body = { "message": "Go Serverless v1.0! 
Your function executed successfully!", "input": event, } - - response = {"statusCode": 200, "body": json.dumps(body)} - - return response + return {"statusCode": 200, "body": json.dumps(body)} # Use this code if you don't use the http event with the LAMBDA-PROXY # integration diff --git a/runway/tests/handlers/base.py b/runway/tests/handlers/base.py index 965facfbf..014785955 100644 --- a/runway/tests/handlers/base.py +++ b/runway/tests/handlers/base.py @@ -3,7 +3,7 @@ from __future__ import annotations import os -from typing import TYPE_CHECKING, Any, Dict, List, Union +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from ...config.components.runway.base import ConfigProperty @@ -13,12 +13,12 @@ class TestHandler: """Base class for test handlers.""" @classmethod - def handle(cls, name: str, args: Union[ConfigProperty, Dict[str, Any]]) -> None: + def handle(cls, name: str, args: ConfigProperty | dict[str, Any]) -> None: """Redefine in subclass.""" - raise NotImplementedError() + raise NotImplementedError @staticmethod - def get_dirs(provided_path: str) -> List[str]: + def get_dirs(provided_path: str) -> list[str]: """Return list of directories.""" repo_dirs = next(os.walk(provided_path))[1] if ".git" in repo_dirs: diff --git a/runway/tests/handlers/cfn_lint.py b/runway/tests/handlers/cfn_lint.py index 57b3e7f00..5a53dbd4a 100644 --- a/runway/tests/handlers/cfn_lint.py +++ b/runway/tests/handlers/cfn_lint.py @@ -7,7 +7,7 @@ import runpy import sys from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, Union +from typing import TYPE_CHECKING, Any import yaml @@ -26,7 +26,7 @@ class CfnLintHandler(TestHandler): """Lints CFN.""" @classmethod - def handle(cls, name: str, args: Union[ConfigProperty, Dict[str, Any]]) -> None: + def handle(cls, name: str, args: ConfigProperty | dict[str, Any]) -> None: """Perform the actual test. Relies on .cfnlintrc file to be located beside the Runway config file. 
@@ -42,7 +42,7 @@ def handle(cls, name: str, args: Union[ConfigProperty, Dict[str, Any]]) -> None: # prevent duplicate log messages by not passing to the root logger logging.getLogger("cfnlint").propagate = False try: - with argv(*["cfn-lint"] + args.get("cli_args", [])): + with argv(*["cfn-lint", *args.get("cli_args", [])]): runpy.run_module("cfnlint", run_name="__main__") except SystemExit as err: # this call will always result in SystemExit if err.code != 0: # ignore zero exit codes but re-raise for non-zero diff --git a/runway/tests/handlers/script.py b/runway/tests/handlers/script.py index fcbc22b31..7d45ff564 100644 --- a/runway/tests/handlers/script.py +++ b/runway/tests/handlers/script.py @@ -6,7 +6,7 @@ import subprocess import sys from subprocess import CalledProcessError -from typing import TYPE_CHECKING, Any, Dict, Union +from typing import TYPE_CHECKING, Any from ..._logging import PrefixAdaptor from ...tests.handlers.base import TestHandler @@ -40,7 +40,7 @@ class ScriptHandler(TestHandler): """ @classmethod - def handle(cls, name: str, args: Union[ConfigProperty, Dict[str, Any]]) -> None: + def handle(cls, name: str, args: ConfigProperty | dict[str, Any]) -> None: """Perform the actual test.""" logger = PrefixAdaptor(name, LOGGER) for cmd in args["commands"]: diff --git a/runway/tests/handlers/yaml_lint.py b/runway/tests/handlers/yaml_lint.py index 8505d42c2..0bd4c4403 100644 --- a/runway/tests/handlers/yaml_lint.py +++ b/runway/tests/handlers/yaml_lint.py @@ -7,7 +7,7 @@ import logging import os import runpy -from typing import TYPE_CHECKING, Any, Dict, List, Union +from typing import TYPE_CHECKING, Any from ...tests.handlers.base import TestHandler from ...utils import argv @@ -23,31 +23,35 @@ class YamllintHandler(TestHandler): """Lints yaml.""" @staticmethod - def get_yaml_files_at_path(provided_path: str) -> List[str]: + def get_yaml_files_at_path(provided_path: str) -> list[str]: """Return list of yaml files.""" - yaml_files = glob.glob(os.path.join(provided_path, "*.yaml")) - yml_files = glob.glob(os.path.join(provided_path, "*.yml")) + yaml_files = glob.glob(os.path.join(provided_path, "*.yaml")) # noqa: PTH207, PTH118 + yml_files = glob.glob(os.path.join(provided_path, "*.yml")) # noqa: PTH118, PTH207 return yaml_files + yml_files @classmethod - def get_yamllint_options(cls, path: str) -> List[str]: + def get_yamllint_options(cls, path: str) -> list[str]: """Return yamllint option list.""" - yamllint_options: List[str] = [] + yamllint_options: list[str] = [] return yamllint_options + cls.get_dirs(path) + cls.get_yaml_files_at_path(path) @classmethod - def handle(cls, name: str, args: Union[ConfigProperty, Dict[str, Any]]) -> None: + def handle(cls, name: str, args: ConfigProperty | dict[str, Any]) -> None: # noqa: ARG003 """Perform the actual test.""" - base_dir = os.getcwd() + base_dir = os.getcwd() # noqa: PTH109 - if os.path.isfile(os.path.join(base_dir, ".yamllint")): - yamllint_config = os.path.join(base_dir, ".yamllint") - elif os.path.isfile(os.path.join(base_dir, ".yamllint.yml")): - yamllint_config = os.path.join(base_dir, ".yamllint.yml") + if os.path.isfile(os.path.join(base_dir, ".yamllint")): # noqa: PTH118, PTH113 + yamllint_config = os.path.join(base_dir, ".yamllint") # noqa: PTH118 + elif os.path.isfile(os.path.join(base_dir, ".yamllint.yml")): # noqa: PTH113, PTH118 + yamllint_config = os.path.join(base_dir, ".yamllint.yml") # noqa: PTH118 else: - yamllint_config = os.path.join( - 
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), + yamllint_config = os.path.join( # noqa: PTH118 + os.path.dirname( # noqa: PTH120 + os.path.dirname( # noqa: PTH120 + os.path.dirname(os.path.abspath(__file__)) # noqa: PTH120, PTH100 + ) + ), "templates", ".yamllint.yml", ) @@ -57,5 +61,5 @@ def handle(cls, name: str, args: Union[ConfigProperty, Dict[str, Any]]) -> None: *cls.get_yamllint_options(base_dir), ] - with argv(*["yamllint"] + yamllint_options): + with argv(*["yamllint", *yamllint_options]): runpy.run_module("yamllint", run_name="__main__") diff --git a/runway/tests/registry.py b/runway/tests/registry.py index a33f5556e..66796e7a7 100644 --- a/runway/tests/registry.py +++ b/runway/tests/registry.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Dict, Type +from typing import TYPE_CHECKING from .handlers import cfn_lint, script from .handlers import yaml_lint as yamllint @@ -10,10 +10,10 @@ if TYPE_CHECKING: from .handlers.base import TestHandler -TEST_HANDLERS: Dict[str, Type[TestHandler]] = {} +TEST_HANDLERS: dict[str, type[TestHandler]] = {} -def register_test_handler(test_type: str, handler: Type[TestHandler]) -> None: +def register_test_handler(test_type: str, handler: type[TestHandler]) -> None: """Register a test handler. Args: diff --git a/runway/utils/__init__.py b/runway/utils/__init__.py index beffc4740..a70560561 100644 --- a/runway/utils/__init__.py +++ b/runway/utils/__init__.py @@ -12,51 +12,33 @@ import re import stat import sys -from contextlib import contextmanager +from collections.abc import Iterable, Iterator, MutableMapping +from contextlib import AbstractContextManager, contextmanager from decimal import Decimal +from functools import cached_property # noqa: F401 # TODO (kyle): remove in next major release from pathlib import Path from subprocess import check_call -from types import TracebackType -from typing import ( - ContextManager, # deprecated in 3.9 for contextlib.AbstractContextManager -) -from typing import ( - MutableMapping, # deprecated in 3.9 for collections.abc.MutableMapping -) -from typing import ( - TYPE_CHECKING, - Any, - Callable, - Dict, - Iterable, - Iterator, - List, - Optional, - Set, - Type, - Union, - cast, - overload, -) +from typing import TYPE_CHECKING, Any, Callable, cast, overload import yaml from pydantic import BaseModel as _BaseModel -from typing_extensions import Literal - -# make this importable for util as it was before -from ..compat import cached_property # make this importable without defining __all__ yet. # more things need to be moved of this file before starting an explicit __all__. 
-from ._file_hash import FileHash -from ._version import Version +from ._file_hash import FileHash # noqa: F401 +from ._version import Version # noqa: F401 if TYPE_CHECKING: + from types import TracebackType + from mypy_boto3_cloudformation.type_defs import OutputTypeDef + from typing_extensions import Literal + + from ..compat import Self AWS_ENV_VARS = ("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN") DOC_SITE = "https://docs.onica.com/projects/runway" -EMBEDDED_LIB_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "embedded") +EMBEDDED_LIB_PATH = str(Path(__file__).resolve().parent / "embedded") LOGGER = logging.getLogger(__name__) @@ -167,13 +149,13 @@ def __init__(self, **kwargs: Any) -> None: self._found_queries = MutableMap() @property - def data(self) -> Dict[str, Any]: + def data(self) -> dict[str, Any]: """Sanitized output of __dict__. Removes anything that starts with ``_``. """ - result: Dict[str, Any] = {} + result: dict[str, Any] = {} for key, val in self.__dict__.items(): if key.startswith("_"): continue @@ -182,7 +164,7 @@ def data(self) -> Dict[str, Any]: def clear_found_cache(self) -> None: """Clear _found_cache.""" - for _, val in self.__dict__.items(): + for val in self.__dict__.values(): if isinstance(val, MutableMap): val.clear_found_cache() if hasattr(self, "_found_queries"): @@ -350,7 +332,7 @@ def __str__(self) -> str: return json.dumps(self.data, default=json_serial) -class SafeHaven(ContextManager["SafeHaven"]): +class SafeHaven(AbstractContextManager["SafeHaven"]): """Context manager that caches and resets important values on exit. Caches and resets os.environ, sys.argv, sys.modules, and sys.path. @@ -359,10 +341,10 @@ class SafeHaven(ContextManager["SafeHaven"]): def __init__( self, - argv: Optional[Iterable[str]] = None, - environ: Optional[Dict[str, str]] = None, - sys_modules_exclude: Optional[Iterable[str]] = None, - sys_path: Optional[Iterable[str]] = None, + argv: Iterable[str] | None = None, + environ: dict[str, str] | None = None, + sys_modules_exclude: Iterable[str] | None = None, + sys_path: Iterable[str] | None = None, ) -> None: """Instantiate class. @@ -384,7 +366,7 @@ def __init__( self.__sys_path = list(sys.path) # more informative origin for log statements self.logger = logging.getLogger("runway." + self.__class__.__name__) - self.sys_modules_exclude: Set[str] = ( + self.sys_modules_exclude: set[str] = ( set(sys_modules_exclude) if sys_modules_exclude else set() ) self.sys_modules_exclude.add("runway") @@ -433,7 +415,7 @@ def reset_sys_path(self) -> None: self.logger.debug("resetting sys.path: %s", json.dumps(self.__sys_path)) sys.path = self.__sys_path - def __enter__(self) -> SafeHaven: + def __enter__(self) -> Self: """Enter the context manager. Returns: @@ -445,16 +427,16 @@ def __enter__(self) -> SafeHaven: def __exit__( self, - exc_type: Optional[Type[BaseException]], - exc_value: Optional[BaseException], - traceback: Optional[TracebackType], + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, ) -> None: """Exit the context manager.""" self.logger.debug("leaving the safe haven...") self.reset_all() -# TODO remove after https://github.com/yaml/pyyaml/issues/234 is resolved +# TODO (kyle): remove after https://github.com/yaml/pyyaml/issues/234 is resolved class YamlDumper(yaml.Dumper): """Custom YAML Dumper. 
@@ -471,7 +453,7 @@ class YamlDumper(yaml.Dumper): """ - def increase_indent(self, flow: bool = False, indentless: bool = False) -> None: + def increase_indent(self, flow: bool = False, indentless: bool = False) -> None: # noqa: ARG002 """Override parent method.""" return super().increase_indent(flow, False) # type: ignore @@ -489,7 +471,7 @@ def argv(*args: str) -> Iterator[None]: @contextmanager -def change_dir(newdir: Union[Path, str]) -> Iterator[None]: +def change_dir(newdir: Path | str) -> Iterator[None]: """Change directory. Adapted from http://stackoverflow.com/a/24176022 @@ -518,8 +500,8 @@ def ensure_file_is_executable(path: str) -> None: SystemExit: file is not executable. """ - if platform.system() != "Windows" and (not stat.S_IXUSR & os.stat(path)[stat.ST_MODE]): - print(f"Error: File {path} is not executable") + if platform.system() != "Windows" and (not stat.S_IXUSR & Path(path).stat()[stat.ST_MODE]): + print(f"Error: File {path} is not executable") # noqa: T201 sys.exit(1) @@ -533,13 +515,14 @@ def ensure_string(value: Any) -> str: @contextmanager -def environ(env: Optional[Dict[str, str]] = None, **kwargs: str) -> Iterator[None]: +def environ(env: dict[str, str] | None = None, **kwargs: str) -> Iterator[None]: """Context manager for temporarily changing os.environ. The original value of os.environ is restored upon exit. Args: env: Dictionary to use when updating os.environ. + **kwargs: Arbitrary keyword arguments. """ env = env or {} @@ -563,7 +546,7 @@ def json_serial(obj: Any) -> Any: raise TypeError(f"Type {type(obj)} not serializable") -def load_object_from_string(fqcn: str, try_reload: bool = False) -> Union[type, Callable[..., Any]]: +def load_object_from_string(fqcn: str, try_reload: bool = False) -> type | Callable[..., Any]: """Convert "." delimited strings to a python object. Args: @@ -600,19 +583,19 @@ def load_object_from_string(fqcn: str, try_reload: bool = False) -> Union[type, @overload def merge_dicts( - dict1: Dict[Any, Any], dict2: Dict[Any, Any], deep_merge: bool = ... -) -> Dict[str, Any]: ... + dict1: dict[Any, Any], dict2: dict[Any, Any], deep_merge: bool = ... +) -> dict[str, Any]: ... @overload -def merge_dicts(dict1: List[Any], dict2: List[Any], deep_merge: bool = ...) -> List[Any]: ... +def merge_dicts(dict1: list[Any], dict2: list[Any], deep_merge: bool = ...) -> list[Any]: ... 
def merge_dicts( - dict1: Union[Dict[Any, Any], List[Any]], - dict2: Union[Dict[Any, Any], List[Any]], + dict1: dict[Any, Any] | list[Any], + dict2: dict[Any, Any] | list[Any], deep_merge: bool = True, -) -> Union[Dict[Any, Any], List[Any]]: +) -> dict[Any, Any] | list[Any]: """Merge dict2 into dict1.""" if deep_merge: if isinstance(dict1, list) and isinstance(dict2, list): @@ -641,7 +624,7 @@ def snake_case_to_kebab_case(value: str) -> str: return value.replace("_", "-") -def extract_boto_args_from_env(env_vars: Dict[str, str]) -> Dict[str, str]: +def extract_boto_args_from_env(env_vars: dict[str, str]) -> dict[str, str]: """Return boto3 client args dict with environment creds.""" return { i: env_vars[i.upper()] @@ -650,25 +633,25 @@ def extract_boto_args_from_env(env_vars: Dict[str, str]) -> Dict[str, str]: } -def flatten_path_lists(env_dict: Dict[str, Any], env_root: Optional[str] = None) -> Dict[str, Any]: +def flatten_path_lists(env_dict: dict[str, Any], env_root: str | None = None) -> dict[str, Any]: """Join paths in environment dict down to strings.""" for key, val in env_dict.items(): # Lists are presumed to be path components and will be turned back # to strings if isinstance(val, list): env_dict[key] = ( - os.path.join(env_root, os.path.join(*cast(List[str], val))) - if (env_root and not os.path.isabs(os.path.join(*cast(List[str], val)))) - else os.path.join(*cast(List[str], val)) + Path(env_root).joinpath(*cast("list[str]", val)) + if (env_root and not Path(*cast("list[str]", val)).is_absolute()) + else Path(*cast("list[str]", val)) ) return env_dict def merge_nested_environment_dicts( - env_dicts: Dict[str, Any], - env_name: Optional[str] = None, - env_root: Optional[str] = None, -) -> Dict[str, Any]: + env_dicts: dict[str, Any], + env_name: str | None = None, + env_root: str | None = None, +) -> dict[str, Any]: """Return single-level dictionary from dictionary of dictionaries.""" # If the provided dictionary is just a single "level" (no nested # environments), it applies to all environments @@ -684,13 +667,13 @@ def merge_nested_environment_dicts( return {} combined_dicts = merge_dicts( - cast(Dict[Any, Any], env_dicts.get("*", {})), - cast(Dict[Any, Any], env_dicts.get(env_name, {})), + cast("dict[Any, Any]", env_dicts.get("*", {})), + cast("dict[Any, Any]", env_dicts.get(env_name, {})), ) return flatten_path_lists(combined_dicts, env_root) -def find_cfn_output(key: str, outputs: List[OutputTypeDef]) -> Optional[str]: +def find_cfn_output(key: str, outputs: list[OutputTypeDef]) -> str | None: """Return CFN output value. Args: @@ -706,13 +689,13 @@ def find_cfn_output(key: str, outputs: List[OutputTypeDef]) -> Optional[str]: def get_embedded_lib_path() -> str: """Return path of embedded libraries.""" - return os.path.join(os.path.dirname(os.path.abspath(__file__)), "embedded") + return str(Path(__file__).resolve().parent / "embedded") def get_hash_for_filename(filename: str, hashfile_path: str) -> str: """Return hash for filename in the hashfile.""" filehash = "" - with open(hashfile_path, "r", encoding="utf-8") as stream: + with open(hashfile_path, encoding="utf-8") as stream: # noqa: PTH123 for _cnt, line in enumerate(stream): if line.rstrip().endswith(filename): match = re.match(r"^[A-Za-z0-9]*", line) @@ -734,7 +717,7 @@ def ignore_exit_code_0() -> Iterator[None]: raise -def fix_windows_command_list(commands: List[str]) -> List[str]: +def fix_windows_command_list(commands: list[str]) -> list[str]: """Return command list with working Windows commands. 
npm on windows is npm.cmd, which will blow up @@ -746,16 +729,17 @@ def fix_windows_command_list(commands: List[str]) -> List[str]: """ fully_qualified_cmd_path = which(commands[0]) if fully_qualified_cmd_path: - commands[0] = os.path.basename(fully_qualified_cmd_path) + commands[0] = Path(fully_qualified_cmd_path).name return commands def run_commands( - commands: List[Union[str, List[str], Dict[str, Union[str, List[str]]]]], - directory: Union[Path, str], - env: Optional[Dict[str, str]] = None, + commands: list[dict[str, list[str] | str] | list[str] | str], + directory: Path | str, + env: dict[str, str] | None = None, ) -> None: """Run list of commands.""" + directory = Path(directory) if env is None: env = os.environ.copy() for step in commands: @@ -764,9 +748,7 @@ def run_commands( raw_command = step elif step.get("command"): # dictionary execution_dir = ( - os.path.join(directory, cast(str, step.get("cwd", ""))) - if step.get("cwd") - else directory + directory / str(step["cwd"]) if step.get("cwd") and step["cwd"] else directory ) raw_command = step["command"] else: @@ -783,7 +765,7 @@ def run_commands( try: check_call(command_list, env=env) except FileNotFoundError: - print(failed_to_find_error, file=sys.stderr) + print(failed_to_find_error, file=sys.stderr) # noqa: T201 sys.exit(1) @@ -817,7 +799,7 @@ def get_file_hash( __name__, ) file_hash = getattr(hashlib, algorithm)() - with open(filename, "rb") as stream: + with open(filename, "rb") as stream: # noqa: PTH123 while True: data = stream.read(65536) # 64kb chunks if not data: @@ -852,7 +834,7 @@ def sha256sum(filename: str) -> str: __name__, ) sha256 = hashlib.sha256() - with open(filename, "rb", buffering=0) as stream: + with open(filename, "rb", buffering=0) as stream: # noqa: PTH123 mem_view = memoryview(bytearray(128 * 1024)) for i in iter(lambda: stream.readinto(mem_view), 0): sha256.update(mem_view[:i]) @@ -860,7 +842,7 @@ def sha256sum(filename: str) -> str: @contextmanager -def use_embedded_pkgs(embedded_lib_path: Optional[str] = None) -> Iterator[None]: +def use_embedded_pkgs(embedded_lib_path: str | None = None) -> Iterator[None]: """Temporarily prepend embedded packages to sys.path.""" if embedded_lib_path is None: embedded_lib_path = get_embedded_lib_path() @@ -873,14 +855,14 @@ def use_embedded_pkgs(embedded_lib_path: Optional[str] = None) -> Iterator[None] sys.path = old_sys_path -def which(program: str) -> Optional[str]: +def which(program: str) -> str | None: """Mimic 'which' command behavior.""" def is_exe(fpath: str) -> bool: """Determine if program exists and is executable.""" - return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + return Path(fpath).is_file() and os.access(fpath, os.X_OK) - def get_extensions() -> List[str]: + def get_extensions() -> list[str]: """Get PATHEXT if the exist, otherwise use default.""" exts = ".COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC" @@ -889,7 +871,7 @@ def get_extensions() -> List[str]: return exts.split(";") - fname, file_ext = os.path.splitext(program) + fname, file_ext = os.path.splitext(program) # noqa: PTH122 fpath, fname = os.path.split(program) if not file_ext and platform.system().lower() == "windows": @@ -899,16 +881,16 @@ def get_extensions() -> List[str]: for i in fnames: if fpath: - exe_file = os.path.join(fpath, i) + exe_file = os.path.join(fpath, i) # noqa: PTH118 if is_exe(exe_file): return exe_file else: for path in ( os.environ.get("PATH", "").split(os.pathsep) if "PATH" in os.environ - else [os.getcwd()] + else [os.getcwd()] # noqa: PTH109 ): 
- exe_file = os.path.join(path, i) + exe_file = os.path.join(path, i) # noqa: PTH118 if is_exe(exe_file): return exe_file diff --git a/runway/utils/_file_hash.py b/runway/utils/_file_hash.py index f6c660485..33448211a 100644 --- a/runway/utils/_file_hash.py +++ b/runway/utils/_file_hash.py @@ -3,10 +3,11 @@ from __future__ import annotations from pathlib import Path -from typing import TYPE_CHECKING, ClassVar, Iterable, Optional +from typing import TYPE_CHECKING, ClassVar, Optional if TYPE_CHECKING: import hashlib + from collections.abc import Iterable from _typeshed import StrPath @@ -26,7 +27,7 @@ class FileHash: 1024 * 10_000_000 # 10mb - number of bytes in each read operation ) - def __init__(self, hash_alg: "hashlib._Hash", *, chunk_size: int = DEFAULT_CHUNK_SIZE) -> None: + def __init__(self, hash_alg: hashlib._Hash, *, chunk_size: int = DEFAULT_CHUNK_SIZE) -> None: """Instantiate class. Args: @@ -73,10 +74,8 @@ def add_file(self, file_path: StrPath) -> None: file_path: Path of the file to add. """ - with open(file_path, "rb") as stream: - # python 3.7 compatible version of `while chunk := buf.read(read_size):` - chunk = stream.read(self.chunk_size) # seed chunk with initial value - while chunk: + with Path.open(Path(file_path), "rb") as stream: + while chunk := stream.read(self.chunk_size): self._hash.update(chunk) chunk = stream.read(self.chunk_size) # read in new chunk @@ -126,4 +125,4 @@ def add_files( self.add_file_name(fp, relative_to=relative_to) self.add_file(fp) # end of file contents; only necessary with multiple files - self._hash.update("\0".encode()) + self._hash.update(b"\0") diff --git a/runway/utils/_version.py b/runway/utils/_version.py index c35a7ff35..54c601245 100644 --- a/runway/utils/_version.py +++ b/runway/utils/_version.py @@ -22,7 +22,7 @@ def __repr__(self) -> str: """Return repr.""" # this usage of super is required to reproduce the intended result in # any subclasses of this class - return f"" + return f"" def __str__(self) -> str: """Return the original version string.""" diff --git a/runway/variables.py b/runway/variables.py index 56f13d19e..d2d0c0b8f 100644 --- a/runway/variables.py +++ b/runway/variables.py @@ -4,24 +4,8 @@ import logging import re -from typing import ( - TYPE_CHECKING, - Any, - Dict, - Generic, - Iterable, - Iterator, - List, - MutableMapping, - MutableSequence, - Optional, - Set, - Type, - TypeVar, - Union, - cast, - overload, -) +from collections.abc import Iterable, Iterator, MutableMapping, MutableSequence +from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast, overload from pydantic import BaseModel from typing_extensions import Literal @@ -35,13 +19,13 @@ UnresolvedVariable, UnresolvedVariableValue, ) -from .lookups.handlers.base import LookupHandler from .lookups.registry import RUNWAY_LOOKUP_HANDLERS if TYPE_CHECKING: from .cfngin.providers.aws.default import Provider from .config.components.runway import RunwayVariablesDefinition from .context import CfnginContext, RunwayContext + from .lookups.handlers.base import LookupHandler LOGGER = logging.getLogger(__name__) @@ -53,6 +37,7 @@ class Variable: """Represents a variable provided to a Runway directive.""" + _value: VariableValue name: str def __init__( @@ -75,11 +60,11 @@ def __init__( self.variable_type = variable_type @property - def dependencies(self) -> Set[str]: + def dependencies(self) -> set[str]: """Stack names that this variable depends on. Returns: - Set[str]: Stack names that this variable depends on. 
+ set[str]: Stack names that this variable depends on. """ return self._value.dependencies @@ -108,9 +93,9 @@ def value(self) -> Any: def resolve( self, - context: Union[CfnginContext, RunwayContext], - provider: Optional[Provider] = None, - variables: Optional[RunwayVariablesDefinition] = None, + context: CfnginContext | RunwayContext, + provider: Provider | None = None, + variables: RunwayVariablesDefinition | None = None, **kwargs: Any, ) -> None: """Resolve the variable value. @@ -119,6 +104,7 @@ def resolve( context: The current context object. provider: Subclass of the base provider. variables: Object containing variables passed to Runway. + **kwargs: Arbitrary keyword arguments. Raises: FailedVariableLookup @@ -145,9 +131,9 @@ def __repr__(self) -> str: def resolve_variables( - variables: List[Variable], - context: Union[CfnginContext, RunwayContext], - provider: Optional[Provider] = None, + variables: list[Variable], + context: CfnginContext | RunwayContext, + provider: Provider | None = None, ) -> None: """Given a list of variables, resolve all of them. @@ -172,7 +158,7 @@ class VariableValue: variable_type: VariableTypeLiteralTypeDef @property - def dependencies(self) -> Set[Any]: + def dependencies(self) -> set[Any]: """Stack names that this variable depends on.""" return set() @@ -210,9 +196,9 @@ def value(self) -> Any: def resolve( self, - context: Union[CfnginContext, RunwayContext], - provider: Optional[Provider] = None, - variables: Optional[RunwayVariablesDefinition] = None, + context: CfnginContext | RunwayContext, + provider: Provider | None = None, + variables: RunwayVariablesDefinition | None = None, **kwargs: Any, ) -> None: """Resolve the variable value. @@ -221,6 +207,7 @@ def resolve( context: The current context object. provider: Subclass of the base provider. variables: Object containing variables passed to Runway. + **kwargs: Arbitrary keyword arguments. """ @@ -243,13 +230,13 @@ def parse_obj( @overload @classmethod def parse_obj( - cls, obj: Dict[str, Any], variable_type: VariableTypeLiteralTypeDef = ... + cls, obj: dict[str, Any], variable_type: VariableTypeLiteralTypeDef = ... ) -> VariableValue: ... @overload @classmethod def parse_obj( - cls, obj: List[Any], variable_type: VariableTypeLiteralTypeDef = ... + cls, obj: list[Any], variable_type: VariableTypeLiteralTypeDef = ... ) -> VariableValueList: ... @overload @@ -262,10 +249,10 @@ def parse_obj( @classmethod def parse_obj( cls, obj: str, variable_type: VariableTypeLiteralTypeDef = ... - ) -> VariableValueConcatenation[Union[VariableValueLiteral[str], VariableValueLookup]]: ... + ) -> VariableValueConcatenation[VariableValueLiteral[str] | VariableValueLookup]: ... @classmethod - def parse_obj( + def parse_obj( # noqa: C901 cls, obj: Any, variable_type: VariableTypeLiteralTypeDef = "cfngin" ) -> VariableValue: """Parse complex variable structures using type appropriate subclasses. 
@@ -284,14 +271,14 @@ def parse_obj( if not isinstance(obj, str): return VariableValueLiteral(obj, variable_type=variable_type) # type: ignore - tokens: VariableValueConcatenation[ - Union[VariableValueLiteral[str], VariableValueLookup] - ] = VariableValueConcatenation( - # pyright 1.1.138 is having issues properly inferring the type from comprehension - [ # type: ignore - VariableValueLiteral(cast(str, t), variable_type=variable_type) - for t in re.split(r"(\$\{|\}|\s+)", obj) # ${ or space or } - ] + tokens: VariableValueConcatenation[VariableValueLiteral[str] | VariableValueLookup] = ( + VariableValueConcatenation( + # pyright 1.1.138 is having issues properly inferring the type from comprehension + [ # type: ignore + VariableValueLiteral(cast(str, t), variable_type=variable_type) + for t in re.split(r"(\$\{|\}|\s+)", obj) # ${ or space or } + ] + ) ) opener = "${" @@ -348,7 +335,7 @@ class VariableValueDict(VariableValue, MutableMapping[str, VariableValue]): """A dict variable value.""" def __init__( - self, data: Dict[str, Any], variable_type: VariableTypeLiteralTypeDef = "cfngin" + self, data: dict[str, Any], variable_type: VariableTypeLiteralTypeDef = "cfngin" ) -> None: """Instantiate class. @@ -361,9 +348,9 @@ def __init__( self.variable_type: VariableTypeLiteralTypeDef = variable_type @property - def dependencies(self) -> Set[str]: + def dependencies(self) -> set[str]: """Stack names that this variable depends on.""" - deps: Set[str] = set() + deps: set[str] = set() for item in self.values(): deps.update(item.dependencies) return deps @@ -377,7 +364,7 @@ def resolved(self) -> bool: return accumulator @property - def simplified(self) -> Dict[str, Any]: + def simplified(self) -> dict[str, Any]: """Return a simplified version of the value. This can be used to concatenate two literals into one literal or @@ -387,15 +374,15 @@ def simplified(self) -> Dict[str, Any]: return {k: v.simplified for k, v in self.items()} @property - def value(self) -> Dict[str, Any]: + def value(self) -> dict[str, Any]: """Value of the variable. Can be resolved or unresolved.""" return {k: v.value for k, v in self.items()} def resolve( self, - context: Union[CfnginContext, RunwayContext], - provider: Optional[Provider] = None, - variables: Optional[RunwayVariablesDefinition] = None, + context: CfnginContext | RunwayContext, + provider: Provider | None = None, + variables: RunwayVariablesDefinition | None = None, **kwargs: Any, ) -> None: """Resolve the variable value. @@ -404,6 +391,7 @@ def resolve( context: The current context object. provider: Subclass of the base provider. variables: Object containing variables passed to Runway. + **kwargs: Arbitrary keyword arguments. """ for item in self.values(): @@ -449,15 +437,15 @@ def __init__( variable_type: Type of variable (cfngin|runway). """ - self._data: List[VariableValue] = [ + self._data: list[VariableValue] = [ self.parse_obj(i, variable_type=variable_type) for i in iterable ] self.variable_type: VariableTypeLiteralTypeDef = variable_type @property - def dependencies(self) -> Set[str]: + def dependencies(self) -> set[str]: """Stack names that this variable depends on.""" - deps: Set[str] = set() + deps: set[str] = set() for item in self: deps.update(item.dependencies) return deps @@ -471,7 +459,7 @@ def resolved(self) -> bool: return accumulator @property - def simplified(self) -> List[VariableValue]: + def simplified(self) -> list[VariableValue]: """Return a simplified version of the value. 
This can be used to concatenate two literals into one literal or @@ -481,7 +469,7 @@ def simplified(self) -> List[VariableValue]: return [item.simplified for item in self] @property - def value(self) -> List[Any]: + def value(self) -> list[Any]: """Value of the variable. Can be resolved or unresolved.""" return [item.value for item in self] @@ -491,9 +479,9 @@ def insert(self, index: int, value: VariableValue) -> None: def resolve( self, - context: Union[CfnginContext, RunwayContext], - provider: Optional[Provider] = None, - variables: Optional[RunwayVariablesDefinition] = None, + context: CfnginContext | RunwayContext, + provider: Provider | None = None, + variables: RunwayVariablesDefinition | None = None, **kwargs: Any, ) -> None: """Resolve the variable value. @@ -502,37 +490,44 @@ def resolve( context: The current context object. provider: Subclass of the base provider. variables: Object containing variables passed to Runway. + **kwargs: Arbitrary keyword arguments. """ for item in self: item.resolve(context, provider=provider, variables=variables, **kwargs) - def __delitem__(self, __index: int) -> None: + @overload + def __delitem__(self, index: int) -> None: ... + + @overload + def __delitem__(self, index: slice) -> None: ... + + def __delitem__(self, index: int | slice) -> None: """Delete item by index.""" - del self._data[__index] + del self._data[index] @overload def __getitem__(self, __index: int) -> VariableValue: ... @overload - def __getitem__(self, __index: slice) -> List[VariableValue]: ... + def __getitem__(self, __index: slice) -> list[VariableValue]: ... - def __getitem__( # type: ignore - self, __index: Union[int, slice] - ) -> Union[MutableSequence[VariableValue], VariableValue]: + def __getitem__( # pyright: ignore[reportIncompatibleMethodOverride] + self, __index: int | slice + ) -> MutableSequence[VariableValue] | VariableValue: """Get item by index.""" - return self._data[__index] # type: ignore + return self._data[__index] # pyright: ignore[reportCallIssue] @overload def __setitem__(self, __index: int, __value: VariableValue) -> None: ... @overload - def __setitem__(self, __index: slice, __value: List[VariableValue]) -> None: ... + def __setitem__(self, __index: slice, __value: list[VariableValue]) -> None: ... - def __setitem__( + def __setitem__( # pyright: ignore[reportIncompatibleMethodOverride] self, - __index: Union[int, slice], - __value: Union[List[VariableValue], VariableValue], + __index: int | slice, + __value: list[VariableValue] | VariableValue, ) -> None: """Set item by index.""" self._data[__index] = __value # type: ignore @@ -609,9 +604,9 @@ def __init__( self.variable_type: VariableTypeLiteralTypeDef = variable_type @property - def dependencies(self) -> Set[str]: + def dependencies(self) -> set[str]: """Stack names that this variable depends on.""" - deps: Set[str] = set() + deps: set[str] = set() for item in self: deps.update(item.dependencies) return deps @@ -632,7 +627,7 @@ def simplified(self) -> VariableValue: nested concatenations. 
""" - concat: List[VariableValue] = [] + concat: list[VariableValue] = [] for item in self: if isinstance(item, VariableValueLiteral) and item.value == "": pass @@ -666,7 +661,7 @@ def value(self) -> Any: if len(self) == 1: return self[0].value - values: List[str] = [] + values: list[str] = [] for value in self: resolved_value = value.value if isinstance(resolved_value, bool) or not isinstance(resolved_value, (int, str)): @@ -676,9 +671,9 @@ def value(self) -> Any: def resolve( self, - context: Union[CfnginContext, RunwayContext], - provider: Optional[Provider] = None, - variables: Optional[RunwayVariablesDefinition] = None, + context: CfnginContext | RunwayContext, + provider: Provider | None = None, + variables: RunwayVariablesDefinition | None = None, **kwargs: Any, ) -> None: """Resolve the variable value. @@ -687,6 +682,7 @@ def resolve( context: The current context object. provider: Subclass of the base provider. variables: Object containing variables passed to Runway. + **kwargs: Arbitrary keyword arguments. """ for value in self: @@ -700,11 +696,9 @@ def __delitem__(self, __index: int) -> None: def __getitem__(self, __index: int) -> _VariableValue: ... @overload - def __getitem__(self, __index: slice) -> List[_VariableValue]: ... + def __getitem__(self, __index: slice) -> list[_VariableValue]: ... - def __getitem__( - self, __index: Union[int, slice] - ) -> Union[List[_VariableValue], _VariableValue]: + def __getitem__(self, __index: int | slice) -> list[_VariableValue] | _VariableValue: """Get item by index.""" return self._data[__index] @@ -712,12 +706,12 @@ def __getitem__( def __setitem__(self, __index: int, __value: _VariableValue) -> None: ... @overload - def __setitem__(self, __index: slice, __value: List[_VariableValue]) -> None: ... + def __setitem__(self, __index: slice, __value: list[_VariableValue]) -> None: ... def __setitem__( self, - __index: Union[int, slice], - __value: Union[List[_VariableValue], _VariableValue], + __index: int | slice, + __value: list[_VariableValue] | _VariableValue, ) -> None: """Set item by index.""" self._data[__index] = __value @@ -738,7 +732,7 @@ def __repr__(self) -> str: class VariableValueLookup(VariableValue): """A lookup variable value.""" - handler: Type[LookupHandler] + handler: type[LookupHandler] lookup_name: VariableValueLiteral[str] lookup_query: VariableValue @@ -747,8 +741,8 @@ class VariableValueLookup(VariableValue): def __init__( self, lookup_name: VariableValueLiteral[str], - lookup_query: Union[str, VariableValue], - handler: Optional[Type[LookupHandler]] = None, + lookup_query: str | VariableValue, + handler: type[LookupHandler] | None = None, variable_type: VariableTypeLiteralTypeDef = "cfngin", ) -> None: """Initialize class. @@ -788,7 +782,7 @@ def __init__( self.handler = handler @property - def dependencies(self) -> Set[str]: + def dependencies(self) -> set[str]: """Stack names that this variable depends on.""" if hasattr(self.handler, "dependencies"): return self.handler.dependencies(self.lookup_query) @@ -823,9 +817,9 @@ def value(self) -> Any: def resolve( self, - context: Union[CfnginContext, RunwayContext], - provider: Optional[Provider] = None, - variables: Optional[RunwayVariablesDefinition] = None, + context: CfnginContext | RunwayContext, + provider: Provider | None = None, + variables: RunwayVariablesDefinition | None = None, **kwargs: Any, ) -> None: """Resolve the variable value. @@ -834,6 +828,7 @@ def resolve( context: The current context object. provider: Subclass of the base provider. 
variables: Object containing variables passed to Runway. + **kwargs: Arbitrary keyword arguments. Raises: FailedLookup: A lookup failed for any reason. @@ -859,8 +854,8 @@ def __iter__(self) -> Iterator[VariableValueLookup]: def __repr__(self) -> str: """Return object representation.""" if self._resolved: - return f"Lookup[{self._data} ({self.lookup_name} {repr(self.lookup_query)})]" - return f"Lookup[{self.lookup_name} {repr(self.lookup_query)}]" + return f"Lookup[{self._data} ({self.lookup_name} {self.lookup_query!r})]" + return f"Lookup[{self.lookup_name} {self.lookup_query!r}]" def __str__(self) -> str: """Object displayed as a string.""" @@ -882,16 +877,16 @@ def __init__( variable_type: Type of variable (cfngin|runway). """ - self._data: Dict[str, VariableValue] = { + self._data: dict[str, VariableValue] = { k: self.parse_obj(v, variable_type=variable_type) for k, v in data } self._model_class = type(data) self.variable_type: VariableTypeLiteralTypeDef = variable_type @property - def dependencies(self) -> Set[str]: + def dependencies(self) -> set[str]: """Stack names that this variable depends on.""" - deps: Set[str] = set() + deps: set[str] = set() for value in self._data.values(): deps.update(value.dependencies) return deps @@ -905,7 +900,7 @@ def resolved(self) -> bool: return accumulator @property - def simplified(self) -> Dict[str, Any]: + def simplified(self) -> dict[str, Any]: """Return a simplified version of the value. This can be used to concatenate two literals into one literal or @@ -928,9 +923,9 @@ def value(self) -> _PydanticModelTypeVar: def resolve( self, - context: Union[CfnginContext, RunwayContext], - provider: Optional[Provider] = None, - variables: Optional[RunwayVariablesDefinition] = None, + context: CfnginContext | RunwayContext, + provider: Provider | None = None, + variables: RunwayVariablesDefinition | None = None, **kwargs: Any, ) -> None: """Resolve the variable value. @@ -939,6 +934,7 @@ def resolve( context: The current context object. provider: Subclass of the base provider. variables: Object containing variables passed to Runway. + **kwargs: Arbitrary keyword arguments. """ for item in self._data.values(): diff --git a/tests/conftest.py b/tests/conftest.py index df86d1ced..b73ec1d13 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -4,18 +4,19 @@ import os from pathlib import Path -from typing import TYPE_CHECKING, Generator, Iterator +from typing import TYPE_CHECKING import pytest from .factories import cli_runner_factory if TYPE_CHECKING: + from collections.abc import Generator, Iterator + from _pytest.config import Config from _pytest.config.argparsing import Parser from _pytest.fixtures import SubRequest from click.testing import CliRunner - from pytest import TempPathFactory def pytest_configure(config: Config) -> None: @@ -49,20 +50,20 @@ def pytest_addoption(parser: Parser) -> None: ) -@pytest.fixture(scope="function") +@pytest.fixture() def cli_runner(request: SubRequest) -> CliRunner: """Initialize instance of `click.testing.CliRunner`.""" return cli_runner_factory(request) -@pytest.fixture(scope="function") +@pytest.fixture() def cli_runner_isolated(cli_runner: CliRunner) -> Generator[CliRunner, None, None]: """Initialize instance of `click.testing.CliRunner` with `isolate_filesystem()` called.""" with cli_runner.isolated_filesystem(): yield cli_runner -@pytest.fixture(scope="function") +@pytest.fixture() def cd_tmp_path(tmp_path: Path) -> Iterator[Path]: """Change directory to a temporary path. 
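For reference, a minimal sketch (hypothetical fixture, not part of the patch) of the conftest conventions being applied: `scope="function"` is the pytest default so the bare decorator suffices, and typing-only imports move under `TYPE_CHECKING`:

    from __future__ import annotations

    import os
    from typing import TYPE_CHECKING

    import pytest

    if TYPE_CHECKING:
        from collections.abc import Iterator
        from pathlib import Path


    @pytest.fixture()
    def cd_tmp_dir(tmp_path: Path) -> Iterator[Path]:
        """Change to a temporary directory for the duration of a test."""
        prev = os.getcwd()  # noqa: PTH109
        os.chdir(tmp_path)
        try:
            yield tmp_path
        finally:
            os.chdir(prev)
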
@@ -78,7 +79,7 @@ def cd_tmp_path(tmp_path: Path) -> Iterator[Path]: os.chdir(prev_dir) -@pytest.fixture(scope="function") +@pytest.fixture() def root_dir() -> Path: """Return a path object to the root directory.""" return Path(__file__).parent.parent @@ -100,6 +101,6 @@ def sanitize_environment() -> None: @pytest.fixture(scope="session") -def tfenv_dir(tmp_path_factory: TempPathFactory) -> Path: +def tfenv_dir(tmp_path_factory: pytest.TempPathFactory) -> Path: """Directory for storing tfenv between tests.""" return tmp_path_factory.mktemp(".tfenv", numbered=True) diff --git a/tests/factories.py b/tests/factories.py index a46eb9c64..a9cafd510 100644 --- a/tests/factories.py +++ b/tests/factories.py @@ -4,7 +4,7 @@ from __future__ import annotations import os # imports os -from typing import TYPE_CHECKING, Any, Dict, cast +from typing import TYPE_CHECKING, Any, cast from click.testing import CliRunner @@ -14,7 +14,7 @@ def cli_runner_factory(request: SubRequest) -> CliRunner: """Initialize instance of `click.testing.CliRunner`.""" - kwargs: Dict[str, Any] = { + kwargs: dict[str, Any] = { "env": { "CFNGIN_STACK_POLL_TIME": "1", "DEPLOY_ENVIRONMENT": "test", @@ -24,5 +24,5 @@ def cli_runner_factory(request: SubRequest) -> CliRunner: } mark = request.node.get_closest_marker("cli_runner") if mark: - kwargs.update(cast(Dict[str, Any], mark.kwargs)) + kwargs.update(cast(dict[str, Any], mark.kwargs)) return CliRunner(**kwargs) diff --git a/tests/functional/cdk/test_multistack/test_runner.py b/tests/functional/cdk/test_multistack/test_runner.py index 140530694..6a9199e01 100644 --- a/tests/functional/cdk/test_multistack/test_runner.py +++ b/tests/functional/cdk/test_multistack/test_runner.py @@ -14,22 +14,24 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from click.testing import CliRunner, Result CURRENT_DIR = Path(__file__).parent @pytest.fixture(scope="module") -def deploy_result(cli_runner: CliRunner) -> Generator[Result, None, None]: +def deploy_result(cli_runner: CliRunner) -> Result: """Execute `runway deploy` with `runway destroy` as a cleanup step.""" - yield cli_runner.invoke(cli, ["deploy"], env={"CI": "1"}) + return cli_runner.invoke(cli, ["deploy"], env={"CI": "1"}) @pytest.fixture(scope="module") diff --git a/tests/functional/cfngin/fixtures/blueprints/_bastion.py b/tests/functional/cfngin/fixtures/blueprints/_bastion.py index 0a0eaacfe..3e3312678 100644 --- a/tests/functional/cfngin/fixtures/blueprints/_bastion.py +++ b/tests/functional/cfngin/fixtures/blueprints/_bastion.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, ClassVar, Dict +from typing import TYPE_CHECKING, ClassVar from runway.cfngin.blueprints.base import Blueprint from runway.cfngin.blueprints.variables.types import ( @@ -22,7 +22,7 @@ class FakeBastion(Blueprint): """Fake Bastion.""" - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "VpcId": {"type": EC2VPCId, "description": "Vpc Id"}, "DefaultSG": { "type": EC2SecurityGroupId, @@ -30,15 +30,15 @@ class FakeBastion(Blueprint): }, "PublicSubnets": { "type": EC2SubnetIdList, - "description": "Subnets to deploy public " "instances in.", + "description": "Subnets to deploy public instances in.", }, "PrivateSubnets": { "type": EC2SubnetIdList, - "description": "Subnets to deploy private 
" "instances in.", + "description": "Subnets to deploy private instances in.", }, "AvailabilityZones": { "type": CFNCommaDelimitedList, - "description": "Availability Zones to deploy " "instances in.", + "description": "Availability Zones to deploy instances in.", }, "InstanceType": { "type": CFNString, diff --git a/tests/functional/cfngin/fixtures/blueprints/_broken.py b/tests/functional/cfngin/fixtures/blueprints/_broken.py index f7fd569f3..99610b61f 100644 --- a/tests/functional/cfngin/fixtures/blueprints/_broken.py +++ b/tests/functional/cfngin/fixtures/blueprints/_broken.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, ClassVar, Dict +from typing import TYPE_CHECKING, ClassVar from troposphere import Ref from troposphere.cloudformation import WaitCondition, WaitConditionHandle @@ -20,7 +20,7 @@ class Broken(Blueprint): """ - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "StringVariable": {"type": str, "default": ""} } diff --git a/tests/functional/cfngin/fixtures/blueprints/_dummy.py b/tests/functional/cfngin/fixtures/blueprints/_dummy.py index 2b316f895..92d97f608 100644 --- a/tests/functional/cfngin/fixtures/blueprints/_dummy.py +++ b/tests/functional/cfngin/fixtures/blueprints/_dummy.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, ClassVar, Dict +from typing import TYPE_CHECKING, ClassVar from troposphere import Ref from troposphere.cloudformation import WaitCondition, WaitConditionHandle @@ -16,7 +16,7 @@ class Dummy(Blueprint): """Dummy blueprint.""" - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "StringVariable": {"type": str, "default": ""} } @@ -35,7 +35,7 @@ class LongRunningDummy(Blueprint): """ - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "Count": { "type": int, "description": "The # of WaitConditionHandles to create.", diff --git a/tests/functional/cfngin/fixtures/blueprints/_lambda_function.py b/tests/functional/cfngin/fixtures/blueprints/_lambda_function.py index 5d115d1c4..927a33774 100644 --- a/tests/functional/cfngin/fixtures/blueprints/_lambda_function.py +++ b/tests/functional/cfngin/fixtures/blueprints/_lambda_function.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional +from typing import TYPE_CHECKING, Any, ClassVar, Optional import awacs.awslambda import awacs.dynamodb @@ -22,7 +22,7 @@ class LambdaFunction(Blueprint): """Blueprint for creating a Lambda Function.""" - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "AppName": {"type": str, "description": "Name of app."}, "Code": { "type": awslambda.Code, @@ -122,7 +122,7 @@ def runtime(self) -> Ref: @cached_property def lambda_function(self) -> awslambda.Function: """AWS Lambda Function.""" - optional_kwargs: Dict[str, Any] = { + optional_kwargs: dict[str, Any] = { "Environment": ( awslambda.Environment(Variables=self.variables["EnvironmentVariables"]) if self.variables["EnvironmentVariables"] @@ -177,5 +177,5 @@ def create_template(self) -> None: """Create template.""" self.template.set_version("2010-09-09") self.template.set_description("Test Lambda") - self.iam_role - self.lambda_function + self.iam_role # noqa: B018 + self.lambda_function # noqa: B018 
diff --git a/tests/functional/cfngin/fixtures/blueprints/_vpc.py b/tests/functional/cfngin/fixtures/blueprints/_vpc.py index 6161745e2..7ad18f69f 100644 --- a/tests/functional/cfngin/fixtures/blueprints/_vpc.py +++ b/tests/functional/cfngin/fixtures/blueprints/_vpc.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, ClassVar, Dict +from typing import TYPE_CHECKING, ClassVar from troposphere.cloudformation import WaitConditionHandle @@ -16,7 +16,7 @@ class FakeVPC(Blueprint): """Fake VPC.""" - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "AZCount": {"type": int, "default": 2}, "PrivateSubnets": { "type": CFNCommaDelimitedList, diff --git a/tests/functional/cfngin/fixtures/hooks/cleanup.py b/tests/functional/cfngin/fixtures/hooks/cleanup.py index 78da5af0d..8da35b0f3 100644 --- a/tests/functional/cfngin/fixtures/hooks/cleanup.py +++ b/tests/functional/cfngin/fixtures/hooks/cleanup.py @@ -18,7 +18,7 @@ def local_delete( - context: CfnginContext, + context: CfnginContext, # noqa: ARG001 *, path: StrPath, **_: Any, diff --git a/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/docker/index.py b/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/docker/index.py index 6833462d8..20ce6cc87 100644 --- a/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/docker/index.py +++ b/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/docker/index.py @@ -4,7 +4,7 @@ import inspect from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from ..type_defs import LambdaResponse @@ -12,7 +12,7 @@ PACKAGE_DIR = Path(__file__).parent -def handler(event: Dict[str, Any], context: object) -> LambdaResponse: +def handler(event: dict[str, Any], context: object) -> LambdaResponse: # noqa: ARG001 """Lambda Function entrypoint.""" try: import requests @@ -29,7 +29,7 @@ def handler(event: Dict[str, Any], context: object) -> LambdaResponse: "message": None, "status": "success", } - except Exception as exc: + except Exception as exc: # noqa: BLE001 return { "code": 500, "data": { diff --git a/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/docker_mysql/index.py b/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/docker_mysql/index.py index af8218cdb..a3009272d 100644 --- a/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/docker_mysql/index.py +++ b/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/docker_mysql/index.py @@ -4,7 +4,7 @@ import inspect from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from ..type_defs import LambdaResponse @@ -12,7 +12,7 @@ PACKAGE_DIR = Path(__file__).parent -def handler(event: Dict[str, Any], context: object) -> LambdaResponse: +def handler(event: dict[str, Any], context: object) -> LambdaResponse: # noqa: ARG001 """Lambda Function entrypoint.""" try: import MySQLdb # type: ignore @@ -29,7 +29,7 @@ def handler(event: Dict[str, Any], context: object) -> LambdaResponse: "message": None, "status": "success", } - except Exception as exc: + except Exception as exc: # noqa: BLE001 return { "code": 500, "data": { diff --git a/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/docker_xmlsec/index.py b/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/docker_xmlsec/index.py index b25fc74d6..1eaa4d45a 100644 --- 
a/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/docker_xmlsec/index.py +++ b/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/docker_xmlsec/index.py @@ -4,7 +4,7 @@ import inspect from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from ..type_defs import LambdaResponse @@ -12,7 +12,7 @@ PACKAGE_DIR = Path(__file__).parent -def handler(event: Dict[str, Any], context: object) -> LambdaResponse: +def handler(event: dict[str, Any], context: object) -> LambdaResponse: # noqa: ARG001 """Lambda Function entrypoint.""" try: import lxml # type: ignore @@ -31,7 +31,7 @@ def handler(event: Dict[str, Any], context: object) -> LambdaResponse: "message": None, "status": "success", } - except Exception as exc: + except Exception as exc: # noqa: BLE001 return { "code": 500, "data": { diff --git a/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/local/index.py b/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/local/index.py index b4e7e155d..58885c3b1 100644 --- a/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/local/index.py +++ b/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/local/index.py @@ -3,7 +3,7 @@ from __future__ import annotations from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from ..type_defs import LambdaResponse @@ -11,7 +11,7 @@ PACKAGE_DIR = Path(__file__).parent -def handler(event: Dict[str, Any], context: object) -> LambdaResponse: +def handler(event: dict[str, Any], context: object) -> LambdaResponse: # noqa: ARG001 """Lambda Function entrypoint.""" try: return { @@ -25,7 +25,7 @@ def handler(event: Dict[str, Any], context: object) -> LambdaResponse: "message": None, "status": "success", } - except Exception as exc: + except Exception as exc: # noqa: BLE001 return { "code": 500, "data": {}, diff --git a/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/local_xmlsec_layer/index.py b/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/local_xmlsec_layer/index.py index c57592a59..d52093dc7 100644 --- a/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/local_xmlsec_layer/index.py +++ b/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/local_xmlsec_layer/index.py @@ -4,7 +4,7 @@ import inspect from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from ..type_defs import LambdaResponse @@ -13,7 +13,7 @@ OPT_DIR = Path("/opt") -def handler(event: Dict[str, Any], context: object) -> LambdaResponse: +def handler(event: dict[str, Any], context: object) -> LambdaResponse: # noqa: ARG001 """Lambda Function entrypoint.""" try: import lxml # type: ignore @@ -35,7 +35,7 @@ def handler(event: Dict[str, Any], context: object) -> LambdaResponse: "message": None, "status": "success", } - except Exception as exc: + except Exception as exc: # noqa: BLE001 return { "code": 500, "data": { diff --git a/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/type_defs.py b/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/type_defs.py index 47019fe16..53e20f81e 100644 --- a/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/type_defs.py +++ b/tests/functional/cfngin/hooks/test_awslambda/sample_app/src/type_defs.py @@ -2,13 +2,7 @@ from __future__ import annotations -import sys -from typing import Any, Dict, Optional - -if sys.version_info < (3, 8): - from 
typing_extensions import Literal, TypedDict # type: ignore -else: - from typing import Literal, TypedDict # type: ignore +from typing import Any, Literal, Optional, TypedDict # type: ignore class _LambdaResponseOptional(TypedDict, total=False): @@ -21,7 +15,7 @@ class _LambdaResponseRequired(TypedDict): """Required fields for a Lambda Response.""" code: int - data: Dict[str, Any] + data: dict[str, Any] message: Optional[str] status: Literal["error", "success"] diff --git a/tests/functional/cfngin/hooks/test_awslambda/test_runner.py b/tests/functional/cfngin/hooks/test_awslambda/test_runner.py index 4d42b31dd..70d08a60a 100644 --- a/tests/functional/cfngin/hooks/test_awslambda/test_runner.py +++ b/tests/functional/cfngin/hooks/test_awslambda/test_runner.py @@ -5,9 +5,8 @@ import json import shutil from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, Generator, Optional +from typing import TYPE_CHECKING, Any, Optional -import boto3 import pytest from pydantic import root_validator @@ -16,6 +15,9 @@ from runway.utils import BaseModel if TYPE_CHECKING: + from collections.abc import Generator + + import boto3 from click.testing import CliRunner, Result from mypy_boto3_cloudformation.client import CloudFormationClient from mypy_boto3_cloudformation.type_defs import StackTypeDef @@ -67,7 +69,7 @@ class AwslambdaStackOutputs(BaseModel): Runtime: str @root_validator(allow_reuse=True, pre=True) - def _convert_null_to_none(cls, values: Dict[str, Any]) -> Dict[str, Any]: + def _convert_null_to_none(self, values: dict[str, Any]) -> dict[str, Any]: """Convert ``null`` to ``NoneType``.""" def _handle_null(v: Any) -> Any: @@ -248,7 +250,7 @@ def test_xmlsec_layer(deploy_result: Result, namespace: str, runway_context: Run assert response["data"]["dir_contents"] == ["index.py"] -def test_plan(cli_runner: CliRunner, deploy_result: Result) -> None: +def test_plan(cli_runner: CliRunner, deploy_result: Result) -> None: # noqa: ARG001 """Test ``runway plan`` - this was not possible with old hook. deploy_result required so cleanup does not start before this runs. 
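For reference, a minimal sketch (hypothetical module, not part of the patch) of the `TYPE_CHECKING` import pattern used here: imports needed only for annotations, such as `boto3` and `collections.abc.Generator`, are deferred so they add no runtime cost, relying on `from __future__ import annotations` to keep the annotations lazy:

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from collections.abc import Generator

        import boto3


    def iter_stack_names(session: boto3.Session) -> Generator[str, None, None]:
        """Yield CloudFormation stack names for the given session."""
        client = session.client("cloudformation")
        for page in client.get_paginator("list_stacks").paginate():
            for summary in page.get("StackSummaries", []):
                yield summary["StackName"]
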
diff --git a/tests/functional/cfngin/test_assume_role/test_runner.py b/tests/functional/cfngin/test_assume_role/test_runner.py index 2936568e8..4199bc403 100644 --- a/tests/functional/cfngin/test_assume_role/test_runner.py +++ b/tests/functional/cfngin/test_assume_role/test_runner.py @@ -4,7 +4,7 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, Generator +from typing import TYPE_CHECKING, Any import boto3 import pytest @@ -14,6 +14,8 @@ from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from click.testing import CliRunner, Result AWS_REGION = "us-east-1" @@ -26,7 +28,7 @@ def assert_session_belongs_to_account(session: boto3.Session, account_id: str) - @pytest.fixture(scope="module") -def assumed_session(main_session: boto3.Session, variables: Dict[str, Any]) -> boto3.Session: +def assumed_session(main_session: boto3.Session, variables: dict[str, Any]) -> boto3.Session: """boto3 session for assumed account.""" role_arn = variables["runner_role"]["test-alt"] sts_client = main_session.client("sts") @@ -49,15 +51,15 @@ def main_session() -> boto3.Session: @pytest.fixture(scope="module") -def variables() -> Dict[str, Any]: +def variables() -> dict[str, Any]: """Contents of runway.variables.yml.""" return yaml.safe_load((CURRENT_DIR / "runway.variables.yml").read_bytes()) @pytest.fixture(scope="module") -def deploy_result(cli_runner: CliRunner) -> Generator[Result, None, None]: +def deploy_result(cli_runner: CliRunner) -> Result: """Execute `runway deploy` with `runway destroy` as a cleanup step.""" - yield cli_runner.invoke(cli, ["deploy", "--debug"], env={"CI": "1"}) + return cli_runner.invoke(cli, ["deploy", "--debug"], env={"CI": "1"}) @pytest.fixture(scope="module") @@ -74,7 +76,7 @@ def test_deploy_exit_code(deploy_result: Result) -> None: def test_does_not_exist_in_main_account( - main_session: boto3.Session, namespace: str, variables: Dict[str, Any] + main_session: boto3.Session, namespace: str, variables: dict[str, Any] ) -> None: """Test that the deployed stack does not exist in the main test account.""" assert_session_belongs_to_account(main_session, variables["account_id"]["test"]) @@ -86,7 +88,7 @@ def test_does_not_exist_in_main_account( def test_exists_in_assumed_account( - assumed_session: boto3.Session, namespace: str, variables: Dict[str, Any] + assumed_session: boto3.Session, namespace: str, variables: dict[str, Any] ) -> None: """Test that the deployed stack exists in the assumed account.""" assert_session_belongs_to_account(assumed_session, variables["account_id"]["test-alt"]) diff --git a/tests/functional/cfngin/test_aws_lambda_hook/lambda_src/dockerize_src/dockerize.py b/tests/functional/cfngin/test_aws_lambda_hook/lambda_src/dockerize_src/dockerize.py index b8afa21f8..3216d890c 100644 --- a/tests/functional/cfngin/test_aws_lambda_hook/lambda_src/dockerize_src/dockerize.py +++ b/tests/functional/cfngin/test_aws_lambda_hook/lambda_src/dockerize_src/dockerize.py @@ -1,13 +1,17 @@ -"""Test handler.""" +"""Test handler.""" # noqa: INP001 -import lib +from __future__ import annotations +from typing import Any -def handler(event, context): +import lib # type: ignore + + +def handler(event: Any, context: Any) -> dict[str, int | str]: # noqa: ARG001 """Handle lambda.""" try: if lib.RESPONSE_OBJ.shape == (3, 5): - return {"statusCode": 200, "body": str(lib.RESPONSE_OBJ.shape)} + return {"statusCode": 200, "body": str(lib.RESPONSE_OBJ.shape)} # type: ignore raise ValueError - except: + except: # noqa: E722 
return {"statusCode": 500, "body": "fail"} diff --git a/tests/functional/cfngin/test_aws_lambda_hook/lambda_src/nondockerize_src/nondockerize.py b/tests/functional/cfngin/test_aws_lambda_hook/lambda_src/nondockerize_src/nondockerize.py index b8afa21f8..3216d890c 100644 --- a/tests/functional/cfngin/test_aws_lambda_hook/lambda_src/nondockerize_src/nondockerize.py +++ b/tests/functional/cfngin/test_aws_lambda_hook/lambda_src/nondockerize_src/nondockerize.py @@ -1,13 +1,17 @@ -"""Test handler.""" +"""Test handler.""" # noqa: INP001 -import lib +from __future__ import annotations +from typing import Any -def handler(event, context): +import lib # type: ignore + + +def handler(event: Any, context: Any) -> dict[str, int | str]: # noqa: ARG001 """Handle lambda.""" try: if lib.RESPONSE_OBJ.shape == (3, 5): - return {"statusCode": 200, "body": str(lib.RESPONSE_OBJ.shape)} + return {"statusCode": 200, "body": str(lib.RESPONSE_OBJ.shape)} # type: ignore raise ValueError - except: + except: # noqa: E722 return {"statusCode": 500, "body": "fail"} diff --git a/tests/functional/cfngin/test_aws_lambda_hook/test_runner.py b/tests/functional/cfngin/test_aws_lambda_hook/test_runner.py index 0e4cb4b63..5547b8a78 100644 --- a/tests/functional/cfngin/test_aws_lambda_hook/test_runner.py +++ b/tests/functional/cfngin/test_aws_lambda_hook/test_runner.py @@ -4,13 +4,15 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from click.testing import CliRunner, Result CURRENT_DIR = Path(__file__).parent diff --git a/tests/functional/cfngin/test_destroy_removed/test_runner.py b/tests/functional/cfngin/test_destroy_removed/test_runner.py index 60862aef5..24414f841 100644 --- a/tests/functional/cfngin/test_destroy_removed/test_runner.py +++ b/tests/functional/cfngin/test_destroy_removed/test_runner.py @@ -4,13 +4,15 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from click.testing import CliRunner, Result CURRENT_DIR = Path(__file__).parent diff --git a/tests/functional/cfngin/test_duplicate_stack/test_runner.py b/tests/functional/cfngin/test_duplicate_stack/test_runner.py index 76f35c956..ee4b4b173 100644 --- a/tests/functional/cfngin/test_duplicate_stack/test_runner.py +++ b/tests/functional/cfngin/test_duplicate_stack/test_runner.py @@ -4,13 +4,15 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from click.testing import CliRunner, Result CURRENT_DIR = Path(__file__).parent diff --git a/tests/functional/cfngin/test_locked_stack/test_runner.py b/tests/functional/cfngin/test_locked_stack/test_runner.py index 449a5e5b6..6e57045de 100644 --- a/tests/functional/cfngin/test_locked_stack/test_runner.py +++ b/tests/functional/cfngin/test_locked_stack/test_runner.py @@ -4,13 +4,15 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from click.testing import CliRunner, Result CURRENT_DIR = Path(__file__).parent diff --git 
a/tests/functional/cfngin/test_parallel/test_runner.py b/tests/functional/cfngin/test_parallel/test_runner.py index 01f0d97b0..6ec32268a 100644 --- a/tests/functional/cfngin/test_parallel/test_runner.py +++ b/tests/functional/cfngin/test_parallel/test_runner.py @@ -5,22 +5,24 @@ import platform import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from click.testing import CliRunner, Result CURRENT_DIR = Path(__file__).parent @pytest.fixture(scope="module") -def deploy_result(cli_runner: CliRunner) -> Generator[Result, None, None]: +def deploy_result(cli_runner: CliRunner) -> Result: """Execute `runway deploy` with `runway destroy` as a cleanup step.""" - yield cli_runner.invoke(cli, ["deploy"], env={"CI": "1"}) + return cli_runner.invoke(cli, ["deploy"], env={"CI": "1"}) @pytest.fixture(scope="module") diff --git a/tests/functional/cfngin/test_raw_cfn/test_runner.py b/tests/functional/cfngin/test_raw_cfn/test_runner.py index 19f9c84c0..51ac53696 100644 --- a/tests/functional/cfngin/test_raw_cfn/test_runner.py +++ b/tests/functional/cfngin/test_raw_cfn/test_runner.py @@ -4,13 +4,15 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from click.testing import CliRunner, Result CURRENT_DIR = Path(__file__).parent diff --git a/tests/functional/cfngin/test_recreate_failed/test_runner.py b/tests/functional/cfngin/test_recreate_failed/test_runner.py index 2d501209b..4f0e72753 100644 --- a/tests/functional/cfngin/test_recreate_failed/test_runner.py +++ b/tests/functional/cfngin/test_recreate_failed/test_runner.py @@ -4,13 +4,15 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from click.testing import CliRunner, Result CURRENT_DIR = Path(__file__).parent diff --git a/tests/functional/cfngin/test_rollback_dependant/test_runner.py b/tests/functional/cfngin/test_rollback_dependant/test_runner.py index b94ae3896..0a397a5bd 100644 --- a/tests/functional/cfngin/test_rollback_dependant/test_runner.py +++ b/tests/functional/cfngin/test_rollback_dependant/test_runner.py @@ -4,13 +4,15 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from click.testing import CliRunner, Result CURRENT_DIR = Path(__file__).parent diff --git a/tests/functional/cfngin/test_simple_build/test_runner.py b/tests/functional/cfngin/test_simple_build/test_runner.py index 59e05af62..5aaae4b56 100644 --- a/tests/functional/cfngin/test_simple_build/test_runner.py +++ b/tests/functional/cfngin/test_simple_build/test_runner.py @@ -4,7 +4,7 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest @@ -12,6 +12,8 @@ from runway.config import CfnginConfig if TYPE_CHECKING: + from collections.abc import Generator + from _pytest.fixtures import SubRequest from click.testing import CliRunner, Result @@ -160,9 +162,7 @@ def test_stacks_not_exists(cfngin_context: CfnginContext) -> None: client = 
cfngin_context.get_session(region="us-east-1").client("cloudformation") assert cfngin_context.stacks, "no stacks found in context/config" for stack in cfngin_context.stacks: - try: + with pytest.raises(client.exceptions.ClientError, match="does not exist"): assert not client.describe_stacks(StackName=stack.fqn)[ "Stacks" ], f"stack exists: {stack.fqn}" - except client.exceptions.ClientError as exc: - assert "does not exist" in str(exc) diff --git a/tests/functional/cfngin/test_simple_diff/blueprints.py b/tests/functional/cfngin/test_simple_diff/blueprints.py index a83b8e7db..a7479a7a0 100644 --- a/tests/functional/cfngin/test_simple_diff/blueprints.py +++ b/tests/functional/cfngin/test_simple_diff/blueprints.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, ClassVar, Dict +from typing import TYPE_CHECKING, ClassVar from troposphere.cloudformation import WaitConditionHandle @@ -16,7 +16,7 @@ class DiffTester(Blueprint): """Diff tester.""" - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "InstanceType": { "type": CFNString, "description": "NAT EC2 instance type.", @@ -24,7 +24,7 @@ class DiffTester(Blueprint): }, "WaitConditionCount": { "type": int, - "description": "Number of WaitConditionHandle resources " "to add to the template", + "description": "Number of WaitConditionHandle resources to add to the template", }, } diff --git a/tests/functional/cfngin/test_simple_diff/test_runner.py b/tests/functional/cfngin/test_simple_diff/test_runner.py index ae7b54757..1aac871bd 100644 --- a/tests/functional/cfngin/test_simple_diff/test_runner.py +++ b/tests/functional/cfngin/test_simple_diff/test_runner.py @@ -2,13 +2,15 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from click.testing import CliRunner, Result from runway.config import RunwayConfig @@ -23,7 +25,7 @@ def initial_deploy(cli_runner: CliRunner) -> Generator[None, None, None]: @pytest.fixture(scope="module") -def plan_result(cli_runner: CliRunner, initial_deploy: None) -> Result: +def plan_result(cli_runner: CliRunner, initial_deploy: None) -> Result: # noqa: ARG001 """Execute `runway plan`.""" return cli_runner.invoke(cli, ["plan"], env={"CI": "1", "DEPLOY_ENVIRONMENT": "test2"}) diff --git a/tests/functional/conftest.py b/tests/functional/conftest.py index aca0a495d..5abacb0ea 100644 --- a/tests/functional/conftest.py +++ b/tests/functional/conftest.py @@ -4,11 +4,10 @@ import os from pathlib import Path -from typing import TYPE_CHECKING, Any, Generator +from typing import TYPE_CHECKING, Any +from unittest.mock import patch import pytest -from click.testing import CliRunner -from mock import patch from runway.config import CfnginConfig, RunwayConfig from runway.context import CfnginContext, RunwayContext @@ -18,11 +17,14 @@ from ..factories import cli_runner_factory if TYPE_CHECKING: + from collections.abc import Generator + from _pytest.config import Config from _pytest.fixtures import SubRequest + from click.testing import CliRunner -def pytest_ignore_collect(path: Any, config: Config) -> bool: +def pytest_ignore_collect(path: Any, config: Config) -> bool: # noqa: ARG001 """Determine if this directory should have its tests collected.""" return not config.option.functional @@ -80,7 +82,7 @@ def cfngin_context( @pytest.fixture(scope="module") -def 
cli_runner(cd_test_dir: Path, request: SubRequest) -> CliRunner: +def cli_runner(cd_test_dir: Path, request: SubRequest) -> CliRunner: # noqa: ARG001 """Initialize instance of `click.testing.CliRunner`.""" return cli_runner_factory(request) diff --git a/tests/functional/serverless/test_promotezip/test_runner.py b/tests/functional/serverless/test_promotezip/test_runner.py index 5dbde6319..2b16ebdc3 100644 --- a/tests/functional/serverless/test_promotezip/test_runner.py +++ b/tests/functional/serverless/test_promotezip/test_runner.py @@ -4,22 +4,24 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from click.testing import CliRunner, Result CURRENT_DIR = Path(__file__).parent @pytest.fixture(scope="module") -def deploy_promotezip_result(cli_runner: CliRunner) -> Generator[Result, None, None]: +def deploy_promotezip_result(cli_runner: CliRunner) -> Result: """Execute `runway deploy` with `runway destroy` as a cleanup step.""" - yield cli_runner.invoke( + return cli_runner.invoke( cli, ["deploy", "--tag", "sls"], env={"DEPLOY_ENVIRONMENT": "promotezip", "CI": "1"}, @@ -27,15 +29,15 @@ def deploy_promotezip_result(cli_runner: CliRunner) -> Generator[Result, None, N @pytest.fixture(scope="module") -def deploy_result(cli_runner: CliRunner) -> Generator[Result, None, None]: +def deploy_result(cli_runner: CliRunner) -> Result: """Execute `runway deploy` with `runway destroy` as a cleanup step.""" - yield cli_runner.invoke(cli, ["deploy"], env={"CI": "1"}) + return cli_runner.invoke(cli, ["deploy"], env={"CI": "1"}) @pytest.fixture(scope="module") -def destroy_promotezip_result(cli_runner: CliRunner) -> Generator[Result, None, None]: +def destroy_promotezip_result(cli_runner: CliRunner) -> Result: """Execute `runway destroy`.""" - yield cli_runner.invoke( + return cli_runner.invoke( cli, ["destroy", "--tag", "sls"], env={"DEPLOY_ENVIRONMENT": "promotezip", "CI": "1"}, diff --git a/tests/functional/sources/git/test_runner.py b/tests/functional/sources/git/test_runner.py index 99c2601cf..a68e9d8ab 100644 --- a/tests/functional/sources/git/test_runner.py +++ b/tests/functional/sources/git/test_runner.py @@ -4,22 +4,24 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from click.testing import CliRunner, Result CURRENT_DIR = Path(__file__).parent @pytest.fixture(scope="module") -def deploy_result(cli_runner: CliRunner) -> Generator[Result, None, None]: +def deploy_result(cli_runner: CliRunner) -> Result: """Execute `runway deploy` with `runway destroy` as a cleanup step.""" - yield cli_runner.invoke(cli, ["deploy"], env={"CI": "1"}) + return cli_runner.invoke(cli, ["deploy"], env={"CI": "1"}) @pytest.fixture(scope="module") diff --git a/tests/functional/staticsite/test_simple_build/test_runner.py b/tests/functional/staticsite/test_simple_build/test_runner.py index b931dc304..2a1bd97de 100644 --- a/tests/functional/staticsite/test_simple_build/test_runner.py +++ b/tests/functional/staticsite/test_simple_build/test_runner.py @@ -4,22 +4,24 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest from runway._cli import cli if TYPE_CHECKING: + from collections.abc import Generator + from 
click.testing import CliRunner, Result CURRENT_DIR = Path(__file__).parent @pytest.fixture(scope="module") -def deploy_result(cli_runner: CliRunner) -> Generator[Result, None, None]: +def deploy_result(cli_runner: CliRunner) -> Result: """Execute `runway deploy` with `runway destroy` as a cleanup step.""" - yield cli_runner.invoke(cli, ["deploy"], env={"CI": "1"}) + return cli_runner.invoke(cli, ["deploy"], env={"CI": "1"}) @pytest.fixture(scope="module") diff --git a/tests/functional/terraform/conftest.py b/tests/functional/terraform/conftest.py index 9d3335b50..32355317a 100644 --- a/tests/functional/terraform/conftest.py +++ b/tests/functional/terraform/conftest.py @@ -4,11 +4,13 @@ import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator +from typing import TYPE_CHECKING import pytest if TYPE_CHECKING: + from collections.abc import Generator + from _pytest.fixtures import SubRequest @@ -18,7 +20,7 @@ def fixture_dir() -> Path: return Path(__file__).parent / "fixtures" -@pytest.fixture(scope="function") +@pytest.fixture() def local_backend(fixture_dir: Path, request: SubRequest) -> Generator[Path, None, None]: """Copy local_backend.tf into the test directory.""" file_name = "local_backend.tf" @@ -29,7 +31,7 @@ def local_backend(fixture_dir: Path, request: SubRequest) -> Generator[Path, Non new_file.unlink() -@pytest.fixture(scope="function") +@pytest.fixture() def no_backend(fixture_dir: Path, request: SubRequest) -> Generator[Path, None, None]: """Copy no_backend.tf into the test directory.""" file_name = "no_backend.tf" @@ -40,7 +42,7 @@ def no_backend(fixture_dir: Path, request: SubRequest) -> Generator[Path, None, new_file.unlink() -@pytest.fixture(scope="function") +@pytest.fixture() def s3_backend(fixture_dir: Path, request: SubRequest) -> Generator[Path, None, None]: """Copy s3_backend.tf into the test directory.""" file_name = "s3_backend.tf" diff --git a/tests/functional/terraform/test_backend_local_2_s3/test_runner.py b/tests/functional/terraform/test_backend_local_2_s3/test_runner.py index 3694d1a37..29084e52f 100644 --- a/tests/functional/terraform/test_backend_local_2_s3/test_runner.py +++ b/tests/functional/terraform/test_backend_local_2_s3/test_runner.py @@ -5,7 +5,7 @@ import locale import shutil from pathlib import Path -from typing import TYPE_CHECKING, Iterator, cast +from typing import TYPE_CHECKING, cast import pytest @@ -13,6 +13,8 @@ from runway.env_mgr.tfenv import TF_VERSION_FILENAME if TYPE_CHECKING: + from collections.abc import Iterator + from _pytest.fixtures import SubRequest from click.testing import CliRunner, Result @@ -44,14 +46,18 @@ def tf_version(request: SubRequest) -> Iterator[str]: file_path.unlink(missing_ok=True) -@pytest.fixture(scope="function") -def deploy_local_backend_result(cli_runner: CliRunner, local_backend: Path) -> Iterator[Result]: +@pytest.fixture() +def deploy_local_backend_result( + cli_runner: CliRunner, local_backend: Path # noqa: ARG001 +) -> Result: """Execute `runway deploy` with `runway destroy` as a cleanup step.""" - yield cli_runner.invoke(cli, ["deploy", "--tag", "local"], env={"CI": "1"}) + return cli_runner.invoke(cli, ["deploy", "--tag", "local"], env={"CI": "1"}) -@pytest.fixture(scope="function") -def deploy_s3_backend_result(cli_runner: CliRunner, s3_backend: Path) -> Iterator[Result]: +@pytest.fixture() +def deploy_s3_backend_result( + cli_runner: CliRunner, s3_backend: Path # noqa: ARG001 +) -> Iterator[Result]: """Execute `runway deploy` with `runway destroy` as a cleanup step.""" 
yield cli_runner.invoke(cli, ["deploy", "--tag", "test"], env={"CI": "1"}) # cleanup files diff --git a/tests/functional/terraform/test_backend_no_2_local/test_runner.py b/tests/functional/terraform/test_backend_no_2_local/test_runner.py index d90b0cca7..e2c7f0c0d 100644 --- a/tests/functional/terraform/test_backend_no_2_local/test_runner.py +++ b/tests/functional/terraform/test_backend_no_2_local/test_runner.py @@ -5,7 +5,7 @@ import locale import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator, cast +from typing import TYPE_CHECKING, cast import pytest @@ -13,6 +13,8 @@ from runway.env_mgr.tfenv import TF_VERSION_FILENAME if TYPE_CHECKING: + from collections.abc import Generator + from _pytest.fixtures import SubRequest from click.testing import CliRunner, Result @@ -35,11 +37,11 @@ def tf_version(request: SubRequest) -> Generator[str, None, None]: file_path.unlink(missing_ok=True) -@pytest.fixture(scope="function") +@pytest.fixture() def deploy_local_backend_result( cli_runner: CliRunner, - local_backend: Path, - tf_version: str, + local_backend: Path, # noqa: ARG001 + tf_version: str, # noqa: ARG001 ) -> Generator[Result, None, None]: """Execute `runway deploy` with `runway destroy` as a cleanup step.""" assert (CURRENT_DIR / "terraform.tfstate.d").exists() @@ -51,14 +53,14 @@ def deploy_local_backend_result( (CURRENT_DIR / ".terraform.lock.hcl").unlink(missing_ok=True) -@pytest.fixture(scope="function") +@pytest.fixture() def deploy_no_backend_result( cli_runner: CliRunner, - no_backend: Path, - tf_version: str, -) -> Generator[Result, None, None]: + no_backend: Path, # noqa: ARG001 + tf_version: str, # noqa: ARG001 +) -> Result: """Execute `runway deploy` with `runway destroy` as a cleanup step.""" - yield cli_runner.invoke(cli, ["deploy"], env={"CI": "1"}) + return cli_runner.invoke(cli, ["deploy"], env={"CI": "1"}) def test_deploy_no_backend_result(deploy_no_backend_result: Result) -> None: diff --git a/tests/functional/terraform/test_base/test_runner.py b/tests/functional/terraform/test_base/test_runner.py index f4937facf..9e9354970 100644 --- a/tests/functional/terraform/test_base/test_runner.py +++ b/tests/functional/terraform/test_base/test_runner.py @@ -10,7 +10,7 @@ import locale import shutil from pathlib import Path -from typing import TYPE_CHECKING, Generator, cast +from typing import TYPE_CHECKING, cast import pytest @@ -18,6 +18,8 @@ from runway.env_mgr.tfenv import TF_VERSION_FILENAME if TYPE_CHECKING: + from collections.abc import Generator + from _pytest.fixtures import SubRequest from click.testing import CliRunner, Result @@ -40,8 +42,10 @@ def tf_version(request: SubRequest) -> Generator[str, None, None]: file_path.unlink(missing_ok=True) -@pytest.fixture(scope="function") -def deploy_result(cli_runner: CliRunner, no_backend: Path) -> Generator[Result, None, None]: +@pytest.fixture() +def deploy_result( + cli_runner: CliRunner, no_backend: Path # noqa: ARG001 +) -> Generator[Result, None, None]: """Execute `runway deploy` with `runway destroy` as a cleanup step.""" yield cli_runner.invoke(cli, ["deploy"], env={"CI": "1"}) destroy_result = cli_runner.invoke(cli, ["destroy"], env={"CI": "1"}) diff --git a/tests/integration/cli/commands/kbenv/test_install.py b/tests/integration/cli/commands/kbenv/test_install.py index 8a67306c0..5fdd23daf 100644 --- a/tests/integration/cli/commands/kbenv/test_install.py +++ b/tests/integration/cli/commands/kbenv/test_install.py @@ -6,44 +6,37 @@ from pathlib import Path from typing import TYPE_CHECKING 
-import pytest -from click.testing import CliRunner - from runway._cli import cli -from runway.env_mgr.kbenv import KB_VERSION_FILENAME, KBEnvManager +from runway.env_mgr.kbenv import KB_VERSION_FILENAME if TYPE_CHECKING: - from pytest import LogCaptureFixture - from pytest_mock import MockerFixture - - -@pytest.fixture(autouse=True, scope="function") -def patch_versions_dir(mocker: MockerFixture, tmp_path: Path) -> None: - """Patch TFEnvManager.versions_dir.""" - mocker.patch.object(KBEnvManager, "versions_dir", tmp_path) + import pytest + from click.testing import CliRunner -def test_kbenv_install(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_kbenv_install( + caplog: pytest.LogCaptureFixture, cli_runner: CliRunner, tmp_path: Path +) -> None: """Test ``runway kbenv install`` reading version from a file. For best results, remove any existing installs. """ - caplog.set_level(logging.DEBUG, logger="runway.cli.commands.kbenv") - (cd_tmp_path / KB_VERSION_FILENAME).write_text("v1.14.1") - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "install"]) + caplog.set_level(logging.DEBUG, logger="runway._cli.commands._kbenv") + (tmp_path / KB_VERSION_FILENAME).write_text("v1.14.1") + result = cli_runner.invoke(cli, ["kbenv", "install"]) assert result.exit_code == 0 kb_bin = Path(caplog.messages[-1].replace("kubectl path: ", "")) assert kb_bin.exists() -def test_kbenv_install_no_version_file(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_kbenv_install_no_version_file( + caplog: pytest.LogCaptureFixture, cli_runner: CliRunner +) -> None: """Test ``runway kbenv install`` no version file.""" caplog.set_level(logging.WARNING, logger="runway") - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "install"]) + result = cli_runner.invoke(cli, ["kbenv", "install"]) assert result.exit_code == 1 assert ( @@ -52,15 +45,14 @@ def test_kbenv_install_no_version_file(cd_tmp_path: Path, caplog: LogCaptureFixt ) -def test_kbenv_install_version(caplog: LogCaptureFixture) -> None: +def test_kbenv_install_version(caplog: pytest.LogCaptureFixture, cli_runner: CliRunner) -> None: """Test ``runway kbenv install ``. For best results, remove any existing installs. 
""" - caplog.set_level(logging.DEBUG, logger="runway.cli.commands.kbenv") - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "install", "v1.14.0"]) + caplog.set_level(logging.DEBUG, logger="runway._cli.commands._kbenv") + result = cli_runner.invoke(cli, ["kbenv", "install", "v1.14.0"]) assert result.exit_code == 0 kb_bin = Path(caplog.messages[-1].replace("kubectl path: ", "")) diff --git a/tests/integration/cli/commands/kbenv/test_list.py b/tests/integration/cli/commands/kbenv/test_list.py index ddb4bac0d..96b0df5d2 100644 --- a/tests/integration/cli/commands/kbenv/test_list.py +++ b/tests/integration/cli/commands/kbenv/test_list.py @@ -5,40 +5,40 @@ import logging from typing import TYPE_CHECKING -from click.testing import CliRunner - from runway._cli import cli -from runway.env_mgr.kbenv import KBEnvManager if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture - from pytest_mock import MockerFixture + import pytest + from click.testing import CliRunner -def test_kbenv_list(caplog: LogCaptureFixture, mocker: MockerFixture, tmp_path: Path) -> None: +def test_kbenv_list( + caplog: pytest.LogCaptureFixture, cli_runner: CliRunner, versions_dir: Path +) -> None: """Test ``runway kbenv list``.""" - caplog.set_level(logging.INFO, logger="runway.cli.commands.kbenv") - mocker.patch.object(KBEnvManager, "versions_dir", tmp_path) - version_dirs = [tmp_path / "v1.14.0", tmp_path / "v1.21.0"] + caplog.set_level(logging.INFO, logger="runway._cli.commands._kbenv") + version_dirs = [versions_dir / "v1.14.0", versions_dir / "v1.21.0"] for v_dir in version_dirs: v_dir.mkdir() - (tmp_path / "something.txt").touch() - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "list"]) + (versions_dir / "something.txt").touch() + result = cli_runner.invoke(cli, ["kbenv", "list"]) assert result.exit_code == 0 assert caplog.messages == ["kubectl versions installed:"] - assert result.stdout == "\n".join( - ["[runway] kubectl versions installed:", "v1.14.0", "v1.21.0", ""] - ) + assert {i.strip() for i in result.output.split("\n")} == { + "kubectl versions installed:", + "v1.14.0", + "v1.21.0", + "", + } -def test_kbenv_list_none(caplog: LogCaptureFixture, mocker: MockerFixture, tmp_path: Path) -> None: +def test_kbenv_list_none( + caplog: pytest.LogCaptureFixture, cli_runner: CliRunner, versions_dir: Path +) -> None: """Test ``runway kbenv list`` no versions installed.""" - caplog.set_level(logging.WARNING, logger="runway.cli.commands.kbenv") - mocker.patch.object(KBEnvManager, "versions_dir", tmp_path) - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "list"]) + caplog.set_level(logging.WARNING, logger="runway._cli.commands._kbenv") + result = cli_runner.invoke(cli, ["kbenv", "list"]) assert result.exit_code == 0 - assert caplog.messages == [f"no versions of kubectl installed at path {tmp_path}"] + assert caplog.messages == [f"no versions of kubectl installed at path {versions_dir}"] diff --git a/tests/integration/cli/commands/kbenv/test_run.py b/tests/integration/cli/commands/kbenv/test_run.py index 5b4fd5984..4327dfddd 100644 --- a/tests/integration/cli/commands/kbenv/test_run.py +++ b/tests/integration/cli/commands/kbenv/test_run.py @@ -5,22 +5,23 @@ import logging from typing import TYPE_CHECKING -from click.testing import CliRunner - from runway._cli import cli from runway.env_mgr.kbenv import KB_VERSION_FILENAME if TYPE_CHECKING: from pathlib import Path - from pytest import CaptureFixture, LogCaptureFixture + import pytest + from click.testing 
import CliRunner -def test_kbenv_run_no_version_file(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_kbenv_run_no_version_file( + caplog: pytest.LogCaptureFixture, + cli_runner: CliRunner, +) -> None: """Test ``runway kbenv run -- --help`` no version file.""" caplog.set_level(logging.WARNING, logger="runway") - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "run", "--", "--help"]) + result = cli_runner.invoke(cli, ["kbenv", "run", "--", "--help"]) assert result.exit_code == 1 assert ( @@ -29,33 +30,35 @@ def test_kbenv_run_no_version_file(cd_tmp_path: Path, caplog: LogCaptureFixture) ) -def test_kbenv_run_separator(cd_tmp_path: Path, capfd: CaptureFixture[str]) -> None: +def test_kbenv_run_separator( + capfd: pytest.CaptureFixture[str], cli_runner: CliRunner, tmp_path: Path +) -> None: """Test ``runway kbenv run -- --help``. - Parsing of command using ``--`` as a seperator between options and args. - Everything that comes after the seperator should be forwarded on as an arg + Parsing of command using ``--`` as a separator between options and args. + Everything that comes after the separator should be forwarded on as an arg and not parsed as an option by click. This is only required when trying to pass options shared with Runway such as ``--help``. """ - (cd_tmp_path / KB_VERSION_FILENAME).write_text("v1.14.0") - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "run", "--", "--help"]) + (tmp_path / KB_VERSION_FILENAME).write_text("v1.14.0") + result = cli_runner.invoke(cli, ["kbenv", "run", "--", "--help"]) captured = capfd.readouterr() # capfd required for subprocess assert result.exit_code == 0 assert "runway" not in captured.out assert "kubectl --help" in captured.out -def test_kbenv_run_version(cd_tmp_path: Path, capfd: CaptureFixture[str]) -> None: +def test_kbenv_run_version( + capfd: pytest.CaptureFixture[str], cli_runner: CliRunner, tmp_path: Path +) -> None: """Test ``runway kbenv run version``. Parsing of bare command.
""" - (cd_tmp_path / KB_VERSION_FILENAME).write_text("v1.14.0") - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "run", "version", "--client"]) + (tmp_path / KB_VERSION_FILENAME).write_text("v1.14.0") + result = cli_runner.invoke(cli, ["kbenv", "run", "version", "--client"]) captured = capfd.readouterr() # capfd required for subprocess assert result.exit_code == 0 assert "v1.14.0" in captured.out diff --git a/tests/integration/cli/commands/kbenv/test_uninstall.py b/tests/integration/cli/commands/kbenv/test_uninstall.py index e3bf4746d..089e652ed 100644 --- a/tests/integration/cli/commands/kbenv/test_uninstall.py +++ b/tests/integration/cli/commands/kbenv/test_uninstall.py @@ -3,112 +3,107 @@ from __future__ import annotations import logging -from pathlib import Path from typing import TYPE_CHECKING -import pytest -from click.testing import CliRunner - from runway._cli import cli -from runway.env_mgr.kbenv import KB_VERSION_FILENAME, KBEnvManager +from runway.env_mgr.kbenv import KB_VERSION_FILENAME if TYPE_CHECKING: - from pytest import LogCaptureFixture - from pytest_mock import MockerFixture - -LOGGER = "runway.cli.commands.kbenv" + from pathlib import Path + import pytest + from click.testing import CliRunner -@pytest.fixture(autouse=True, scope="function") -def patch_versions_dir(mocker: MockerFixture, tmp_path: Path) -> None: - """Patch KBEnvManager.versions_dir.""" - mocker.patch.object(KBEnvManager, "versions_dir", tmp_path) +LOGGER = "runway.cli.commands.kbenv" -def test_kbenv_uninstall(cd_tmp_path: Path) -> None: +def test_kbenv_uninstall(cli_runner: CliRunner, versions_dir: Path) -> None: """Test ``runway kbenv uninstall``.""" version = "v1.21.0" - version_dir = cd_tmp_path / version + version_dir = versions_dir / version version_dir.mkdir() - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "uninstall", version]) + result = cli_runner.invoke(cli, ["kbenv", "uninstall", version]) assert result.exit_code == 0 assert not version_dir.exists() -def test_kbenv_uninstall_all(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: +def test_kbenv_uninstall_all( + caplog: pytest.LogCaptureFixture, cli_runner: CliRunner, versions_dir: Path +) -> None: """Test ``runway kbenv uninstall --all``.""" caplog.set_level(logging.INFO, logger=LOGGER) - version_dirs = [cd_tmp_path / "v1.14.0", cd_tmp_path / "v1.21.0"] + version_dirs = [versions_dir / "v1.14.0", versions_dir / "v1.21.0"] for v in version_dirs: v.mkdir() - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "uninstall", "--all"]) + result = cli_runner.invoke(cli, ["kbenv", "uninstall", "--all"]) assert result.exit_code == 0 assert "uninstalling all versions of kubectl..." 
in caplog.messages assert "all versions of kubectl have been uninstalled" in caplog.messages assert all(not v.exists() for v in version_dirs) -def test_kbenv_uninstall_all_takes_precedence(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: +def test_kbenv_uninstall_all_takes_precedence( + caplog: pytest.LogCaptureFixture, cli_runner: CliRunner, versions_dir: Path +) -> None: """Test ``runway kbenv uninstall --all`` takes precedence over arg.""" caplog.set_level(logging.INFO, logger=LOGGER) - version_dirs = [cd_tmp_path / "v1.14.0", cd_tmp_path / "v1.21.0"] + version_dirs = [versions_dir / "v1.14.0", versions_dir / "v1.21.0"] for v in version_dirs: v.mkdir() - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "uninstall", "0.13.0", "--all"]) + result = cli_runner.invoke(cli, ["kbenv", "uninstall", "0.13.0", "--all"]) assert result.exit_code == 0 assert "uninstalling all versions of kubectl..." in caplog.messages assert "all versions of kubectl have been uninstalled" in caplog.messages assert all(not v.exists() for v in version_dirs) -def test_kbenv_uninstall_all_none_installed(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: +def test_kbenv_uninstall_all_none_installed( + caplog: pytest.LogCaptureFixture, cli_runner: CliRunner +) -> None: """Test ``runway kbenv uninstall --all`` none installed.""" caplog.set_level(logging.INFO, logger=LOGGER) - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "uninstall", "--all"]) + result = cli_runner.invoke(cli, ["kbenv", "uninstall", "--all"]) assert result.exit_code == 0 assert "uninstalling all versions of kubectl..." in caplog.messages assert "all versions of kubectl have been uninstalled" in caplog.messages -def test_kbenv_uninstall_arg_takes_precedence(cd_tmp_path: Path) -> None: +def test_kbenv_uninstall_arg_takes_precedence( + cd_tmp_path: Path, cli_runner: CliRunner, versions_dir: Path +) -> None: """Test ``runway kbenv uninstall`` arg takes precedence over file.""" version = "v1.21.0" - version_dir = cd_tmp_path / version + version_dir = versions_dir / version version_dir.mkdir() (cd_tmp_path / KB_VERSION_FILENAME).write_text("v1.14.0") - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "uninstall", version]) + result = cli_runner.invoke(cli, ["kbenv", "uninstall", version]) assert result.exit_code == 0 assert not version_dir.exists() -def test_kbenv_uninstall_no_version(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: +def test_kbenv_uninstall_no_version( + caplog: pytest.LogCaptureFixture, cli_runner: CliRunner +) -> None: """Test ``runway kbenv uninstall`` no version.""" caplog.set_level(logging.ERROR, logger=LOGGER) - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "uninstall"]) + result = cli_runner.invoke(cli, ["kbenv", "uninstall"]) assert result.exit_code != 0 assert "version not specified" in caplog.messages -def test_kbenv_uninstall_not_installed(cd_tmp_path: Path) -> None: +def test_kbenv_uninstall_not_installed(cli_runner: CliRunner) -> None: """Test ``runway kbenv uninstall`` not installed.""" - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "uninstall", "1.21.0"]) - assert result.exit_code != 0 + assert cli_runner.invoke(cli, ["kbenv", "uninstall", "1.21.0"]).exit_code != 0 -def test_kbenv_uninstall_version_file(cd_tmp_path: Path) -> None: +def test_kbenv_uninstall_version_file( + cd_tmp_path: Path, cli_runner: CliRunner, versions_dir: Path +) -> None: """Test ``runway kbenv uninstall`` version file.""" version = "v1.21.0" - version_dir = cd_tmp_path / 
version + version_dir = versions_dir / version version_dir.mkdir() (cd_tmp_path / KB_VERSION_FILENAME).write_text(version) - runner = CliRunner() - result = runner.invoke(cli, ["kbenv", "uninstall"]) + result = cli_runner.invoke(cli, ["kbenv", "uninstall"]) assert result.exit_code == 0 assert not version_dir.exists() diff --git a/tests/integration/cli/commands/test_deploy.py b/tests/integration/cli/commands/test_deploy.py index f6222f3d7..54757a367 100644 --- a/tests/integration/cli/commands/test_deploy.py +++ b/tests/integration/cli/commands/test_deploy.py @@ -9,9 +9,9 @@ import logging from typing import TYPE_CHECKING +from unittest.mock import Mock from click.testing import CliRunner -from mock import Mock from runway._cli import cli from runway.config import RunwayConfig @@ -21,7 +21,7 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture + import pytest from pytest_mock import MockerFixture from ...conftest import CpConfigTypeDef @@ -32,7 +32,7 @@ def test_deploy( cd_tmp_path: Path, cp_config: CpConfigTypeDef, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, ) -> None: """Test deploy.""" @@ -86,7 +86,7 @@ def test_deploy_options_deploy_environment( def test_deploy_options_tag( - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cd_tmp_path: Path, cp_config: CpConfigTypeDef, mocker: MockerFixture, diff --git a/tests/integration/cli/commands/test_destroy.py b/tests/integration/cli/commands/test_destroy.py index 22333edc6..ff60a199f 100644 --- a/tests/integration/cli/commands/test_destroy.py +++ b/tests/integration/cli/commands/test_destroy.py @@ -9,9 +9,9 @@ import logging from typing import TYPE_CHECKING +from unittest.mock import Mock from click.testing import CliRunner -from mock import Mock from runway._cli import cli from runway.config import RunwayConfig @@ -21,7 +21,7 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture, MonkeyPatch + import pytest from pytest_mock import MockerFixture from ...conftest import CpConfigTypeDef @@ -96,10 +96,10 @@ def test_destroy_options_deploy_environment( def test_destroy_options_tag( - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cd_tmp_path: Path, cp_config: CpConfigTypeDef, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, ) -> None: """Test destroy option --tag.""" caplog.set_level(logging.ERROR, logger="runway.cli.commands.destroy") @@ -131,7 +131,7 @@ def test_destroy_options_tag( def test_destroy_select_deployment( - cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: MonkeyPatch + cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: pytest.MonkeyPatch ) -> None: """Test destroy select from two deployments.""" cp_config("min_required_multi", cd_tmp_path) @@ -149,7 +149,7 @@ def test_destroy_select_deployment( def test_destroy_select_deployment_all( - cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: MonkeyPatch + cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: pytest.MonkeyPatch ) -> None: """Test destroy select all deployments.""" cp_config("min_required_multi", cd_tmp_path) @@ -167,7 +167,7 @@ def test_destroy_select_deployment_all( def test_destroy_select_module( - cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: MonkeyPatch + cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: pytest.MonkeyPatch ) -> None: """Test destroy select from two modules.""" cp_config("min_required_multi", cd_tmp_path) @@ -183,7 +183,7 @@ def 
test_destroy_select_module( def test_destroy_select_module_all( - cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: MonkeyPatch + cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: pytest.MonkeyPatch ) -> None: """Test destroy select all modules.""" cp_config("min_required_multi", cd_tmp_path) @@ -200,7 +200,7 @@ def test_destroy_select_module_all( def test_destroy_select_module_child_modules( - cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: MonkeyPatch + cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: pytest.MonkeyPatch ) -> None: """Test destroy select child module.""" cp_config("simple_child_modules.1", cd_tmp_path) @@ -216,7 +216,7 @@ def test_destroy_select_module_child_modules( def test_destroy_select_module_child_modules_all( - cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: MonkeyPatch + cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: pytest.MonkeyPatch ) -> None: """Test destroy select all child module.""" cp_config("simple_child_modules.1", cd_tmp_path) diff --git a/tests/integration/cli/commands/test_dismantle.py b/tests/integration/cli/commands/test_dismantle.py index 43572b54e..3ce8eab54 100644 --- a/tests/integration/cli/commands/test_dismantle.py +++ b/tests/integration/cli/commands/test_dismantle.py @@ -4,9 +4,9 @@ import logging from typing import TYPE_CHECKING +from unittest.mock import Mock from click.testing import CliRunner -from mock import Mock from runway._cli import cli from runway._cli.commands import destroy @@ -14,16 +14,16 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture, MonkeyPatch + import pytest from ...conftest import CpConfigTypeDef def test_dismantle( - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cd_tmp_path: Path, cp_config: CpConfigTypeDef, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, ) -> None: """Test dismantle.""" cp_config("min_required", cd_tmp_path) diff --git a/tests/integration/cli/commands/test_docs.py b/tests/integration/cli/commands/test_docs.py index 677680bed..9e5f6bc6c 100644 --- a/tests/integration/cli/commands/test_docs.py +++ b/tests/integration/cli/commands/test_docs.py @@ -3,14 +3,14 @@ from __future__ import annotations from typing import TYPE_CHECKING +from unittest.mock import patch from click.testing import CliRunner -from mock import patch from runway._cli import cli if TYPE_CHECKING: - from mock import MagicMock + from unittest.mock import MagicMock DOCS_URL = "https://docs.onica.com/projects/runway/" diff --git a/tests/integration/cli/commands/test_envvars.py b/tests/integration/cli/commands/test_envvars.py index 78f6504e6..a64c69bdf 100644 --- a/tests/integration/cli/commands/test_envvars.py +++ b/tests/integration/cli/commands/test_envvars.py @@ -4,16 +4,16 @@ import logging from typing import TYPE_CHECKING +from unittest.mock import Mock from click.testing import CliRunner -from mock import Mock from runway._cli import cli if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture, MonkeyPatch + import pytest from ...conftest import CpConfigTypeDef @@ -29,7 +29,9 @@ ) -def test_envvars(cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: MonkeyPatch) -> None: +def test_envvars( + cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: pytest.MonkeyPatch +) -> None: """Test envvars.""" monkeypatch.setattr("platform.system", Mock(return_value="Darwin")) cp_config("simple_env_vars", cd_tmp_path) @@ -40,7 +42,7 @@ def test_envvars(cd_tmp_path: Path, 
cp_config: CpConfigTypeDef, monkeypatch: Mon def test_envvar_windows( - cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: MonkeyPatch + cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: pytest.MonkeyPatch ) -> None: """Test envvars for Windows.""" monkeypatch.setattr("platform.system", Mock(return_value="Windows")) @@ -56,7 +58,7 @@ def test_envvar_windows( assert result1.output == POSIX_OUTPUT -def test_envvars_no_config(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: +def test_envvars_no_config(caplog: pytest.LogCaptureFixture, cd_tmp_path: Path) -> None: """Test envvars with no config in the directory or parent.""" caplog.set_level(logging.ERROR, logger="runway") runner = CliRunner() @@ -70,7 +72,7 @@ def test_envvars_no_config(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None def test_envvars_no_env_vars( - caplog: LogCaptureFixture, cd_tmp_path: Path, cp_config: CpConfigTypeDef + caplog: pytest.LogCaptureFixture, cd_tmp_path: Path, cp_config: CpConfigTypeDef ) -> None: """Test envvars with no env_vars in the config.""" caplog.set_level(logging.ERROR, logger="runway") diff --git a/tests/integration/cli/commands/test_gen_sample.py b/tests/integration/cli/commands/test_gen_sample.py index 734f89b70..8596dda4d 100644 --- a/tests/integration/cli/commands/test_gen_sample.py +++ b/tests/integration/cli/commands/test_gen_sample.py @@ -13,10 +13,8 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture - -def test_cdk_csharp(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_cdk_csharp(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway gen-sample cdk-csharp`` command.""" caplog.set_level(logging.INFO, logger="runway.cli") runner = CliRunner() @@ -49,7 +47,7 @@ def test_cdk_csharp(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: ] -def test_cdk_py(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_cdk_py(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway gen-sample cdk-py`` command.""" caplog.set_level(logging.INFO, logger="runway.cli") runner = CliRunner() @@ -81,7 +79,7 @@ def test_cdk_py(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: ] -def test_cdk_tsc(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_cdk_tsc(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway gen-sample cdk-tsc`` command.""" caplog.set_level(logging.INFO, logger="runway.cli") runner = CliRunner() @@ -113,7 +111,7 @@ def test_cdk_tsc(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: ] -def test_cfn(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_cfn(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway gen-sample cfn`` command.""" caplog.set_level(logging.INFO, logger="runway.cli") runner = CliRunner() @@ -131,7 +129,7 @@ def test_cfn(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: assert caplog.messages == [f"Sample CloudFormation module created at {module}"] -def test_cfngin(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_cfngin(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway gen-sample cfngin`` command.""" caplog.set_level(logging.INFO, logger="runway.cli") runner = CliRunner() @@ -154,7 +152,7 @@ def test_cfngin(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: assert caplog.messages == [f"Sample CFNgin module created at {module}"] -def test_k8s_cfn_repo(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def 
test_k8s_cfn_repo(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway gen-sample k8s-cfn-repo`` command.""" caplog.set_level(logging.INFO, logger="runway.cli") runner = CliRunner() @@ -205,7 +203,7 @@ def test_k8s_cfn_repo(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: ] -def test_k8s_tf_repo(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_k8s_tf_repo(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway gen-sample k8s-tf-repo`` command.""" caplog.set_level(logging.INFO, logger="runway.cli") runner = CliRunner() @@ -251,7 +249,7 @@ def test_k8s_tf_repo(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: ] -def test_sls_py(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_sls_py(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway gen-sample sls-py`` command.""" caplog.set_level(logging.INFO, logger="runway.cli") runner = CliRunner() @@ -282,7 +280,7 @@ def test_sls_py(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: ] -def test_sls_tsc(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_sls_tsc(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway gen-sample sls-tsc`` command.""" caplog.set_level(logging.INFO, logger="runway.cli") runner = CliRunner() @@ -315,7 +313,7 @@ def test_sls_tsc(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: ] -def test_static_angular(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_static_angular(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway gen-sample static-angular`` command.""" caplog.set_level(logging.INFO, logger="runway.cli") runner = CliRunner() @@ -369,7 +367,7 @@ def test_static_angular(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: ] -def test_static_react(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_static_react(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway gen-sample static-react`` command.""" caplog.set_level(logging.INFO, logger="runway.cli") runner = CliRunner() @@ -409,7 +407,7 @@ def test_static_react(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: ] -def test_tf(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_tf(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway gen-sample tf`` command.""" caplog.set_level(logging.INFO, logger="runway.cli") runner = CliRunner() @@ -450,7 +448,7 @@ def test_tf(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: ], ) def test_dir_exists( - command: str, dir_name: str, caplog: LogCaptureFixture, cd_tmp_path: Path + command: str, dir_name: str, caplog: pytest.LogCaptureFixture, cd_tmp_path: Path ) -> None: """Test ``runway gen-sample`` commands when directory exists.""" caplog.set_level(logging.ERROR, logger="runway.cli.gen_sample") diff --git a/tests/integration/cli/commands/test_init.py b/tests/integration/cli/commands/test_init.py index 0d46a22bd..b188d06b4 100644 --- a/tests/integration/cli/commands/test_init.py +++ b/tests/integration/cli/commands/test_init.py @@ -9,9 +9,9 @@ import logging from typing import TYPE_CHECKING +from unittest.mock import Mock from click.testing import CliRunner -from mock import Mock from pydantic import ValidationError from runway._cli import cli @@ -23,7 +23,7 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture + import pytest from pytest_mock import MockerFixture from ...conftest import CpConfigTypeDef @@ -34,7 
+34,7 @@ def test_init( cd_tmp_path: Path, cp_config: CpConfigTypeDef, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, ) -> None: """Test init.""" @@ -124,7 +124,7 @@ def test_init_options_deploy_environment( def test_init_options_tag( - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cd_tmp_path: Path, cp_config: CpConfigTypeDef, mocker: MockerFixture, diff --git a/tests/integration/cli/commands/test_new.py b/tests/integration/cli/commands/test_new.py index 447644fd7..da77ab412 100644 --- a/tests/integration/cli/commands/test_new.py +++ b/tests/integration/cli/commands/test_new.py @@ -13,10 +13,10 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture + import pytest -def test_new(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_new(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway new`` command.""" caplog.set_level(logging.INFO, logger="runway.cli") runner = CliRunner() @@ -36,7 +36,7 @@ def test_new(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: ] -def test_new_file_exists(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_new_file_exists(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway new`` command with existing file.""" caplog.set_level(logging.ERROR, logger="runway.cli") (cd_tmp_path / "runway.yml").touch() diff --git a/tests/integration/cli/commands/test_plan.py b/tests/integration/cli/commands/test_plan.py index 87afc3489..3dca1d8da 100644 --- a/tests/integration/cli/commands/test_plan.py +++ b/tests/integration/cli/commands/test_plan.py @@ -9,9 +9,9 @@ import logging from typing import TYPE_CHECKING +from unittest.mock import Mock from click.testing import CliRunner -from mock import Mock from runway._cli import cli from runway.config import RunwayConfig @@ -21,7 +21,7 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture + import pytest from pytest_mock import MockerFixture from ...conftest import CpConfigTypeDef @@ -78,7 +78,7 @@ def test_plan_options_deploy_environment( def test_plan_options_tag( - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cd_tmp_path: Path, cp_config: CpConfigTypeDef, mocker: MockerFixture, diff --git a/tests/integration/cli/commands/test_preflight.py b/tests/integration/cli/commands/test_preflight.py index bb307bbd3..ebe95952f 100644 --- a/tests/integration/cli/commands/test_preflight.py +++ b/tests/integration/cli/commands/test_preflight.py @@ -4,9 +4,9 @@ import logging from typing import TYPE_CHECKING +from unittest.mock import Mock from click.testing import CliRunner -from mock import Mock from runway._cli import cli from runway._cli.commands import test @@ -14,16 +14,16 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture, MonkeyPatch + import pytest from ...conftest import CpConfigTypeDef def test_preflight( - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cd_tmp_path: Path, cp_config: CpConfigTypeDef, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, ) -> None: """Test ``runway preflight``.""" cp_config("min_required", cd_tmp_path) diff --git a/tests/integration/cli/commands/test_takeoff.py b/tests/integration/cli/commands/test_takeoff.py index f78ee1b89..a53761a54 100644 --- a/tests/integration/cli/commands/test_takeoff.py +++ b/tests/integration/cli/commands/test_takeoff.py @@ -4,9 +4,9 @@ import logging from typing import TYPE_CHECKING +from unittest.mock 
import Mock from click.testing import CliRunner -from mock import Mock from runway._cli import cli from runway._cli.commands import deploy @@ -14,16 +14,16 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture, MonkeyPatch + import pytest from ...conftest import CpConfigTypeDef def test_takeoff( - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cd_tmp_path: Path, cp_config: CpConfigTypeDef, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, ) -> None: """Test takeoff.""" cp_config("min_required", cd_tmp_path) diff --git a/tests/integration/cli/commands/test_taxi.py b/tests/integration/cli/commands/test_taxi.py index 782596f93..035d7f9e3 100644 --- a/tests/integration/cli/commands/test_taxi.py +++ b/tests/integration/cli/commands/test_taxi.py @@ -4,9 +4,9 @@ import logging from typing import TYPE_CHECKING +from unittest.mock import Mock from click.testing import CliRunner -from mock import Mock from runway._cli import cli from runway._cli.commands import plan @@ -14,16 +14,16 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture, MonkeyPatch + import pytest from ...conftest import CpConfigTypeDef def test_taxi( - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cd_tmp_path: Path, cp_config: CpConfigTypeDef, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, ) -> None: """Test taxi.""" cp_config("min_required", cd_tmp_path) diff --git a/tests/integration/cli/commands/test_test.py b/tests/integration/cli/commands/test_test.py index da75fa1c7..ae2156b25 100644 --- a/tests/integration/cli/commands/test_test.py +++ b/tests/integration/cli/commands/test_test.py @@ -37,13 +37,13 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import CaptureFixture, LogCaptureFixture + import pytest # def test_test_invalid_type(cd_tmp_path, capfd, caplog): def test_test_invalid_type( cd_tmp_path: Path, -) -> None: # TODO update after catching error +) -> None: # TODO (kyle): update after catching error """Test ``runway test`` with two tests; one invalid.""" # caplog.set_level(logging.INFO, logger="runway.core") runway_yml = cd_tmp_path / "runway.yml" @@ -55,20 +55,10 @@ def test_test_invalid_type( result = runner.invoke(cli, ["test"]) assert result.exit_code == 1 - assert result.exception.errors()[0]["loc"] == ("tests", 0, "type") + assert result.exception.errors()[0]["loc"] == ("tests", 0, "type") # type: ignore - # captured = capfd.readouterr() - # logs = "\n".join(caplog.messages) - # print(captured) - # assert "found 2 test(s)" in logs - # assert "invalid-type:running test (in progress)" in logs - # assert 'invalid-type:unable to find handler of type "invalid"' in logs - # assert "success:running test (in progress)" in logs - # assert "Hello world" in captured.out - # assert "success:running test (pass)" in logs - -def test_test_not_defined(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_test_not_defined(cd_tmp_path: Path, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway test`` with no tests defined.""" caplog.set_level(logging.ERROR) runway_yml = cd_tmp_path / "runway.yml" @@ -81,7 +71,7 @@ def test_test_not_defined(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: def test_test_single_successful( - cd_tmp_path: Path, capfd: CaptureFixture[str], caplog: LogCaptureFixture + cd_tmp_path: Path, capfd: pytest.CaptureFixture[str], caplog: pytest.LogCaptureFixture ) -> None: """Test ``runway test`` with a single, successful test.""" 
caplog.set_level(logging.INFO, logger="runway.core") @@ -101,7 +91,7 @@ def test_test_single_successful( def test_test_two_test( - cd_tmp_path: Path, capfd: CaptureFixture[str], caplog: LogCaptureFixture + cd_tmp_path: Path, capfd: pytest.CaptureFixture[str], caplog: pytest.LogCaptureFixture ) -> None: """Test ``runway test`` with two tests; one failing.""" caplog.set_level(logging.INFO, logger="runway.core") @@ -126,7 +116,7 @@ def test_test_two_test( def test_test_two_test_required( - cd_tmp_path: Path, capfd: CaptureFixture[str], caplog: LogCaptureFixture + cd_tmp_path: Path, capfd: pytest.CaptureFixture[str], caplog: pytest.LogCaptureFixture ) -> None: """Test ``runway test`` with two tests; one failing required.""" caplog.set_level(logging.INFO) diff --git a/tests/integration/cli/commands/test_whichenv.py b/tests/integration/cli/commands/test_whichenv.py index e66858755..71a29a83f 100644 --- a/tests/integration/cli/commands/test_whichenv.py +++ b/tests/integration/cli/commands/test_whichenv.py @@ -13,10 +13,10 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture + import pytest -def test_whichenv(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: +def test_whichenv(caplog: pytest.LogCaptureFixture, cd_tmp_path: Path) -> None: """Test ``runway whichenv``.""" caplog.set_level(logging.DEBUG, logger="runway") runway_yml = cd_tmp_path / "runway.yml" @@ -27,7 +27,7 @@ def test_whichenv(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: assert result.output == cd_tmp_path.name + "\n" -def test_whichenv_debug(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: +def test_whichenv_debug(caplog: pytest.LogCaptureFixture, cd_tmp_path: Path) -> None: """Test ``runway whichenv`` debug.""" caplog.set_level(logging.DEBUG, logger="runway") runway_yml = cd_tmp_path / "runway.yml" @@ -39,7 +39,7 @@ def test_whichenv_debug(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: assert "set dependency log level to debug" not in caplog.messages -def test_whichenv_debug_debug(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: +def test_whichenv_debug_debug(caplog: pytest.LogCaptureFixture, cd_tmp_path: Path) -> None: """Test ``runway whichenv`` debug.""" caplog.set_level(logging.DEBUG, logger="runway") runway_yml = cd_tmp_path / "runway.yml" diff --git a/tests/integration/cli/commands/tfenv/test_install.py b/tests/integration/cli/commands/tfenv/test_install.py index b06820300..a88114cd7 100644 --- a/tests/integration/cli/commands/tfenv/test_install.py +++ b/tests/integration/cli/commands/tfenv/test_install.py @@ -6,59 +6,48 @@ from pathlib import Path from typing import TYPE_CHECKING -import pytest -from click.testing import CliRunner - from runway._cli import cli -from runway.env_mgr.tfenv import TFEnvManager if TYPE_CHECKING: - from pytest import LogCaptureFixture - from pytest_mock import MockerFixture - + import pytest + from click.testing import CliRunner -@pytest.fixture(autouse=True, scope="function") -def patch_versions_dir(mocker: MockerFixture, tmp_path: Path) -> None: - """Patch TFEnvManager.versions_dir.""" - mocker.patch.object(TFEnvManager, "versions_dir", tmp_path) - -def test_tfenv_install(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_tfenv_install( + cd_tmp_path: Path, cli_runner: CliRunner, caplog: pytest.LogCaptureFixture +) -> None: """Test ``runway tfenv install`` reading version from a file. For best results, remove any existing installs. 
""" - caplog.set_level(logging.DEBUG, logger="runway.cli.commands.tfenv") + caplog.set_level(logging.DEBUG, logger="runway._cli.commands._tfenv") (cd_tmp_path / ".terraform-version").write_text("0.12.0") - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "install"]) + result = cli_runner.invoke(cli, ["tfenv", "install"]) assert result.exit_code == 0 tf_bin = Path(caplog.messages[-1].replace("terraform path: ", "")) assert tf_bin.exists() -def test_tfenv_install_no_version_file(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_tfenv_install_no_version_file( + cli_runner: CliRunner, caplog: pytest.LogCaptureFixture +) -> None: """Test ``runway tfenv install`` no version file.""" caplog.set_level(logging.ERROR, logger="runway") - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "install"]) - assert result.exit_code == 1 + assert cli_runner.invoke(cli, ["tfenv", "install"]).exit_code == 1 assert "unable to find a .terraform-version file" in "\n".join(caplog.messages) -def test_tfenv_install_version(caplog: LogCaptureFixture) -> None: +def test_tfenv_install_version(caplog: pytest.LogCaptureFixture, cli_runner: CliRunner) -> None: """Test ``runway tfenv install ``. For best results, remove any existing installs. """ - caplog.set_level(logging.DEBUG, logger="runway.cli.commands.tfenv") - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "install", "0.12.1"]) - assert result.exit_code == 0 + caplog.set_level(logging.DEBUG, logger="runway._cli.commands._tfenv") + assert cli_runner.invoke(cli, ["tfenv", "install", "0.12.1"]).exit_code == 0 kb_bin = Path(caplog.messages[-1].replace("terraform path: ", "")) assert kb_bin.exists() diff --git a/tests/integration/cli/commands/tfenv/test_list.py b/tests/integration/cli/commands/tfenv/test_list.py index f0a35df89..e6aaf6a06 100644 --- a/tests/integration/cli/commands/tfenv/test_list.py +++ b/tests/integration/cli/commands/tfenv/test_list.py @@ -13,11 +13,13 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture + import pytest from pytest_mock import MockerFixture -def test_tfenv_list(caplog: LogCaptureFixture, mocker: MockerFixture, tmp_path: Path) -> None: +def test_tfenv_list( + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, tmp_path: Path +) -> None: """Test ``runway tfenv list``.""" caplog.set_level(logging.INFO, logger="runway.cli.commands.tfenv") mocker.patch.object(TFEnvManager, "versions_dir", tmp_path) @@ -29,12 +31,12 @@ def test_tfenv_list(caplog: LogCaptureFixture, mocker: MockerFixture, tmp_path: result = runner.invoke(cli, ["tfenv", "list"]) assert result.exit_code == 0 assert caplog.messages == ["Terraform versions installed:"] - assert result.stdout == "\n".join( - ["[runway] Terraform versions installed:", "0.13.0", "1.0.0", ""] - ) + assert result.stdout == "[runway] Terraform versions installed:\n0.13.0\n1.0.0\n" -def test_tfenv_list_none(caplog: LogCaptureFixture, mocker: MockerFixture, tmp_path: Path) -> None: +def test_tfenv_list_none( + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, tmp_path: Path +) -> None: """Test ``runway tfenv list`` no versions installed.""" caplog.set_level(logging.WARNING, logger="runway.cli.commands.tfenv") mocker.patch.object(TFEnvManager, "versions_dir", tmp_path) diff --git a/tests/integration/cli/commands/tfenv/test_run.py b/tests/integration/cli/commands/tfenv/test_run.py index a65ec0fe7..e8ab9b1b4 100644 --- a/tests/integration/cli/commands/tfenv/test_run.py +++ 
b/tests/integration/cli/commands/tfenv/test_run.py @@ -5,26 +5,25 @@ import logging from typing import TYPE_CHECKING -from click.testing import CliRunner - from runway._cli import cli if TYPE_CHECKING: from pathlib import Path - from pytest import CaptureFixture, LogCaptureFixture + import pytest + from click.testing import CliRunner -def test_tfenv_run_no_version_file(cd_tmp_path: Path, caplog: LogCaptureFixture) -> None: +def test_tfenv_run_no_version_file(cli_runner: CliRunner, caplog: pytest.LogCaptureFixture) -> None: """Test ``runway tfenv run -- --help`` no version file.""" caplog.set_level(logging.ERROR, logger="runway") - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "run", "--", "--help"]) - assert result.exit_code == 1 + assert cli_runner.invoke(cli, ["tfenv", "run", "--", "--help"]).exit_code == 1 assert "unable to find a .terraform-version file" in "\n".join(caplog.messages) -def test_tfenv_run_separator(cd_tmp_path: Path, capfd: CaptureFixture[str]) -> None: +def test_tfenv_run_separator( + cli_runner: CliRunner, capfd: pytest.CaptureFixture[str], tmp_path: Path +) -> None: """Test ``runway tfenv run -- --help``. Parsing of command using ``--`` as a separator between options and args. @@ -33,25 +32,25 @@ def test_tfenv_run_separator(cd_tmp_path: Path, capfd: CaptureFixture[str]) -> N pass options shared with Runway such as ``--help``. """ - (cd_tmp_path / ".terraform-version").write_text("0.12.0") - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "run", "--", "--help"]) + (tmp_path / ".terraform-version").write_text("0.12.0") + result = cli_runner.invoke(cli, ["tfenv", "run", "--", "--help"]) captured = capfd.readouterr() # capfd required for subprocess assert result.exit_code == 0 assert "runway" not in captured.out assert "terraform [-version] [-help] [args]" in captured.out -def test_tfenv_run_version(cd_tmp_path: Path, capfd: CaptureFixture[str]) -> None: +def test_tfenv_run_version( + cli_runner: CliRunner, capfd: pytest.CaptureFixture[str], tmp_path: Path +) -> None: """Test ``runway tfenv run --version``. Parsing of bare command. 
""" version = "0.12.0" - (cd_tmp_path / ".terraform-version").write_text(version) - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "run", "--version"]) + (tmp_path / ".terraform-version").write_text(version) + result = cli_runner.invoke(cli, ["tfenv", "run", "--version"]) captured = capfd.readouterr() # capfd required for subprocess assert result.exit_code == 0 assert f"Terraform v{version}" in captured.out diff --git a/tests/integration/cli/commands/tfenv/test_uninstall.py b/tests/integration/cli/commands/tfenv/test_uninstall.py index b7edcd2f1..f658a0af9 100644 --- a/tests/integration/cli/commands/tfenv/test_uninstall.py +++ b/tests/integration/cli/commands/tfenv/test_uninstall.py @@ -3,112 +3,107 @@ from __future__ import annotations import logging -from pathlib import Path from typing import TYPE_CHECKING -import pytest -from click.testing import CliRunner - from runway._cli import cli -from runway.env_mgr.tfenv import TF_VERSION_FILENAME, TFEnvManager +from runway.env_mgr.tfenv import TF_VERSION_FILENAME if TYPE_CHECKING: - from pytest import LogCaptureFixture - from pytest_mock import MockerFixture - -LOGGER = "runway.cli.commands.tfenv" + from pathlib import Path + import pytest + from click.testing import CliRunner -@pytest.fixture(autouse=True, scope="function") -def patch_versions_dir(mocker: MockerFixture, tmp_path: Path) -> None: - """Patch TFEnvManager.versions_dir.""" - mocker.patch.object(TFEnvManager, "versions_dir", tmp_path) +LOGGER = "runway.cli.commands.tfenv" -def test_tfenv_uninstall(cd_tmp_path: Path) -> None: +def test_tfenv_uninstall(cli_runner: CliRunner, versions_dir: Path) -> None: """Test ``runway tfenv uninstall``.""" version = "1.0.0" - version_dir = cd_tmp_path / version + version_dir = versions_dir / version version_dir.mkdir() - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "uninstall", "1.0.0"]) + result = cli_runner.invoke(cli, ["tfenv", "uninstall", "1.0.0"]) assert result.exit_code == 0 assert not version_dir.exists() -def test_tfenv_uninstall_all(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: +def test_tfenv_uninstall_all( + caplog: pytest.LogCaptureFixture, cli_runner: CliRunner, versions_dir: Path +) -> None: """Test ``runway tfenv uninstall --all``.""" caplog.set_level(logging.INFO, logger=LOGGER) - version_dirs = [cd_tmp_path / "0.12.0", cd_tmp_path / "1.0.0"] + version_dirs = [versions_dir / "0.12.0", versions_dir / "1.0.0"] for v in version_dirs: v.mkdir() - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "uninstall", "--all"]) + result = cli_runner.invoke(cli, ["tfenv", "uninstall", "--all"]) assert result.exit_code == 0 assert "uninstalling all versions of Terraform..." 
in caplog.messages assert "all versions of Terraform have been uninstalled" in caplog.messages assert all(not v.exists() for v in version_dirs) -def test_tfenv_uninstall_all_takes_precedence(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: +def test_tfenv_uninstall_all_takes_precedence( + caplog: pytest.LogCaptureFixture, cli_runner: CliRunner, versions_dir: Path +) -> None: """Test ``runway tfenv uninstall --all`` takes precedence over arg.""" caplog.set_level(logging.INFO, logger=LOGGER) - version_dirs = [cd_tmp_path / "0.12.0", cd_tmp_path / "1.0.0"] + version_dirs = [versions_dir / "0.12.0", versions_dir / "1.0.0"] for v in version_dirs: v.mkdir() - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "uninstall", "0.13.0", "--all"]) + result = cli_runner.invoke(cli, ["tfenv", "uninstall", "0.13.0", "--all"]) assert result.exit_code == 0 assert "uninstalling all versions of Terraform..." in caplog.messages assert "all versions of Terraform have been uninstalled" in caplog.messages assert all(not v.exists() for v in version_dirs) -def test_tfenv_uninstall_all_none_installed(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: +def test_tfenv_uninstall_all_none_installed( + caplog: pytest.LogCaptureFixture, cli_runner: CliRunner +) -> None: """Test ``runway tfenv uninstall --all`` none installed.""" caplog.set_level(logging.INFO, logger=LOGGER) - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "uninstall", "--all"]) + result = cli_runner.invoke(cli, ["tfenv", "uninstall", "--all"]) assert result.exit_code == 0 assert "uninstalling all versions of Terraform..." in caplog.messages assert "all versions of Terraform have been uninstalled" in caplog.messages -def test_tfenv_uninstall_arg_takes_precedence(cd_tmp_path: Path) -> None: +def test_tfenv_uninstall_arg_takes_precedence( + cd_tmp_path: Path, cli_runner: CliRunner, versions_dir: Path +) -> None: """Test ``runway tfenv uninstall`` arg takes precedence over file.""" version = "1.0.0" - version_dir = cd_tmp_path / version + version_dir = versions_dir / version version_dir.mkdir() (cd_tmp_path / TF_VERSION_FILENAME).write_text("0.12.0") - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "uninstall", "1.0.0"]) + result = cli_runner.invoke(cli, ["tfenv", "uninstall", "1.0.0"]) assert result.exit_code == 0 assert not version_dir.exists() -def test_tfenv_uninstall_no_version(caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: +def test_tfenv_uninstall_no_version( + caplog: pytest.LogCaptureFixture, cli_runner: CliRunner +) -> None: """Test ``runway tfenv uninstall`` no version.""" caplog.set_level(logging.ERROR, logger=LOGGER) - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "uninstall"]) + result = cli_runner.invoke(cli, ["tfenv", "uninstall"]) assert result.exit_code != 0 assert "version not specified" in caplog.messages -def test_tfenv_uninstall_not_installed(cd_tmp_path: Path) -> None: +def test_tfenv_uninstall_not_installed(cli_runner: CliRunner) -> None: """Test ``runway tfenv uninstall`` not installed.""" - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "uninstall", "1.0.0"]) - assert result.exit_code != 0 + assert cli_runner.invoke(cli, ["tfenv", "uninstall", "1.0.0"]).exit_code != 0 -def test_tfenv_uninstall_version_file(cd_tmp_path: Path) -> None: +def test_tfenv_uninstall_version_file( + cd_tmp_path: Path, cli_runner: CliRunner, versions_dir: Path +) -> None: """Test ``runway tfenv uninstall`` version file.""" version = "1.0.0" - version_dir = cd_tmp_path / 
version + version_dir = versions_dir / version version_dir.mkdir() (cd_tmp_path / TF_VERSION_FILENAME).write_text(version) - runner = CliRunner() - result = runner.invoke(cli, ["tfenv", "uninstall"]) + result = cli_runner.invoke(cli, ["tfenv", "uninstall"]) assert result.exit_code == 0 assert not version_dir.exists() diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index b45ce2263..7d642d531 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -16,7 +16,7 @@ CpConfigTypeDef = Callable[[str, Path], Path] -def pytest_ignore_collect(path: Any, config: Config) -> bool: +def pytest_ignore_collect(path: Any, config: Config) -> bool: # noqa: ARG001 """Determine if this directory should have its tests collected.""" if config.option.functional: return True @@ -25,13 +25,13 @@ def pytest_ignore_collect(path: Any, config: Config) -> bool: return not (config.option.integration or config.option.integration_only) -@pytest.fixture +@pytest.fixture() def configs() -> Path: """Path to Runway config fixtures.""" return TEST_ROOT.parent / "fixtures" / "configs" -@pytest.fixture +@pytest.fixture() def cp_config(configs: Path) -> Callable[[str, Path], Path]: """Copy a config file.""" diff --git a/tests/unit/cfngin/actions/conftest.py b/tests/unit/cfngin/actions/conftest.py index ac890d883..829d9d9d6 100644 --- a/tests/unit/cfngin/actions/conftest.py +++ b/tests/unit/cfngin/actions/conftest.py @@ -7,16 +7,17 @@ from typing import TYPE_CHECKING import pytest -from mock import MagicMock from runway.cfngin.providers.aws.default import Provider if TYPE_CHECKING: + from unittest.mock import MagicMock + from mypy_boto3_cloudformation.type_defs import StackTypeDef from pytest_mock import MockerFixture -@pytest.fixture(scope="function") +@pytest.fixture() def provider_get_stack(mocker: MockerFixture) -> MagicMock: """Patches ``runway.cfngin.providers.aws.default.Provider.get_stack``.""" return_value: StackTypeDef = { diff --git a/tests/unit/cfngin/actions/test_base.py b/tests/unit/cfngin/actions/test_base.py index 6bd2190a8..e11b7d39d 100644 --- a/tests/unit/cfngin/actions/test_base.py +++ b/tests/unit/cfngin/actions/test_base.py @@ -2,9 +2,10 @@ # pyright: basic import unittest +from unittest.mock import MagicMock, PropertyMock, patch import botocore.exceptions -from mock import MagicMock, PropertyMock, patch +import pytest from runway.cfngin.actions.base import BaseAction from runway.cfngin.blueprints.base import Blueprint @@ -82,7 +83,7 @@ def test_ensure_cfn_bucket_exists_raise_cfngin_bucket_not_found( context=mock_context("mynamespace"), provider_builder=MockProviderBuilder(provider=Provider(get_session("us-east-1"))), ) - with self.assertRaises(CfnginBucketNotFound): + with pytest.raises(CfnginBucketNotFound): assert action.ensure_cfn_bucket() mock_ensure_s3_bucket.assert_called_once_with( action.s3_conn, action.bucket_name, None, create=False @@ -109,14 +110,14 @@ def test_generate_plan_no_persist_exclude( plan = action._generate_plan(include_persistent_graph=False) mock_tags.assert_not_called() - self.assertIsInstance(plan, Plan) + assert isinstance(plan, Plan) # order is different between python2/3 so can't compare dicts result_graph_dict = plan.graph.to_dict() - self.assertEqual(2, len(result_graph_dict)) - self.assertEqual(set(), result_graph_dict["stack1"]) - self.assertEqual({"stack1"}, result_graph_dict["stack2"]) - self.assertEqual(BaseAction.DESCRIPTION, plan.description) - self.assertTrue(plan.require_unlocked) + assert len(result_graph_dict) == 2 
+ assert set() == result_graph_dict["stack1"] + assert {"stack1"} == result_graph_dict["stack2"] + assert plan.description == BaseAction.DESCRIPTION + assert plan.require_unlocked @patch("runway.context.CfnginContext.persistent_graph_tags", new_callable=PropertyMock) @patch("runway.cfngin.actions.base.BaseAction._stack_action", new_callable=PropertyMock) @@ -139,14 +140,14 @@ def test_generate_plan_no_persist_include( plan = action._generate_plan(include_persistent_graph=True) mock_tags.assert_not_called() - self.assertIsInstance(plan, Plan) + assert isinstance(plan, Plan) # order is different between python2/3 so can't compare dicts result_graph_dict = plan.graph.to_dict() - self.assertEqual(2, len(result_graph_dict)) - self.assertEqual(set(), result_graph_dict["stack1"]) - self.assertEqual({"stack1"}, result_graph_dict["stack2"]) - self.assertEqual(BaseAction.DESCRIPTION, plan.description) - self.assertTrue(plan.require_unlocked) + assert len(result_graph_dict) == 2 + assert set() == result_graph_dict["stack1"] + assert {"stack1"} == result_graph_dict["stack2"] + assert plan.description == BaseAction.DESCRIPTION + assert plan.require_unlocked @patch("runway.context.CfnginContext.persistent_graph_tags", new_callable=PropertyMock) @patch("runway.cfngin.actions.base.BaseAction._stack_action", new_callable=PropertyMock) @@ -168,14 +169,14 @@ def test_generate_plan_with_persist_exclude( plan = action._generate_plan(include_persistent_graph=False) - self.assertIsInstance(plan, Plan) + assert isinstance(plan, Plan) # order is different between python2/3 so can't compare dicts result_graph_dict = plan.graph.to_dict() - self.assertEqual(2, len(result_graph_dict)) - self.assertEqual(set(), result_graph_dict["stack1"]) - self.assertEqual({"stack1"}, result_graph_dict["stack2"]) - self.assertEqual(BaseAction.DESCRIPTION, plan.description) - self.assertTrue(plan.require_unlocked) + assert len(result_graph_dict) == 2 + assert set() == result_graph_dict["stack1"] + assert {"stack1"} == result_graph_dict["stack2"] + assert plan.description == BaseAction.DESCRIPTION + assert plan.require_unlocked @patch("runway.context.CfnginContext.persistent_graph_tags", new_callable=PropertyMock) @patch("runway.cfngin.actions.base.BaseAction._stack_action", new_callable=PropertyMock) @@ -197,16 +198,16 @@ def test_generate_plan_with_persist_include( plan = action._generate_plan(include_persistent_graph=True) - self.assertIsInstance(plan, Plan) + assert isinstance(plan, Plan) mock_tags.assert_called_once() # order is different between python2/3 so can't compare dicts result_graph_dict = plan.graph.to_dict() - self.assertEqual(3, len(result_graph_dict)) - self.assertEqual(set(), result_graph_dict["stack1"]) - self.assertEqual({"stack1"}, result_graph_dict["stack2"]) - self.assertEqual(set(), result_graph_dict["removed"]) - self.assertEqual(BaseAction.DESCRIPTION, plan.description) - self.assertTrue(plan.require_unlocked) + assert len(result_graph_dict) == 3 + assert set() == result_graph_dict["stack1"] + assert {"stack1"} == result_graph_dict["stack2"] + assert set() == result_graph_dict["removed"] + assert plan.description == BaseAction.DESCRIPTION + assert plan.require_unlocked @patch("runway.context.CfnginContext.persistent_graph_tags", new_callable=PropertyMock) @patch("runway.cfngin.actions.base.BaseAction._stack_action", new_callable=PropertyMock) @@ -228,21 +229,21 @@ def test_generate_plan_with_persist_no_lock_req( plan = action._generate_plan(include_persistent_graph=True, require_unlocked=False) - 
self.assertIsInstance(plan, Plan) + assert isinstance(plan, Plan) mock_tags.assert_called_once() # order is different between python2/3 so can't compare dicts result_graph_dict = plan.graph.to_dict() - self.assertEqual(3, len(result_graph_dict)) - self.assertEqual(set(), result_graph_dict["stack1"]) - self.assertEqual({"stack1"}, result_graph_dict["stack2"]) - self.assertEqual(set(), result_graph_dict["removed"]) - self.assertEqual(BaseAction.DESCRIPTION, plan.description) - self.assertFalse(plan.require_unlocked) + assert len(result_graph_dict) == 3 + assert set() == result_graph_dict["stack1"] + assert {"stack1"} == result_graph_dict["stack2"] + assert set() == result_graph_dict["removed"] + assert plan.description == BaseAction.DESCRIPTION + assert not plan.require_unlocked def test_stack_template_url(self) -> None: """Test stack template url.""" context = mock_context("mynamespace") - blueprint = MockBlueprint(name="myblueprint", context=context) + blueprint = MockBlueprint(name="test-blueprint", context=context) region = "us-east-1" endpoint = "https://example.com" @@ -258,8 +259,8 @@ def test_stack_template_url(self) -> None: autospec=True, return_value=endpoint, ): - self.assertEqual( - action.stack_template_url(blueprint), - f"{endpoint}/cfngin-{context.namespace}-{region}/stack_templates/" - f"{context.namespace}-{blueprint.name}/{blueprint.name}-{MOCK_VERSION}.json", + assert ( + action.stack_template_url(blueprint) + == f"{endpoint}/cfngin-{context.namespace}-{region}/stack_templates/" + f"{context.namespace}-{blueprint.name}/{blueprint.name}-{MOCK_VERSION}.json" ) diff --git a/tests/unit/cfngin/actions/test_deploy.py b/tests/unit/cfngin/actions/test_deploy.py index d1b70b1f5..ed187c41d 100644 --- a/tests/unit/cfngin/actions/test_deploy.py +++ b/tests/unit/cfngin/actions/test_deploy.py @@ -1,14 +1,14 @@ """Tests for runway.cfngin.actions.deploy.""" -# pyright: basic from __future__ import annotations import unittest from collections import namedtuple -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast +from datetime import datetime +from typing import TYPE_CHECKING, Any, Optional, cast +from unittest.mock import MagicMock, PropertyMock, patch import pytest -from mock import MagicMock, PropertyMock, patch from runway.cfngin import exceptions from runway.cfngin.actions import deploy @@ -49,7 +49,7 @@ from runway.cfngin.status import Status -def mock_stack_parameters(parameters: Dict[str, Any]) -> StackTypeDef: +def mock_stack_parameters(parameters: dict[str, Any]) -> StackTypeDef: """Mock stack parameters.""" return { # type: ignore "Parameters": [{"ParameterKey": k, "ParameterValue": v} for k, v in parameters.items()] @@ -59,25 +59,25 @@ def mock_stack_parameters(parameters: Dict[str, Any]) -> StackTypeDef: class MockProvider(BaseProvider): """Mock provider.""" - _outputs: Dict[str, Dict[str, str]] + _outputs: dict[str, dict[str, str]] - def __init__(self, *, outputs: Optional[Dict[str, Dict[str, str]]] = None, **_: Any) -> None: + def __init__(self, *, outputs: dict[str, dict[str, str]] | None = None, **_: Any) -> None: """Instantiate class.""" self._outputs = outputs or {} - def set_outputs(self, outputs: Dict[str, Dict[str, str]]) -> None: + def set_outputs(self, outputs: dict[str, dict[str, str]]) -> None: """Set outputs.""" self._outputs = outputs def get_stack( self, stack_name: str, *_args: Any, **_kwargs: Any - ) -> Dict[str, Union[Dict[str, str], str]]: + ) -> dict[str, dict[str, str] | str]: """Get stack.""" if stack_name not in self._outputs: 
raise exceptions.StackDoesNotExist(stack_name) return {"name": stack_name, "outputs": self._outputs[stack_name]} - def get_outputs(self, stack_name: str, *args: Any, **kwargs: Any) -> Dict[str, Any]: + def get_outputs(self, stack_name: str, *_args: Any, **_kwargs: Any) -> dict[str, Any]: """Get outputs.""" stack = self.get_stack(stack_name) return stack["outputs"] # type: ignore @@ -89,9 +89,9 @@ class MockStack: def __init__( self, name: str, - in_progress_behavior: Optional[str] = None, - tags: Any = None, - **_: Any, + in_progress_behavior: str | None = None, + *_args: Any, + **_kwargs: Any, ) -> None: """Instantiate class.""" self.name = name @@ -151,7 +151,7 @@ def test_upload_disabled_setter_raise_cfngin_bucket_required( Action(cfngin_context).upload_disabled = False -class TestBuildAction(unittest.TestCase): # TODO: refactor tests into the TestAction class +class TestBuildAction(unittest.TestCase): # TODO (kyle): refactor tests into the TestAction class """Tests for runway.cfngin.actions.deploy.BuildAction.""" def setUp(self) -> None: @@ -164,10 +164,10 @@ def setUp(self) -> None: ) def _get_context( - self, extra_config_args: Optional[Dict[str, Any]] = None, **kwargs: Any + self, extra_config_args: Optional[dict[str, Any]] = None, **kwargs: Any ) -> CfnginContext: """Get context.""" - config: Dict[str, Any] = { + config: dict[str, Any] = { "namespace": "namespace", "stacks": [ {"name": "vpc", "template_path": "."}, @@ -226,22 +226,22 @@ def test_generate_plan_persist_destroy(self, mock_graph_tags: PropertyMock) -> N deploy_action = deploy.Action(context=context) plan = cast(Plan, deploy_action._Action__generate_plan()) # type: ignore - self.assertIsInstance(plan, Plan) - self.assertEqual(deploy.Action.DESCRIPTION, plan.description) + assert isinstance(plan, Plan) + assert plan.description == deploy.Action.DESCRIPTION mock_graph_tags.assert_called_once() # order is different between python2/3 so can't compare dicts result_graph_dict = plan.graph.to_dict() - self.assertEqual(5, len(result_graph_dict)) - self.assertEqual(set(), result_graph_dict["other"]) - self.assertEqual(set(), result_graph_dict["removed"]) - self.assertEqual(set(), result_graph_dict["vpc"]) - self.assertEqual({"vpc"}, result_graph_dict["bastion"]) - self.assertEqual({"bastion", "vpc"}, result_graph_dict["db"]) - self.assertEqual(deploy_action._destroy_stack, plan.graph.steps["removed"].fn) - self.assertEqual(deploy_action._launch_stack, plan.graph.steps["vpc"].fn) - self.assertEqual(deploy_action._launch_stack, plan.graph.steps["bastion"].fn) - self.assertEqual(deploy_action._launch_stack, plan.graph.steps["db"].fn) - self.assertEqual(deploy_action._launch_stack, plan.graph.steps["other"].fn) + assert len(result_graph_dict) == 5 + assert set() == result_graph_dict["other"] + assert set() == result_graph_dict["removed"] + assert set() == result_graph_dict["vpc"] + assert {"vpc"} == result_graph_dict["bastion"] + assert {"bastion", "vpc"} == result_graph_dict["db"] + assert deploy_action._destroy_stack == plan.graph.steps["removed"].fn + assert deploy_action._launch_stack == plan.graph.steps["vpc"].fn + assert deploy_action._launch_stack == plan.graph.steps["bastion"].fn + assert deploy_action._launch_stack == plan.graph.steps["db"].fn + assert deploy_action._launch_stack == plan.graph.steps["other"].fn def test_handle_missing_params(self) -> None: """Test handle missing params.""" @@ -257,17 +257,17 @@ def test_handle_missing_params(self) -> None: result = _handle_missing_parameters( parameter_values, all_params, 
required, existing_stack_params ) - self.assertEqual(sorted(result), sorted(expected_params.items())) + assert sorted(result) == sorted(expected_params.items()) def test_missing_params_no_existing_stack(self) -> None: """Test missing params no existing stack.""" all_params = ["Address", "StackName"] required = ["Address"] - parameter_values: Dict[str, Any] = {} - with self.assertRaises(exceptions.MissingParameterException) as result: + parameter_values: dict[str, Any] = {} + with pytest.raises(exceptions.MissingParameterException) as result: _handle_missing_parameters(parameter_values, all_params, required) - self.assertEqual(result.exception.parameters, required) + assert result.exception.parameters == required def test_existing_stack_params_does_not_override_given_params(self) -> None: """Test existing stack params does not override given params.""" @@ -279,22 +279,19 @@ def test_existing_stack_params_does_not_override_given_params(self) -> None: result = _handle_missing_parameters( parameter_values, all_params, required, existing_stack_params ) - self.assertEqual(sorted(result), sorted(parameter_values.items())) + assert sorted(result) == sorted(parameter_values.items()) def test_generate_plan(self) -> None: """Test generate plan.""" context = self._get_context() deploy_action = deploy.Action(context, cancel=MockThreadingEvent()) # type: ignore plan = cast(Plan, deploy_action._Action__generate_plan()) # type: ignore - self.assertEqual( - { - "db": {"bastion", "vpc"}, - "bastion": {"vpc"}, - "other": set(), - "vpc": set(), - }, - plan.graph.to_dict(), - ) + assert plan.graph.to_dict() == { + "db": {"bastion", "vpc"}, + "bastion": {"vpc"}, + "other": set(), + "vpc": set(), + } def test_does_not_execute_plan_when_outline_specified(self) -> None: """Test does not execute plan when outline specified.""" @@ -302,7 +299,7 @@ def test_does_not_execute_plan_when_outline_specified(self) -> None: deploy_action = deploy.Action(context, cancel=MockThreadingEvent()) # type: ignore with patch.object(deploy_action, "_generate_plan") as mock_generate_plan: deploy_action.run(outline=True) - self.assertEqual(mock_generate_plan().execute.call_count, 0) + assert mock_generate_plan().execute.call_count == 0 def test_execute_plan_when_outline_not_specified(self) -> None: """Test execute plan when outline not specified.""" @@ -310,7 +307,7 @@ def test_execute_plan_when_outline_not_specified(self) -> None: deploy_action = deploy.Action(context, cancel=MockThreadingEvent()) # type: ignore with patch.object(deploy_action, "_generate_plan") as mock_generate_plan: deploy_action.run(outline=False) - self.assertEqual(mock_generate_plan().execute.call_count, 1) + assert mock_generate_plan().execute.call_count == 1 @patch("runway.context.CfnginContext.persistent_graph_tags", new_callable=PropertyMock) @patch("runway.context.CfnginContext.lock_persistent_graph", new_callable=MagicMock) @@ -349,7 +346,7 @@ def test_should_update(self) -> None: for test in test_scenarios: mock_stack.locked = test.locked mock_stack.force = test.force - self.assertEqual(deploy.should_update(mock_stack), test.result) # type: ignore + assert deploy.should_update(mock_stack) == test.result # type: ignore def test_should_ensure_cfn_bucket(self) -> None: """Test should ensure cfn bucket.""" @@ -366,9 +363,7 @@ def test_should_ensure_cfn_bucket(self) -> None: dump = scenario["dump"] result = scenario["result"] try: - self.assertEqual( - deploy.should_ensure_cfn_bucket(outline, dump), result # type: ignore - ) + assert 
deploy.should_ensure_cfn_bucket(outline, dump) == result # type: ignore except AssertionError as err: err.args += ("scenario", str(scenario)) raise @@ -385,10 +380,10 @@ def test_should_submit(self) -> None: mock_stack.name = "test-stack" for test in test_scenarios: mock_stack.enabled = test.enabled - self.assertEqual(deploy.should_submit(mock_stack), test.result) # type: ignore + assert deploy.should_submit(mock_stack) == test.result # type: ignore -class TestLaunchStack(TestBuildAction): # TODO: refactor tests to be pytest tests +class TestLaunchStack(TestBuildAction): # TODO (kyle): refactor tests to be pytest tests """Tests for runway.cfngin.actions.deploy.BuildAction launch stack.""" def setUp(self) -> None: @@ -420,7 +415,7 @@ def patch_object(*args: Any, **kwargs: Any) -> None: self.addCleanup(mock_object.stop) mock_object.start() - def get_stack(name: str, *_args: Any, **_kwargs: Any) -> Dict[str, Any]: + def get_stack(name: str, *_args: Any, **_kwargs: Any) -> dict[str, Any]: if name != self.stack.name or not self.stack_status: raise StackDoesNotExist(name) @@ -431,11 +426,12 @@ def get_stack(name: str, *_args: Any, **_kwargs: Any) -> Dict[str, Any]: "Tags": [], } - def get_events(name: str, *_args: Any, **_kwargs: Any) -> List[Dict[str, str]]: + def get_events(*_args: Any, **_kwargs: Any) -> list[dict[str, Any]]: return [ { "ResourceStatus": "ROLLBACK_IN_PROGRESS", "ResourceStatusReason": "CFN fail", + "Timestamp": datetime(2015, 1, 1), } ] @@ -456,12 +452,12 @@ def _advance( """Advance.""" self.stack_status = new_provider_status status = self.step._run_once() - self.assertEqual(status, expected_status) - self.assertEqual(status.reason, expected_reason) + assert status == expected_status + assert status.reason == expected_reason def test_launch_stack_disabled(self) -> None: """Test launch stack disabled.""" - self.assertEqual(self.step.status, PENDING) + assert self.step.status == PENDING self.stack.enabled = False self._advance(None, NotSubmittedStatus(), "disabled") @@ -469,7 +465,7 @@ def test_launch_stack_disabled(self) -> None: def test_launch_stack_create(self) -> None: """Test launch stack create.""" # initial status should be PENDING - self.assertEqual(self.step.status, PENDING) + assert self.step.status == PENDING # initial run should return SUBMITTED since we've passed off to CF self._advance(None, SUBMITTED, "creating new stack") @@ -483,7 +479,7 @@ def test_launch_stack_create(self) -> None: def test_launch_stack_create_rollback(self) -> None: """Test launch stack create rollback.""" # initial status should be PENDING - self.assertEqual(self.step.status, PENDING) + assert self.step.status == PENDING # initial run should return SUBMITTED since we've passed off to CF self._advance(None, SUBMITTED, "creating new stack") @@ -505,7 +501,7 @@ def test_launch_stack_recreate(self) -> None: self.provider.recreate_failed = True # initial status should be PENDING - self.assertEqual(self.step.status, PENDING) + assert self.step.status == PENDING # first action with an existing failed stack should be deleting it self._advance("ROLLBACK_COMPLETE", SUBMITTED, "destroying stack for re-creation") @@ -525,7 +521,7 @@ def test_launch_stack_recreate(self) -> None: def test_launch_stack_update_skipped(self) -> None: """Test launch stack update skipped.""" # initial status should be PENDING - self.assertEqual(self.step.status, PENDING) + assert self.step.status == PENDING # start the upgrade, that will be skipped self.provider.update_stack.side_effect = StackDidNotChange # type: ignore @@ 
-534,7 +530,7 @@ def test_launch_stack_update_skipped(self) -> None: def test_launch_stack_update_rollback(self) -> None: """Test launch stack update rollback.""" # initial status should be PENDING - self.assertEqual(self.step.status, PENDING) + assert self.step.status == PENDING # initial run should return SUBMITTED since we've passed off to CF self._advance("CREATE_COMPLETE", SUBMITTED, "updating existing stack") @@ -551,7 +547,7 @@ def test_launch_stack_update_rollback(self) -> None: def test_launch_stack_update_success(self) -> None: """Test launch stack update success.""" # initial status should be PENDING - self.assertEqual(self.step.status, PENDING) + assert self.step.status == PENDING # initial run should return SUBMITTED since we've passed off to CF self._advance("CREATE_COMPLETE", SUBMITTED, "updating existing stack") @@ -563,7 +559,7 @@ def test_launch_stack_update_success(self) -> None: self._advance("UPDATE_COMPLETE", COMPLETE, "updating existing stack") -class TestFunctions(unittest.TestCase): # TODO: refactor tests to be pytest tests +class TestFunctions(unittest.TestCase): # TODO (kyle): refactor tests to be pytest tests """Tests for runway.cfngin.actions.deploy module level functions.""" def setUp(self) -> None: @@ -580,8 +576,8 @@ def test_resolve_parameters_unused_parameter(self) -> None: } params = {"a": "Apple", "c": "Carrot"} resolved_params = _resolve_parameters(params, self.blueprint) - self.assertNotIn("c", resolved_params) - self.assertIn("a", resolved_params) + assert "c" not in resolved_params + assert "a" in resolved_params def test_resolve_parameters_none_conversion(self) -> None: """Test resolve parameters none conversion.""" @@ -591,7 +587,7 @@ def test_resolve_parameters_none_conversion(self) -> None: } params = {"a": None, "c": "Carrot"} resolved_params = _resolve_parameters(params, self.blueprint) - self.assertNotIn("a", resolved_params) + assert "a" not in resolved_params def test_resolve_parameters_booleans(self) -> None: """Test resolve parameters booleans.""" @@ -601,5 +597,5 @@ def test_resolve_parameters_booleans(self) -> None: } params = {"a": True, "b": False} resolved_params = _resolve_parameters(params, self.blueprint) - self.assertEqual("true", resolved_params["a"]) - self.assertEqual("false", resolved_params["b"]) + assert resolved_params["a"] == "true" + assert resolved_params["b"] == "false" diff --git a/tests/unit/cfngin/actions/test_destroy.py b/tests/unit/cfngin/actions/test_destroy.py index e74ade880..c118b6ccd 100644 --- a/tests/unit/cfngin/actions/test_destroy.py +++ b/tests/unit/cfngin/actions/test_destroy.py @@ -4,9 +4,8 @@ from __future__ import annotations import unittest -from typing import Any, Dict, Optional - -from mock import MagicMock, PropertyMock, patch +from typing import Any, Optional +from unittest.mock import MagicMock, PropertyMock, patch from runway.cfngin.actions import destroy from runway.cfngin.exceptions import StackDoesNotExist @@ -21,7 +20,7 @@ class MockStack: """Mock our local CFNgin stack and an AWS provider stack.""" - def __init__(self, name: str, tags: Any = None, **_: Any) -> None: + def __init__(self, name: str, *_args: Any, **_kwargs: Any) -> None: """Instantiate class.""" self.name = name self.fqn = name @@ -33,13 +32,13 @@ def __init__(self, name: str, tags: Any = None, **_: Any) -> None: class TestDestroyAction(unittest.TestCase): """Tests for runway.cfngin.actions.destroy.DestroyAction.""" - def setUp(self): + def setUp(self) -> None: """Run before tests.""" self.context = self._get_context() 
self.action = destroy.Action(self.context, cancel=MockThreadingEvent()) # type: ignore def _get_context( - self, extra_config_args: Optional[Dict[str, Any]] = None, **kwargs: Any + self, extra_config_args: Optional[dict[str, Any]] = None, **kwargs: Any ) -> CfnginContext: """Get context.""" config = { @@ -67,28 +66,25 @@ def _get_context( def test_generate_plan(self) -> None: """Test generate plan.""" plan = self.action._generate_plan(reverse=True) - self.assertEqual( - { - "vpc": {"db", "instance", "bastion"}, - "other": set(), - "bastion": {"instance", "db"}, - "instance": {"db"}, - "db": {"other"}, - }, - plan.graph.to_dict(), - ) + assert plan.graph.to_dict() == { + "vpc": {"db", "instance", "bastion"}, + "other": set(), + "bastion": {"instance", "db"}, + "instance": {"db"}, + "db": {"other"}, + } def test_only_execute_plan_when_forced(self) -> None: """Test only execute plan when forced.""" with patch.object(self.action, "_generate_plan") as mock_generate_plan: self.action.run(force=False) - self.assertEqual(mock_generate_plan().execute.call_count, 0) + assert mock_generate_plan().execute.call_count == 0 def test_execute_plan_when_forced(self) -> None: """Test execute plan when forced.""" with patch.object(self.action, "_generate_plan") as mock_generate_plan: self.action.run(force=True) - self.assertEqual(mock_generate_plan().execute.call_count, 1) + assert mock_generate_plan().execute.call_count == 1 def test_destroy_stack_complete_if_state_submitted(self) -> None: """Test destroy stack complete if state submitted.""" @@ -100,11 +96,11 @@ def test_destroy_stack_complete_if_state_submitted(self) -> None: status = self.action._destroy_stack(MockStack("vpc"), status=PENDING) # type: ignore # if we haven't processed the step (ie. has never been SUBMITTED, # should be skipped) - self.assertEqual(status, SKIPPED) + assert status == SKIPPED status = self.action._destroy_stack(MockStack("vpc"), status=SUBMITTED) # type: ignore # if we have processed the step and then can't find the stack, it means # we successfully deleted it - self.assertEqual(status, COMPLETE) + assert status == COMPLETE def test_destroy_stack_delete_failed(self) -> None: """Test _destroy_stack DELETE_FAILED.""" @@ -147,7 +143,7 @@ def get_stack(stack_name: Any) -> Any: mock_provider.get_stack.side_effect = StackDoesNotExist("mock") step.run() - self.assertEqual(step.status, SKIPPED) + assert step.status == SKIPPED # simulate stack getting successfully deleted mock_provider.get_stack.side_effect = get_stack @@ -155,17 +151,17 @@ def get_stack(stack_name: Any) -> Any: mock_provider.is_stack_in_progress.return_value = False step._run_once() - self.assertEqual(step.status, SUBMITTED) + assert step.status == SUBMITTED mock_provider.is_stack_destroyed.return_value = False mock_provider.is_stack_in_progress.return_value = True step._run_once() - self.assertEqual(step.status, SUBMITTED) + assert step.status == SUBMITTED mock_provider.is_stack_destroyed.return_value = True mock_provider.is_stack_in_progress.return_value = False step._run_once() - self.assertEqual(step.status, COMPLETE) + assert step.status == COMPLETE @patch("runway.context.CfnginContext.persistent_graph_tags", new_callable=PropertyMock) @patch("runway.context.CfnginContext.lock_persistent_graph", new_callable=MagicMock) diff --git a/tests/unit/cfngin/actions/test_diff.py b/tests/unit/cfngin/actions/test_diff.py index 5522ab96f..af271bc3d 100644 --- a/tests/unit/cfngin/actions/test_diff.py +++ b/tests/unit/cfngin/actions/test_diff.py @@ -7,10 +7,10 @@ import 
unittest from operator import attrgetter from typing import TYPE_CHECKING, Optional +from unittest.mock import MagicMock, Mock, patch import pytest from botocore.exceptions import ClientError -from mock import MagicMock, Mock, patch from runway.cfngin.actions.diff import ( Action, @@ -25,10 +25,9 @@ from ..factories import MockProviderBuilder, MockThreadingEvent if TYPE_CHECKING: - from pytest import LogCaptureFixture from pytest_mock import MockerFixture - from ...factories import MockCFNginContext + from ...factories import MockCfnginContext MODULE = "runway.cfngin.actions.diff" @@ -49,11 +48,11 @@ class TestAction: def test_pre_run( self, mock_bucket_init: MagicMock, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, bucket_name: Optional[str], forbidden: bool, not_found: bool, - cfngin_context: MockCFNginContext, + cfngin_context: MockCfnginContext, ) -> None: """Test pre_run.""" caplog.set_level(logging.DEBUG, logger=MODULE) @@ -85,8 +84,8 @@ def test_pre_run( @pytest.mark.parametrize("stack_not_exist", [False, True]) def test__diff_stack_validationerror_template_too_large( self, - caplog: LogCaptureFixture, - cfngin_context: MockCFNginContext, + caplog: pytest.LogCaptureFixture, + cfngin_context: MockCfnginContext, mocker: MockerFixture, provider_get_stack: MagicMock, stack_not_exist: bool, @@ -142,31 +141,28 @@ class TestDictValueFormat(unittest.TestCase): def test_status(self) -> None: """Test status.""" added = DictValue("k0", None, "value_0") - self.assertEqual(added.status(), DictValue.ADDED) + assert added.status() == DictValue.ADDED removed = DictValue("k1", "value_1", None) - self.assertEqual(removed.status(), DictValue.REMOVED) + assert removed.status() == DictValue.REMOVED modified = DictValue("k2", "value_1", "value_2") - self.assertEqual(modified.status(), DictValue.MODIFIED) + assert modified.status() == DictValue.MODIFIED unmodified = DictValue("k3", "value_1", "value_1") - self.assertEqual(unmodified.status(), DictValue.UNMODIFIED) + assert unmodified.status() == DictValue.UNMODIFIED def test_format(self) -> None: """Test format.""" added = DictValue("k0", None, "value_0") - self.assertEqual(added.changes(), [f"+{added.key} = {added.new_value}"]) + assert added.changes() == [f"+{added.key} = {added.new_value}"] removed = DictValue("k1", "value_1", None) - self.assertEqual(removed.changes(), [f"-{removed.key} = {removed.old_value}"]) + assert removed.changes() == [f"-{removed.key} = {removed.old_value}"] modified = DictValue("k2", "value_1", "value_2") - self.assertEqual( - modified.changes(), - [ - f"-{modified.key} = {modified.old_value}", - f"+{modified.key} = {modified.new_value}", - ], - ) + assert modified.changes() == [ + f"-{modified.key} = {modified.old_value}", + f"+{modified.key} = {modified.new_value}", + ] unmodified = DictValue("k3", "value_1", "value_1") - self.assertEqual(unmodified.changes(), [f" {unmodified.key} = {unmodified.old_value}"]) - self.assertEqual(unmodified.changes(), [f" {unmodified.key} = {unmodified.new_value}"]) + assert unmodified.changes() == [f" {unmodified.key} = {unmodified.old_value}"] + assert unmodified.changes() == [f" {unmodified.key} = {unmodified.new_value}"] class TestDiffDictionary(unittest.TestCase): @@ -186,7 +182,7 @@ def test_diff_dictionaries(self) -> None: } count, changes = diff_dictionaries(old_dict, new_dict) - self.assertEqual(count, 3) + assert count == 3 expected_output = [ DictValue("a", "Apple", "Apple"), DictValue("b", "Banana", "Bob"), @@ -198,10 +194,10 @@ def test_diff_dictionaries(self) -> 
None: # compare all the outputs to the expected change for expected_change in expected_output: change = changes.pop(0) - self.assertEqual(change, expected_change) + assert change == expected_change # No extra output - self.assertEqual(len(changes), 0) + assert len(changes) == 0 class TestDiffParameters(unittest.TestCase): @@ -213,4 +209,4 @@ def test_diff_parameters_no_changes(self) -> None: new_params = {"a": "Apple"} param_diffs = diff_parameters(old_params, new_params) - self.assertEqual(param_diffs, []) + assert param_diffs == [] diff --git a/tests/unit/cfngin/actions/test_init.py b/tests/unit/cfngin/actions/test_init.py index 1e0c8bb95..117663ddc 100644 --- a/tests/unit/cfngin/actions/test_init.py +++ b/tests/unit/cfngin/actions/test_init.py @@ -3,9 +3,9 @@ from __future__ import annotations from typing import TYPE_CHECKING +from unittest.mock import Mock import pytest -from mock import Mock from runway._logging import LogLevels from runway.cfngin.actions.init import Action @@ -14,7 +14,6 @@ from runway.core.providers.aws.s3 import Bucket if TYPE_CHECKING: - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from runway.context import CfnginContext @@ -75,7 +74,7 @@ def test_default_cfngin_bucket_stack( def test_run( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cfngin_context: CfnginContext, mocker: MockerFixture, ) -> None: @@ -104,7 +103,7 @@ def test_run( def test_run_cfngin_bucket_region( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cfngin_context: CfnginContext, mocker: MockerFixture, ) -> None: @@ -136,7 +135,7 @@ def test_run_cfngin_bucket_region( def test_run_exists( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cfngin_context: CfnginContext, mocker: MockerFixture, ) -> None: @@ -164,7 +163,7 @@ def test_run_forbidden(self, cfngin_context: CfnginContext, mocker: MockerFixtur def test_run_get_stack( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cfngin_context: CfnginContext, mocker: MockerFixture, ) -> None: @@ -193,7 +192,7 @@ def test_run_get_stack( def test_run_no_cfngin_bucket( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cfngin_context: CfnginContext, mocker: MockerFixture, ) -> None: diff --git a/tests/unit/cfngin/blueprints/test_base.py b/tests/unit/cfngin/blueprints/test_base.py index 7d92cf7ef..a6ba79324 100644 --- a/tests/unit/cfngin/blueprints/test_base.py +++ b/tests/unit/cfngin/blueprints/test_base.py @@ -3,10 +3,10 @@ from __future__ import annotations import json -from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Union +from typing import TYPE_CHECKING, Any, ClassVar, Union +from unittest.mock import Mock import pytest -from mock import Mock from troposphere import Parameter, Ref, s3, sns from runway.cfngin.blueprints.base import ( @@ -46,14 +46,14 @@ class SampleBlueprint(Blueprint): """Sample Blueprint to use for testing.""" - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "Var0": {"type": CFNString, "default": "test"}, "Var1": {"type": str, "default": ""}, } def create_template(self) -> None: """Create template.""" - return None + return def resolve_troposphere_var(tpe: Any, value: Any, **kwargs: Any) -> Any: @@ -171,11 +171,11 @@ def test_init_raise_attribute_error(self, cfngin_context: CfnginContext) -> None """Test __init__.""" class _Blueprint(Blueprint): - PARAMETERS: ClassVar[Dict[str, BlueprintVariableTypeDef]] = {} + 
PARAMETERS: ClassVar[dict[str, BlueprintVariableTypeDef]] = {} def create_template(self) -> None: """Create template.""" - return None + return with pytest.raises(AttributeError): _Blueprint("test", cfngin_context) @@ -226,7 +226,7 @@ def test_required_parameter_definitions(self, cfngin_context: CfnginContext) -> """Test required_parameter_definitions.""" class _Blueprint(SampleBlueprint): - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "Var0": {"type": CFNString}, "Var1": {"type": str, "default": ""}, } @@ -269,7 +269,7 @@ def test_to_json(self, cfngin_context: CfnginContext) -> None: """Test to_json.""" class _Blueprint(Blueprint): - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "Param1": {"default": "default", "type": CFNString}, "Param2": {"type": CFNNumber}, "Param3": {"type": CFNCommaDelimitedList}, @@ -307,7 +307,7 @@ def test_variables(self, cfngin_context: CfnginContext) -> None: """Test variables.""" class _Blueprint(Blueprint): - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = {"Var0": {"type": str}} + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = {"Var0": {"type": str}} def create_template(self) -> None: """Create template.""" @@ -359,7 +359,7 @@ def test_to_parameter_value(self) -> None: (1, "1"), ], ) - def test_value(self, expected: Union[List[str], str], provided: Any) -> None: + def test_value(self, expected: Union[list[str], str], provided: Any) -> None: """Test value.""" assert CFNParameter("myParameter", provided).value == expected @@ -407,7 +407,7 @@ def test_resolve_variable_allowed_values() -> None: """Test resolve_variable.""" var_name = "testVar" var_def: BlueprintVariableTypeDef = {"type": str, "allowed_values": ["allowed"]} - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 resolve_variable(var_name, var_def, Variable(var_name, "not_allowed", "cfngin"), "test") assert ( resolve_variable(var_name, var_def, Variable(var_name, "allowed", "cfngin"), "test") diff --git a/tests/unit/cfngin/blueprints/test_cfngin_bucket.py b/tests/unit/cfngin/blueprints/test_cfngin_bucket.py index 8c3df5227..da2e2abb3 100644 --- a/tests/unit/cfngin/blueprints/test_cfngin_bucket.py +++ b/tests/unit/cfngin/blueprints/test_cfngin_bucket.py @@ -3,8 +3,8 @@ from __future__ import annotations from typing import TYPE_CHECKING +from unittest.mock import ANY, Mock -from mock import ANY, Mock from troposphere import s3 from runway import __version__ diff --git a/tests/unit/cfngin/blueprints/test_raw.py b/tests/unit/cfngin/blueprints/test_raw.py index 1af96f2fb..402870905 100644 --- a/tests/unit/cfngin/blueprints/test_raw.py +++ b/tests/unit/cfngin/blueprints/test_raw.py @@ -6,9 +6,9 @@ import json from pathlib import Path from typing import TYPE_CHECKING, cast +from unittest.mock import MagicMock, Mock import pytest -from mock import MagicMock, Mock from runway.cfngin.blueprints.raw import ( RawTemplateBlueprint, @@ -25,7 +25,6 @@ from ..factories import mock_context if TYPE_CHECKING: - from pytest import MonkeyPatch from pytest_mock import MockerFixture from runway.context import CfnginContext @@ -139,7 +138,8 @@ def test_parameter_definitions_yaml(self) -> None: def test_parameter_values(self, cfngin_context: CfnginContext, tmp_path: Path) -> None: """Test parameter_values.""" obj = RawTemplateBlueprint("test", cfngin_context, raw_template_path=tmp_path) - assert not obj.parameter_values 
and isinstance(obj.parameter_values, dict) + assert not obj.parameter_values + assert isinstance(obj.parameter_values, dict) obj._resolved_variables = {"var": "val"} del obj.parameter_values assert obj.parameter_values == {"var": "val"} @@ -310,12 +310,12 @@ def test_get_template_path_local_file(tmp_path: Path) -> None: assert template_path.samefile(cast(Path, result)) -def test_get_template_path_invalid_file(cd_tmp_path: Path) -> None: +def test_get_template_path_invalid_file(cd_tmp_path: Path) -> None: # noqa: ARG001 """Verify get_template_path with an invalid filename.""" assert get_template_path(Path("cfn_template.json")) is None -def test_get_template_path_file_in_syspath(tmp_path: Path, monkeypatch: MonkeyPatch) -> None: +def test_get_template_path_file_in_syspath(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: """Verify get_template_path with a file in sys.path. This ensures templates are able to be retrieved from remote packages. diff --git a/tests/unit/cfngin/blueprints/test_testutil.py b/tests/unit/cfngin/blueprints/test_testutil.py index 5dee92cd6..ee8dd8ee5 100644 --- a/tests/unit/cfngin/blueprints/test_testutil.py +++ b/tests/unit/cfngin/blueprints/test_testutil.py @@ -3,6 +3,7 @@ # pyright: basic import unittest +import pytest from troposphere import ecr from runway.cfngin.blueprints.base import Blueprint @@ -48,7 +49,7 @@ def test_create_template_fails(self) -> None: [Variable("Repositories", ["repo1", "repo2", "repo3"], "cfngin")] ) blueprint.create_template() - with self.assertRaises(AssertionError): + with pytest.raises(AssertionError): self.assertRenderedBlueprint(blueprint) diff --git a/tests/unit/cfngin/blueprints/variables/test_types.py b/tests/unit/cfngin/blueprints/variables/test_types.py index 17e5a6178..09e592493 100644 --- a/tests/unit/cfngin/blueprints/variables/test_types.py +++ b/tests/unit/cfngin/blueprints/variables/test_types.py @@ -3,7 +3,6 @@ from __future__ import annotations import re -from typing import Type import pytest @@ -24,7 +23,7 @@ def handle_ssm_parameter_value(value: str) -> str: @pytest.mark.parametrize("kls", AWS_CLASSES) -def test_aws_types(kls: Type[CFNType]) -> None: +def test_aws_types(kls: type[CFNType]) -> None: """Test variable types for parameter types beginning with ``AWS::``. This does not test the formatting of the value. 
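# Illustrative sketch (not part of the diff above): the annotation changes in these
# hunks all follow the same PEP 585 / PEP 604 migration that ruff's pyupgrade (UP)
# rules enforce -- builtin generics and ``X | None`` unions replace ``typing.Dict``,
# ``typing.List``, ``typing.Type``, and ``typing.Optional``. The function below is a
# hypothetical example used only to show the before/after shape, not code from this
# repository:
from __future__ import annotations  # lets the new syntax work on older interpreters

# before: from typing import Dict, List, Optional, Type
# before: def describe(kls: Type[object], tags: Optional[Dict[str, str]] = None) -> List[str]: ...


def describe(kls: type[object], tags: dict[str, str] | None = None) -> list[str]:
    """Hypothetical function showing builtin generics and ``X | None`` unions."""
    return [kls.__name__, *(tags or {})]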
@@ -43,7 +42,7 @@ def test_aws_types(kls: Type[CFNType]) -> None: @pytest.mark.parametrize("kls", CFN_CLASSES) -def test_cfn_types(kls: Type[CFNType]) -> None: +def test_cfn_types(kls: type[CFNType]) -> None: """Test variable types beginning with CFN.""" if kls.__name__.endswith("List") and "CommaDelimited" not in kls.__name__: match = re.search(PATTERN_LIST, kls.__name__) diff --git a/tests/unit/cfngin/conftest.py b/tests/unit/cfngin/conftest.py index fd4ccc771..48b5ce9a2 100644 --- a/tests/unit/cfngin/conftest.py +++ b/tests/unit/cfngin/conftest.py @@ -1,7 +1,5 @@ """Pytest fixtures and plugins.""" -# pyright: basic -import os from pathlib import Path import pytest @@ -12,17 +10,16 @@ @pytest.fixture(scope="package") def cfngin_fixtures() -> Path: """CFNgin fixture directory Path object.""" - path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "fixtures") - return Path(path) + return Path(__file__).parent / "fixtures" -@pytest.fixture +@pytest.fixture() def empty_dag() -> DAG: """Create an empty DAG.""" return DAG() -@pytest.fixture +@pytest.fixture() def basic_dag() -> DAG: """Create a basic DAG.""" dag = DAG() diff --git a/tests/unit/cfngin/factories.py b/tests/unit/cfngin/factories.py index abb9914d3..ce1599417 100644 --- a/tests/unit/cfngin/factories.py +++ b/tests/unit/cfngin/factories.py @@ -3,9 +3,8 @@ # pyright: basic from __future__ import annotations -from typing import TYPE_CHECKING, Any, Dict, NamedTuple, Optional - -from mock import MagicMock +from typing import TYPE_CHECKING, Any, NamedTuple, Optional +from unittest.mock import MagicMock from runway.cfngin.providers.aws.default import ProviderBuilder from runway.config import CfnginConfig, CfnginStackDefinitionModel @@ -26,7 +25,7 @@ class Lookup(NamedTuple): class MockThreadingEvent: """Mock thread events.""" - def wait(self, timeout: Optional[int] = None) -> bool: + def wait(self, timeout: Optional[int] = None) -> bool: # noqa: ARG002 """Mock wait method.""" return False @@ -39,7 +38,9 @@ def __init__(self, *, provider: Provider, region: Optional[str] = None, **_: Any self.provider = provider self.region = region - def build(self, *, profile: Optional[str] = None, region: Optional[str] = None) -> Provider: + def build( + self, *, profile: Optional[str] = None, region: Optional[str] = None # noqa: ARG002 + ) -> Provider: """Mock build method.""" return self.provider @@ -51,7 +52,7 @@ def mock_provider(**kwargs: Any) -> MagicMock: def mock_context( namespace: str = "default", - extra_config_args: Optional[Dict[str, Any]] = None, + extra_config_args: Optional[dict[str, Any]] = None, **kwargs: Any, ) -> CfnginContext: """Mock context.""" @@ -68,7 +69,7 @@ def generate_definition( base_name: str, stack_id: Any = None, **overrides: Any ) -> CfnginStackDefinitionModel: """Generate definitions.""" - definition: Dict[str, Any] = { + definition: dict[str, Any] = { "name": f"{base_name}-{stack_id}" if stack_id else base_name, "class_path": f"tests.unit.cfngin.fixtures.mock_blueprints.{base_name.upper()}", "requires": [], @@ -102,11 +103,11 @@ def myfile_test(self, client_stub): """ - def __init__(self, client_stub: Any): + def __init__(self, client_stub: Any) -> None: """Instantiate class.""" self.client_stub = client_stub - def client(self, region: str) -> Any: + def client(self, region: str) -> Any: # noqa: ARG002 """Return the stubbed client object. 
Args: diff --git a/tests/unit/cfngin/fixtures/mock_blueprints.py b/tests/unit/cfngin/fixtures/mock_blueprints.py index 2ea718d41..8daf38b10 100644 --- a/tests/unit/cfngin/fixtures/mock_blueprints.py +++ b/tests/unit/cfngin/fixtures/mock_blueprints.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, ClassVar, Dict +from typing import TYPE_CHECKING, ClassVar import awacs import awacs.cloudformation @@ -31,7 +31,7 @@ class FunctionalTests(Blueprint): """Creates a stack with an IAM user and access key for functional tests.""" - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "Namespace": { "type": CFNString, "description": "The namespace that the tests will use. " @@ -174,7 +174,7 @@ def create_template(self) -> None: class Dummy(Blueprint): """Dummy blueprint.""" - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "StringVariable": {"type": str, "default": ""} } @@ -192,7 +192,7 @@ class Dummy2(Blueprint): """ - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "StringVariable": {"type": str, "default": ""} } @@ -212,7 +212,7 @@ class LongRunningDummy(Blueprint): """ - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "Count": { "type": int, "description": "The # of WaitConditionHandles to create.", @@ -226,7 +226,7 @@ class LongRunningDummy(Blueprint): }, "OutputValue": { "type": str, - "description": "The value to put in an output to allow for " "updates.", + "description": "The value to put in an output to allow for updates.", "default": "DefaultOutput", }, } @@ -269,7 +269,7 @@ class Broken(Blueprint): """ - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "StringVariable": {"type": str, "default": ""} } @@ -292,7 +292,7 @@ def create_template(self) -> None: class VPC(Blueprint): """VPC blueprint.""" - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "AZCount": {"type": int, "default": 2}, "PrivateSubnets": { "type": CFNCommaDelimitedList, @@ -349,7 +349,7 @@ def create_template(self) -> None: class DiffTester(Blueprint): """Diff test blueprint.""" - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "InstanceType": { "type": CFNString, "description": "NAT EC2 instance type.", @@ -357,11 +357,11 @@ class DiffTester(Blueprint): }, "WaitConditionCount": { "type": int, - "description": "Number of WaitConditionHandle resources " "to add to the template", + "description": "Number of WaitConditionHandle resources to add to the template", }, } - def create_template(self): + def create_template(self) -> None: """Create template.""" for i in range(self.variables["WaitConditionCount"]): self.template.add_resource(WaitConditionHandle(f"VPC{i}")) @@ -370,7 +370,7 @@ def create_template(self): class Bastion(Blueprint): """Bastion blueprint.""" - VARIABLES: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + VARIABLES: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "VpcId": {"type": EC2VPCId, "description": "Vpc Id"}, "DefaultSG": { "type": EC2SecurityGroupId, @@ -378,15 +378,15 @@ class Bastion(Blueprint): }, "PublicSubnets": { "type": 
EC2SubnetIdList, - "description": "Subnets to deploy public " "instances in.", + "description": "Subnets to deploy public instances in.", }, "PrivateSubnets": { "type": EC2SubnetIdList, - "description": "Subnets to deploy private " "instances in.", + "description": "Subnets to deploy private instances in.", }, "AvailabilityZones": { "type": CFNCommaDelimitedList, - "description": "Availability Zones to deploy " "instances in.", + "description": "Availability Zones to deploy instances in.", }, "InstanceType": { "type": CFNString, @@ -416,7 +416,7 @@ class Bastion(Blueprint): }, } - def create_template(self): + def create_template(self) -> None: """Create template.""" return @@ -424,7 +424,7 @@ def create_template(self): class PreOneOhBastion(Blueprint): """Used to ensure old blueprints won't be usable in 1.0.""" - PARAMETERS: ClassVar[Dict[str, BlueprintVariableTypeDef]] = { + PARAMETERS: ClassVar[dict[str, BlueprintVariableTypeDef]] = { "VpcId": {"type": "AWS::EC2::VPC::Id", "description": "Vpc Id"}, "DefaultSG": { "type": "AWS::EC2::SecurityGroup::Id", @@ -432,15 +432,15 @@ class PreOneOhBastion(Blueprint): }, "PublicSubnets": { "type": "List", - "description": "Subnets to deploy public " "instances in.", + "description": "Subnets to deploy public instances in.", }, "PrivateSubnets": { "type": "List", - "description": "Subnets to deploy private " "instances in.", + "description": "Subnets to deploy private instances in.", }, "AvailabilityZones": { "type": "CommaDelimitedList", - "description": "Availability Zones to deploy " "instances in.", + "description": "Availability Zones to deploy instances in.", }, "InstanceType": { "type": "String", diff --git a/tests/unit/cfngin/fixtures/mock_hooks.py b/tests/unit/cfngin/fixtures/mock_hooks.py index 665d88009..5ee91a062 100644 --- a/tests/unit/cfngin/fixtures/mock_hooks.py +++ b/tests/unit/cfngin/fixtures/mock_hooks.py @@ -1,9 +1,9 @@ """Mock hook.""" -from typing import Any, Dict +from typing import Any -def mock_hook(*, value: Any, **_: Any) -> Dict[str, Any]: +def mock_hook(*, value: Any, **_: Any) -> dict[str, Any]: """Mock hook. 
Returns: diff --git a/tests/unit/cfngin/hooks/awslambda/factories.py b/tests/unit/cfngin/hooks/awslambda/factories.py index c153952b0..b6a62510b 100644 --- a/tests/unit/cfngin/hooks/awslambda/factories.py +++ b/tests/unit/cfngin/hooks/awslambda/factories.py @@ -3,8 +3,7 @@ from __future__ import annotations from typing import TYPE_CHECKING - -from mock import Mock +from unittest.mock import Mock from runway.cfngin.hooks.awslambda.base_classes import Project from runway.cfngin.hooks.awslambda.models.args import AwsLambdaHookArgs diff --git a/tests/unit/cfngin/hooks/awslambda/models/test_args.py b/tests/unit/cfngin/hooks/awslambda/models/test_args.py index f5b03cf9f..8bd2a6e15 100644 --- a/tests/unit/cfngin/hooks/awslambda/models/test_args.py +++ b/tests/unit/cfngin/hooks/awslambda/models/test_args.py @@ -3,7 +3,7 @@ from __future__ import annotations from pathlib import Path -from typing import Any, Dict +from typing import Any import pytest from pydantic import ValidationError @@ -43,7 +43,7 @@ def test__validate_runtime_or_docker(self, tmp_path: Path) -> None: "kwargs", [{"image": "test"}, {"file": ""}, {"file": "", "image": "test"}] ) def test__validate_runtime_or_docker_docker_no_runtime( - self, kwargs: Dict[str, Any], tmp_path: Path + self, kwargs: dict[str, Any], tmp_path: Path ) -> None: """Test _validate_runtime_or_docker no runtime if Docker.""" if "file" in kwargs: @@ -93,7 +93,8 @@ def test_field_defaults(self, tmp_path: Path) -> None: runtime="test", source_code=tmp_path, ) - assert not obj.extend_gitignore and isinstance(obj.extend_gitignore, list) + assert not obj.extend_gitignore + assert isinstance(obj.extend_gitignore, list) assert not obj.object_prefix def test_source_code_is_file(self, tmp_path: Path) -> None: diff --git a/tests/unit/cfngin/hooks/awslambda/python_requirements/test__deployment_package.py b/tests/unit/cfngin/hooks/awslambda/python_requirements/test__deployment_package.py index b699fac0e..eda784429 100644 --- a/tests/unit/cfngin/hooks/awslambda/python_requirements/test__deployment_package.py +++ b/tests/unit/cfngin/hooks/awslambda/python_requirements/test__deployment_package.py @@ -3,9 +3,9 @@ from __future__ import annotations from typing import TYPE_CHECKING +from unittest.mock import Mock, call import pytest -from mock import Mock, call from runway.cfngin.hooks.awslambda.python_requirements import PythonDeploymentPackage diff --git a/tests/unit/cfngin/hooks/awslambda/python_requirements/test__docker.py b/tests/unit/cfngin/hooks/awslambda/python_requirements/test__docker.py index f40004b14..42c37e662 100644 --- a/tests/unit/cfngin/hooks/awslambda/python_requirements/test__docker.py +++ b/tests/unit/cfngin/hooks/awslambda/python_requirements/test__docker.py @@ -4,10 +4,10 @@ import logging from typing import TYPE_CHECKING, Optional +from unittest.mock import Mock import pytest from docker.types.services import Mount -from mock import Mock from runway.cfngin.hooks.awslambda.python_requirements import ( PythonDockerDependencyInstaller, @@ -90,7 +90,8 @@ def test_install_commands_no_requirements(self) -> None: result = PythonDockerDependencyInstaller( Mock(requirements_txt=None), client=Mock() ).install_commands - assert not result and isinstance(result, list) + assert not result + assert isinstance(result, list) def test_python_version(self, mocker: MockerFixture) -> None: """Test python_version.""" diff --git a/tests/unit/cfngin/hooks/awslambda/python_requirements/test__project.py b/tests/unit/cfngin/hooks/awslambda/python_requirements/test__project.py 
index 3f0da7460..cdfc56fac 100644 --- a/tests/unit/cfngin/hooks/awslambda/python_requirements/test__project.py +++ b/tests/unit/cfngin/hooks/awslambda/python_requirements/test__project.py @@ -3,10 +3,10 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING, List, Sequence +from typing import TYPE_CHECKING +from unittest.mock import Mock, call import pytest -from mock import Mock, call from runway.cfngin.hooks.awslambda.exceptions import RuntimeMismatchError from runway.cfngin.hooks.awslambda.python_requirements import PythonProject @@ -20,9 +20,9 @@ ) if TYPE_CHECKING: + from collections.abc import Sequence from pathlib import Path - from pytest import LogCaptureFixture from pytest_mock import MockerFixture @@ -181,7 +181,7 @@ def test_install_dependencies_does_not_catch_errors(self, mocker: MockerFixture) ) def test_install_dependencies_skip( - self, caplog: LogCaptureFixture, mocker: MockerFixture + self, caplog: pytest.LogCaptureFixture, mocker: MockerFixture ) -> None: """Test install_dependencies skip because no dependencies.""" caplog.set_level(logging.INFO, logger=MODULE.replace("._", ".")) @@ -315,7 +315,7 @@ def test_poetry_not_poetry_project(self, mocker: MockerFixture) -> None: ) def test_project_type( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, expected: str, mocker: MockerFixture, pipenv_project: bool, @@ -445,7 +445,7 @@ def test_runtime_raise_runtime_mismatch_error_pip(self, mocker: MockerFixture) - ], ) def test_supported_metadata_files( - self, update_expected: List[str], use_pipenv: bool, use_poetry: bool + self, update_expected: list[str], use_pipenv: bool, use_poetry: bool ) -> None: """Test supported_metadata_files.""" expected = {*Pip.CONFIG_FILES} diff --git a/tests/unit/cfngin/hooks/awslambda/test__python_hooks.py b/tests/unit/cfngin/hooks/awslambda/test__python_hooks.py index b9285819b..70ea2a366 100644 --- a/tests/unit/cfngin/hooks/awslambda/test__python_hooks.py +++ b/tests/unit/cfngin/hooks/awslambda/test__python_hooks.py @@ -3,9 +3,9 @@ from __future__ import annotations from typing import TYPE_CHECKING +from unittest.mock import Mock import pytest -from mock import Mock from pydantic import ValidationError from runway.cfngin.hooks.awslambda import PythonFunction, PythonLayer @@ -19,7 +19,7 @@ MODULE = "runway.cfngin.hooks.awslambda._python_hooks" -@pytest.fixture(scope="function") +@pytest.fixture() def args(tmp_path: Path) -> PythonHookArgs: """Fixture for creating default function args.""" return PythonHookArgs( @@ -95,8 +95,8 @@ def test_pre_deploy_always_cleanup(self, args: PythonHookArgs, mocker: MockerFix "deployment_package", Mock(upload=Mock(side_effect=Exception)), ) - with pytest.raises(Exception, match=""): - assert PythonFunction(Mock(), **args.dict()).pre_deploy() + with pytest.raises(Exception): # noqa: B017, PT011 + assert PythonFunction(Mock(), **args.model_dump()).pre_deploy() deployment_package.upload.assert_called_once_with() build_response.assert_not_called() cleanup_on_error.assert_called_once_with() diff --git a/tests/unit/cfngin/hooks/awslambda/test_base_classes.py b/tests/unit/cfngin/hooks/awslambda/test_base_classes.py index 863f593d5..4ecacde96 100644 --- a/tests/unit/cfngin/hooks/awslambda/test_base_classes.py +++ b/tests/unit/cfngin/hooks/awslambda/test_base_classes.py @@ -3,11 +3,10 @@ from __future__ import annotations import logging -from pathlib import Path from typing import TYPE_CHECKING, Any, cast +from unittest.mock import Mock import pytest -from mock import Mock 
from runway.cfngin.hooks.awslambda.base_classes import AwsLambdaHook, Project from runway.cfngin.hooks.awslambda.deployment_package import DeploymentPackage @@ -16,7 +15,8 @@ from runway.cfngin.hooks.awslambda.models.responses import AwsLambdaHookDeployResponse if TYPE_CHECKING: - from pytest import LogCaptureFixture + from pathlib import Path + from pytest_mock import MockerFixture from runway.context import CfnginContext @@ -132,25 +132,25 @@ def test_plan(self, mocker: MockerFixture) -> None: build_response.assert_called_once_with("plan") response_obj.dict.assert_called_once_with(by_alias=True) - def test_post_deploy(self, caplog: LogCaptureFixture) -> None: + def test_post_deploy(self, caplog: pytest.LogCaptureFixture) -> None: """Test post_deploy.""" caplog.set_level(logging.WARNING, logger=MODULE) assert AwsLambdaHook(Mock()).post_deploy() assert f"post_deploy not implimented for {AwsLambdaHook.__name__}" in caplog.messages - def test_post_destroy(self, caplog: LogCaptureFixture) -> None: + def test_post_destroy(self, caplog: pytest.LogCaptureFixture) -> None: """Test post_destroy.""" caplog.set_level(logging.WARNING, logger=MODULE) assert AwsLambdaHook(Mock()).post_destroy() assert f"post_destroy not implimented for {AwsLambdaHook.__name__}" in caplog.messages - def test_pre_deploy(self, caplog: LogCaptureFixture) -> None: + def test_pre_deploy(self, caplog: pytest.LogCaptureFixture) -> None: """Test pre_deploy.""" caplog.set_level(logging.WARNING, logger=MODULE) assert AwsLambdaHook(Mock()).pre_deploy() assert f"pre_deploy not implimented for {AwsLambdaHook.__name__}" in caplog.messages - def test_pre_destroy(self, caplog: LogCaptureFixture) -> None: + def test_pre_destroy(self, caplog: pytest.LogCaptureFixture) -> None: """Test pre_destroy.""" caplog.set_level(logging.WARNING, logger=MODULE) assert AwsLambdaHook(Mock()).pre_destroy() @@ -195,7 +195,7 @@ def test_cache_dir(self, tmp_path: Path) -> None: ) assert Project(args, Mock()).cache_dir == cache_dir - def test_cache_dir_default(self, mocker: MockerFixture, tmp_path: Path) -> None: + def test_cache_dir_default(self, tmp_path: Path) -> None: """Test cache_dir default.""" cache_dir = tmp_path / Project.DEFAULT_CACHE_DIR_NAME cache_dir.mkdir() @@ -242,14 +242,14 @@ def test_compatible_runtimes(self, mocker: MockerFixture, tmp_path: Path) -> Non "foobar" ] - def test_compatible_runtimes_raise_value_error( - self, mocker: MockerFixture, tmp_path: Path - ) -> None: + def test_compatible_runtimes_raise_value_error(self, mocker: MockerFixture) -> None: """Test compatible_runtimes raise ValueError.""" mocker.patch.object(Project, "runtime", "foobar") - with pytest.raises(ValueError) as excinfo: + with pytest.raises( + ValueError, + match=r"runtime \(foobar\) not in compatible runtimes \(foo, bar\)", + ): assert Project(Mock(compatible_runtimes=["foo", "bar"]), Mock()).compatible_runtimes - assert str(excinfo.value) == "runtime (foobar) not in compatible runtimes (foo, bar)" def test_dependency_directory(self, mocker: MockerFixture, tmp_path: Path) -> None: """Test dependency_directory.""" @@ -276,7 +276,8 @@ def test_license(self, tmp_path: Path) -> None: def test_metadata_files(self) -> None: """Test metadata_files.""" result = Project(Mock(), Mock()).metadata_files - assert not result and isinstance(result, tuple) + assert not result + assert isinstance(result, tuple) def test_project_root(self, tmp_path: Path) -> None: """Test project_root.""" @@ -294,7 +295,7 @@ def test_project_root_config_path_is_dir(self, tmp_path: Path) -> 
None: ) def test_project_root_config_path_not_parent_of_source_code( - self, caplog: LogCaptureFixture, tmp_path: Path + self, caplog: pytest.LogCaptureFixture, tmp_path: Path ) -> None: """Test project_root ctx.config_path is not a parent of args.source_code.""" caplog.set_level(logging.INFO) @@ -308,7 +309,7 @@ def test_project_root_config_path_not_parent_of_source_code( == src_path ) assert ( - "ignoring project directory; " "source code located outside of project directory" + "ignoring project directory; source code located outside of project directory" ) in caplog.messages @pytest.mark.parametrize("create_metadata_file", [False, True]) @@ -352,9 +353,10 @@ def test_runtime_raise_runtime_mismatch_error(self, mocker: MockerFixture) -> No def test_runtime_raise_value_error(self, mocker: MockerFixture) -> None: """Test runtime raise ValueError.""" mocker.patch.object(Project, "docker", None, create=True) - with pytest.raises(ValueError) as excinfo: + with pytest.raises( + ValueError, match="runtime could not be determined from the build system" + ): assert not Project(Mock(runtime=None), Mock()).runtime - assert str(excinfo.value) == "runtime could not be determined from the build system" def test_source_code(self, mocker: MockerFixture) -> None: """Test source_code.""" diff --git a/tests/unit/cfngin/hooks/awslambda/test_deployment_package.py b/tests/unit/cfngin/hooks/awslambda/test_deployment_package.py index b26172ae4..4c7d71d70 100644 --- a/tests/unit/cfngin/hooks/awslambda/test_deployment_package.py +++ b/tests/unit/cfngin/hooks/awslambda/test_deployment_package.py @@ -3,15 +3,13 @@ from __future__ import annotations import zipfile -from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast +from typing import TYPE_CHECKING, Any, Optional, cast +from unittest.mock import MagicMock, Mock, PropertyMock, call from urllib.parse import urlencode import igittigitt import pytest from botocore.exceptions import ClientError -from mock import MagicMock, Mock, PropertyMock, call -from typing_extensions import Literal from runway._logging import LogLevels from runway.cfngin.hooks.awslambda.base_classes import Project @@ -35,10 +33,12 @@ from .factories import MockProject if TYPE_CHECKING: + from pathlib import Path + from botocore.stub import Stubber from mypy_boto3_s3.type_defs import PutObjectOutputTypeDef - from pytest import LogCaptureFixture from pytest_mock import MockerFixture + from typing_extensions import Literal from runway.context import CfnginContext @@ -47,7 +47,7 @@ ProjectTypeAlias = Project[AwsLambdaHookArgs] -@pytest.fixture(scope="function") +@pytest.fixture() def project(cfngin_context: CfnginContext, tmp_path: Path) -> ProjectTypeAlias: """Mock project object.""" args = AwsLambdaHookArgs( @@ -229,7 +229,7 @@ def test_bucket_forbidden(self, mocker: MockerFixture, project: ProjectTypeAlias def test_build( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, project: ProjectTypeAlias, ) -> None: @@ -271,7 +271,7 @@ def test_build_file_empty_after_build( archive_file = project.build_directory / "foobar.zip" mocker.patch.object(DeploymentPackage, "archive_file", archive_file) - def _write_zip(package: DeploymentPackage[Any], archive_file: Mock) -> None: + def _write_zip(package: DeploymentPackage[Any], archive_file: Mock) -> None: # noqa: ARG001 package.archive_file.touch() mock_build_zip_dependencies = mocker.patch.object( @@ -289,7 +289,7 @@ def _write_zip(package: DeploymentPackage[Any], archive_file: 
Mock) -> None: def test_build_file_exists( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, project: ProjectTypeAlias, ) -> None: @@ -329,7 +329,7 @@ def test_build_raise_runtime_mismatch_error( mock_build_zip_source_code.assert_not_called() mock_build_fix_file_permissions.assert_not_called() - @pytest.mark.parametrize("url_encoded", [False, True, False, True]) + @pytest.mark.parametrize("url_encoded", [False, True]) def test_build_tag_set( self, mocker: MockerFixture, @@ -432,7 +432,7 @@ def test_init( def test_init_runtime_change( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, project: ProjectTypeAlias, ) -> None: @@ -522,7 +522,7 @@ def test_object_version_id( expected: Optional[str], mocker: MockerFixture, project: ProjectTypeAlias, - response: Dict[str, Any], + response: dict[str, Any], ) -> None: """Test object_version_id.""" mocker.patch.object(DeploymentPackage, "_put_object_response", response) @@ -606,7 +606,7 @@ class TestDeploymentPackageS3Object: def test_build_exists( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, project: ProjectTypeAlias, ) -> None: @@ -743,7 +743,7 @@ def test_delete( def test_exists( self, expected: bool, - head: Dict[str, Any], + head: dict[str, Any], project: ProjectTypeAlias, mocker: MockerFixture, ) -> None: @@ -773,7 +773,7 @@ def test_head(self, mocker: MockerFixture, project: ProjectTypeAlias) -> None: def test_head_403( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, project: ProjectTypeAlias, ) -> None: @@ -799,7 +799,7 @@ def test_head_403( def test_head_404( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, project: ProjectTypeAlias, ) -> None: @@ -871,10 +871,10 @@ def test_md5_checksum_raise_required_tag_not_found( ) def test_object_tags( self, - expected: Dict[str, str], + expected: dict[str, str], mocker: MockerFixture, project: ProjectTypeAlias, - response: Dict[str, List[Dict[str, str]]], + response: dict[str, list[dict[str, str]]], ) -> None: """Test object_tags.""" mocker.patch.object( @@ -901,7 +901,7 @@ def test_object_tags( def test_object_version_id( self, expected: Optional[str], - head: Dict[str, str], + head: dict[str, str], mocker: MockerFixture, project: ProjectTypeAlias, ) -> None: @@ -966,7 +966,7 @@ def test_update_tags(self, mocker: MockerFixture, project: ProjectTypeAlias) -> def test_update_tags_no_change( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, project: ProjectTypeAlias, ) -> None: @@ -993,7 +993,7 @@ def test_update_tags_no_change( def test_upload_exists( self, build: bool, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, project: ProjectTypeAlias, ) -> None: diff --git a/tests/unit/cfngin/hooks/awslambda/test_docker.py b/tests/unit/cfngin/hooks/awslambda/test_docker.py index de59330ea..6f8d5d8e6 100644 --- a/tests/unit/cfngin/hooks/awslambda/test_docker.py +++ b/tests/unit/cfngin/hooks/awslambda/test_docker.py @@ -4,12 +4,12 @@ import logging from typing import TYPE_CHECKING, Optional +from unittest.mock import Mock, call import pytest from docker.errors import DockerException, ImageNotFound from docker.models.images import Image from docker.types.services import Mount -from mock import Mock, call from runway.cfngin.hooks.awslambda.constants import ( AWS_SAM_BUILD_IMAGE_PREFIX, @@ -25,7 +25,6 @@ if 
TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from runway.context import CfnginContext @@ -269,11 +268,10 @@ def test_image_raise_value_error(self, mocker: MockerFixture) -> None: build_image = mocker.patch.object(DockerDependencyInstaller, "build_image") pull_image = mocker.patch.object(DockerDependencyInstaller, "pull_image") obj = DockerDependencyInstaller(project, client=Mock()) - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="docker.file, docker.image, or runtime is required"): assert not obj.image build_image.assert_not_called() pull_image.assert_not_called() - assert str(excinfo.value) == "docker.file, docker.image, or runtime is required" def test_install(self, mocker: MockerFixture) -> None: """Test install.""" @@ -302,7 +300,8 @@ def test_install(self, mocker: MockerFixture) -> None: def test_install_commands(self) -> None: """Test install_commands.""" obj = DockerDependencyInstaller(Mock(), client=Mock()) - assert not obj.install_commands and isinstance(obj.install_commands, list) + assert not obj.install_commands + assert isinstance(obj.install_commands, list) @pytest.mark.parametrize("level", [logging.INFO, logging.DEBUG]) def test_log_docker_msg_bytes(self, level: int, mocker: MockerFixture) -> None: @@ -337,7 +336,7 @@ def test_log_docker_msg_dict(self, level: int, mocker: MockerFixture) -> None: def test_post_install_commands( self, mocker: MockerFixture, - platform_linux: None, + platform_linux: None, # noqa: ARG002 ) -> None: """Test post_install_commands.""" # these methods don't exist on windows so they need to be mocked @@ -353,7 +352,7 @@ def test_post_install_commands( def test_post_install_commands_cache_dir( self, mocker: MockerFixture, - platform_linux: None, + platform_linux: None, # noqa: ARG002 ) -> None: """Test post_install_commands with cache_dir.""" # these methods don't exist on windows so they need to be mocked @@ -368,7 +367,7 @@ def test_post_install_commands_cache_dir( def test_post_install_commands_extra_files( self, mocker: MockerFixture, - platform_linux: None, + platform_linux: None, # noqa: ARG002 ) -> None: """Test post_install_commands with extra_files.""" # these methods don't exist on windows so they need to be mocked @@ -384,7 +383,7 @@ def test_post_install_commands_extra_files( f"chown -R {getuid.return_value}:{getgid.return_value} /var/task/lambda", ] - def test_post_install_commands_windows(self, platform_windows: None) -> None: + def test_post_install_commands_windows(self, platform_windows: None) -> None: # noqa: ARG002 """Test post_install_commands Windows.""" obj = DockerDependencyInstaller( Mock(args=Mock(docker=Mock(extra_files=[])), cache_dir=False), client=Mock() @@ -408,15 +407,16 @@ def test_pre_install_commands_cache_dir(self) -> None: "exists_locally, force", [(False, False), (False, True), (True, True), (True, False)], ) - def test_pull_image(self, caplog: LogCaptureFixture, exists_locally: bool, force: bool) -> None: + def test_pull_image( + self, caplog: pytest.LogCaptureFixture, exists_locally: bool, force: bool + ) -> None: """Test pull_image.""" caplog.set_level(logging.INFO, logger=MODULE) name = "foo:latest" image = Mock(spec=Image, id=FAKE_IMAGE_ID) - if exists_locally: - mock_get = Mock(return_value=image) - else: - mock_get = Mock(side_effect=ImageNotFound("test")) + mock_get = ( + Mock(return_value=image) if exists_locally else Mock(side_effect=ImageNotFound("test")) + ) mock_pull = Mock(return_value=image) 
assert ( diff --git a/tests/unit/cfngin/hooks/awslambda/test_source_code.py b/tests/unit/cfngin/hooks/awslambda/test_source_code.py index 2aeafea8e..289cdb2bb 100644 --- a/tests/unit/cfngin/hooks/awslambda/test_source_code.py +++ b/tests/unit/cfngin/hooks/awslambda/test_source_code.py @@ -4,9 +4,9 @@ from pathlib import Path from typing import TYPE_CHECKING +from unittest.mock import Mock, call import pytest -from mock import Mock, call from runway.cfngin.hooks.awslambda.source_code import SourceCode diff --git a/tests/unit/cfngin/hooks/docker/image/test_build.py b/tests/unit/cfngin/hooks/docker/image/test_build.py index d21de8593..e759829e6 100644 --- a/tests/unit/cfngin/hooks/docker/image/test_build.py +++ b/tests/unit/cfngin/hooks/docker/image/test_build.py @@ -3,12 +3,11 @@ # pyright: basic from __future__ import annotations -from pathlib import Path from typing import TYPE_CHECKING, Optional +from unittest.mock import MagicMock import pytest from docker.models.images import Image -from mock import MagicMock from pydantic import ValidationError from runway.cfngin.hooks.docker.data_models import ( @@ -26,15 +25,17 @@ from .....mock_docker.fake_api import FAKE_IMAGE_ID if TYPE_CHECKING: + from pathlib import Path + from pytest_mock import MockerFixture - from .....factories import MockCFNginContext + from .....factories import MockCfnginContext MODULE = "runway.cfngin.hooks.docker.image._build" -@pytest.fixture(scope="function") +@pytest.fixture() def tmp_dockerfile(cd_tmp_path: Path) -> Path: """Create temporary Dockerfile.""" dockerfile = cd_tmp_path / "Dockerfile" @@ -42,7 +43,7 @@ def tmp_dockerfile(cd_tmp_path: Path) -> Path: return dockerfile -def test_build(cfngin_context: MockCFNginContext, mocker: MockerFixture, tmp_path: Path) -> None: +def test_build(cfngin_context: MockCfnginContext, mocker: MockerFixture, tmp_path: Path) -> None: """Test build.""" (tmp_path / "Dockerfile").touch() mock_image = MagicMock(spec=Image, id=FAKE_IMAGE_ID, tags=MagicMock(return_value=["latest"])) @@ -75,7 +76,8 @@ class TestDockerImageBuildApiOptions: def test_field_defaults(self) -> None: """Test field defaults.""" obj = DockerImageBuildApiOptions() - assert not obj.buildargs and isinstance(obj.buildargs, dict) + assert not obj.buildargs + assert isinstance(obj.buildargs, dict) assert obj.custom_context is False assert not obj.extra_hosts assert obj.forcerm is False diff --git a/tests/unit/cfngin/hooks/docker/image/test_push.py b/tests/unit/cfngin/hooks/docker/image/test_push.py index ee7479908..57b128cc6 100644 --- a/tests/unit/cfngin/hooks/docker/image/test_push.py +++ b/tests/unit/cfngin/hooks/docker/image/test_push.py @@ -4,9 +4,9 @@ from __future__ import annotations from typing import TYPE_CHECKING +from unittest.mock import call from docker.models.images import Image -from mock import call from runway.cfngin.hooks.docker.data_models import ( DockerImage, @@ -21,13 +21,13 @@ from docker import DockerClient from pytest_mock import MockerFixture - from .....factories import MockCFNginContext + from .....factories import MockCfnginContext MODULE = "runway.cfngin.hooks.docker.image._push" def test_push( - cfngin_context: MockCFNginContext, + cfngin_context: MockCfnginContext, mock_docker_client: DockerClient, mocker: MockerFixture, ) -> None: diff --git a/tests/unit/cfngin/hooks/docker/image/test_remove.py b/tests/unit/cfngin/hooks/docker/image/test_remove.py index a8e9a2eda..3110cbc7e 100644 --- a/tests/unit/cfngin/hooks/docker/image/test_remove.py +++ 
b/tests/unit/cfngin/hooks/docker/image/test_remove.py @@ -4,10 +4,10 @@ from __future__ import annotations from typing import TYPE_CHECKING +from unittest.mock import call from docker.errors import ImageNotFound from docker.models.images import Image -from mock import call from runway.cfngin.hooks.docker.data_models import ( DockerImage, @@ -22,13 +22,13 @@ from docker import DockerClient from pytest_mock import MockerFixture - from .....factories import MockCFNginContext + from .....factories import MockCfnginContext MODULE = "runway.cfngin.hooks.docker.image._remove" def test_remove( - cfngin_context: MockCFNginContext, + cfngin_context: MockCfnginContext, mock_docker_client: DockerClient, mocker: MockerFixture, ) -> None: @@ -61,7 +61,7 @@ def test_remove( def test_remove_image_not_found( - cfngin_context: MockCFNginContext, + cfngin_context: MockCfnginContext, mock_docker_client: DockerClient, mocker: MockerFixture, ) -> None: diff --git a/tests/unit/cfngin/hooks/docker/test_data_models.py b/tests/unit/cfngin/hooks/docker/test_data_models.py index 24fbb8578..3d802dbd8 100644 --- a/tests/unit/cfngin/hooks/docker/test_data_models.py +++ b/tests/unit/cfngin/hooks/docker/test_data_models.py @@ -4,10 +4,10 @@ from __future__ import annotations from typing import TYPE_CHECKING +from unittest.mock import MagicMock import pytest from docker.models.images import Image -from mock import MagicMock from pydantic import ValidationError from runway.cfngin.hooks.docker.data_models import ( @@ -18,7 +18,7 @@ from runway.utils import MutableMap if TYPE_CHECKING: - from ....factories import MockCFNginContext + from ....factories import MockCfnginContext MODULE = "runway.cfngin.hooks.docker.data_models" MOCK_IMAGE_REPO = "dkr.test.com/image" @@ -30,7 +30,7 @@ } -@pytest.fixture(scope="function") +@pytest.fixture() def mock_image() -> MagicMock: """Return a mock docker.models.images.Image.""" return MagicMock(spec=Image, **MOCK_IMAGE_PROPS) @@ -78,7 +78,7 @@ def test_fqn_public(self) -> None: obj = ElasticContainerRegistry(alias="test") assert obj.fqn == "public.ecr.aws/test/" - def test_init_default(self, cfngin_context: MockCFNginContext) -> None: + def test_init_default(self, cfngin_context: MockCfnginContext) -> None: """Test init default values.""" account_id = "123456789012" sts_stubber = cfngin_context.add_stubber("sts") @@ -130,7 +130,7 @@ def test_init_public(self) -> None: class TestElasticContainerRegistryRepository: """Test runway.cfngin.hooks.docker._data_models.ElasticContainerRegistryRepository.""" - def test_fqn(self, cfngin_context: MockCFNginContext) -> None: + def test_fqn(self, cfngin_context: MockCfnginContext) -> None: """Test init private.""" account_id = "123456789012" region = "us-east-1" diff --git a/tests/unit/cfngin/hooks/docker/test_hook_data.py b/tests/unit/cfngin/hooks/docker/test_hook_data.py index 6ce162b4c..605d5029c 100644 --- a/tests/unit/cfngin/hooks/docker/test_hook_data.py +++ b/tests/unit/cfngin/hooks/docker/test_hook_data.py @@ -10,7 +10,7 @@ if TYPE_CHECKING: from pytest_mock import MockerFixture - from ....factories import MockCFNginContext + from ....factories import MockCfnginContext MODULE = "runway.cfngin.hooks.docker.hook_data" @@ -24,7 +24,7 @@ def test_client(self, mocker: MockerFixture) -> None: obj = DockerHookData() assert obj.client == mock_local_client.from_env.return_value - def test_from_cfngin_context(self, cfngin_context: MockCFNginContext) -> None: + def test_from_cfngin_context(self, cfngin_context: MockCfnginContext) -> None: """Test 
from_cfngin_context.""" obj = DockerHookData.from_cfngin_context(cfngin_context) assert isinstance(obj, DockerHookData) @@ -37,7 +37,7 @@ def test_from_cfngin_context(self, cfngin_context: MockCFNginContext) -> None: # compare instance id as these should NOT be the same instance assert id(obj) != id(new_obj) - def test_update_context(self, cfngin_context: MockCFNginContext) -> None: + def test_update_context(self, cfngin_context: MockCfnginContext) -> None: """Test update_context.""" obj = DockerHookData() assert obj.update_context(cfngin_context) == obj diff --git a/tests/unit/cfngin/hooks/docker/test_login.py b/tests/unit/cfngin/hooks/docker/test_login.py index acc560a01..639d310ba 100644 --- a/tests/unit/cfngin/hooks/docker/test_login.py +++ b/tests/unit/cfngin/hooks/docker/test_login.py @@ -17,13 +17,13 @@ from docker import DockerClient from pytest_mock import MockerFixture - from ....factories import MockCFNginContext + from ....factories import MockCfnginContext MODULE = "runway.cfngin.hooks.docker._login" def test_login( - cfngin_context: MockCFNginContext, + cfngin_context: MockCfnginContext, mock_docker_client: DockerClient, mocker: MockerFixture, ) -> None: diff --git a/tests/unit/cfngin/hooks/ecr/test__purge_repositroy.py b/tests/unit/cfngin/hooks/ecr/test__purge_repositroy.py index 687c8eabe..28c10d828 100644 --- a/tests/unit/cfngin/hooks/ecr/test__purge_repositroy.py +++ b/tests/unit/cfngin/hooks/ecr/test__purge_repositroy.py @@ -3,7 +3,7 @@ # pyright: basic from __future__ import annotations -from typing import TYPE_CHECKING, List +from typing import TYPE_CHECKING import boto3 import pytest @@ -16,7 +16,7 @@ from mypy_boto3_ecr.type_defs import ImageIdentifierTypeDef from pytest_mock import MockerFixture - from ....factories import MockCFNginContext + from ....factories import MockCfnginContext MODULE = "runway.cfngin.hooks.ecr._purge_repository" @@ -25,7 +25,7 @@ def test_delete_ecr_images() -> None: """Test delete_ecr_images.""" client = boto3.client("ecr") stubber = Stubber(client) - image_ids: List[ImageIdentifierTypeDef] = [{"imageDigest": "image0"}] + image_ids: list[ImageIdentifierTypeDef] = [{"imageDigest": "image0"}] repo_name = "test-repo" stubber.add_response( @@ -42,7 +42,7 @@ def test_delete_ecr_images_failures() -> None: """Test delete_ecr_images with failures.""" client = boto3.client("ecr") stubber = Stubber(client) - image_ids: List[ImageIdentifierTypeDef] = [{"imageDigest": "image0"}] + image_ids: list[ImageIdentifierTypeDef] = [{"imageDigest": "image0"}] repo_name = "test-repo" stubber.add_response( @@ -60,7 +60,7 @@ def test_delete_ecr_images_failures() -> None: {"repositoryName": repo_name, "imageIds": image_ids}, ) - with stubber, pytest.raises(ValueError): + with stubber, pytest.raises(ValueError): # noqa: PT011 delete_ecr_images(client, image_ids=image_ids, repository_name=repo_name) @@ -107,7 +107,7 @@ def test_list_ecr_images_repository_not_found() -> None: assert list_ecr_images(client, repository_name="test-repo") == [] -def test_purge_repository(cfngin_context: MockCFNginContext, mocker: MockerFixture) -> None: +def test_purge_repository(cfngin_context: MockCfnginContext, mocker: MockerFixture) -> None: """Test purge_repository.""" mock_list_ecr_images = mocker.patch( MODULE + ".list_ecr_images", return_value=[{"imageDigest": "abc123"}] @@ -124,7 +124,7 @@ def test_purge_repository(cfngin_context: MockCFNginContext, mocker: MockerFixtu ) -def test_purge_repository_skip(cfngin_context: MockCFNginContext, mocker: MockerFixture) -> None: +def 
test_purge_repository_skip(cfngin_context: MockCfnginContext, mocker: MockerFixture) -> None: """Test purge_repository.""" mock_list_ecr_images = mocker.patch(MODULE + ".list_ecr_images", return_value=[]) mock_delete_ecr_images = mocker.patch(MODULE + ".delete_ecr_images") diff --git a/tests/unit/cfngin/hooks/ssm/conftest.py b/tests/unit/cfngin/hooks/ssm/conftest.py index 2e0c9a4d2..3701e1e3a 100644 --- a/tests/unit/cfngin/hooks/ssm/conftest.py +++ b/tests/unit/cfngin/hooks/ssm/conftest.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, cast +from typing import TYPE_CHECKING import pytest @@ -10,16 +10,18 @@ from botocore.stub import Stubber from mypy_boto3_ssm.client import SSMClient - from ....factories import MockCFNginContext + from ....factories import MockCfnginContext -@pytest.fixture(scope="function") -def ssm_client(cfngin_context: MockCFNginContext, ssm_stubber: Stubber) -> SSMClient: +@pytest.fixture() +def ssm_client( + cfngin_context: MockCfnginContext, ssm_stubber: Stubber # noqa: ARG001 +) -> SSMClient: """Create SSM client.""" - return cast("SSMClient", cfngin_context.get_session().client("ssm")) + return cfngin_context.get_session().client("ssm") -@pytest.fixture(scope="function") -def ssm_stubber(cfngin_context: MockCFNginContext) -> Stubber: +@pytest.fixture() +def ssm_stubber(cfngin_context: MockCfnginContext) -> Stubber: """Create SSM stubber.""" return cfngin_context.add_stubber("ssm") diff --git a/tests/unit/cfngin/hooks/ssm/test_parameter.py b/tests/unit/cfngin/hooks/ssm/test_parameter.py index 1a5b5ba90..04a1f3535 100644 --- a/tests/unit/cfngin/hooks/ssm/test_parameter.py +++ b/tests/unit/cfngin/hooks/ssm/test_parameter.py @@ -17,7 +17,6 @@ if TYPE_CHECKING: from botocore.stub import Stubber from mypy_boto3_ssm.client import SSMClient - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from runway.context import CfnginContext @@ -140,7 +139,7 @@ def test_client( def test_delete( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cfngin_context: CfnginContext, ssm_stubber: Stubber, ) -> None: @@ -154,7 +153,7 @@ def test_delete( def test_delete_handle_parameter_not_found( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cfngin_context: CfnginContext, ssm_stubber: Stubber, ) -> None: @@ -220,7 +219,7 @@ def test_get_force( def test_get_handle_parameter_not_found( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cfngin_context: CfnginContext, ssm_stubber: Stubber, ) -> None: @@ -333,7 +332,7 @@ def test_pre_destroy(self, cfngin_context: CfnginContext, mocker: MockerFixture) def test_put( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cfngin_context: CfnginContext, mocker: MockerFixture, ssm_stubber: Stubber, @@ -370,7 +369,7 @@ def test_put( def test_put_handle_parameter_already_exists( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cfngin_context: CfnginContext, mocker: MockerFixture, ssm_stubber: Stubber, @@ -391,7 +390,7 @@ def test_put_handle_parameter_already_exists( def test_put_no_value( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cfngin_context: CfnginContext, ) -> None: """Test put.""" @@ -539,7 +538,7 @@ def test_update_tags_delete_only_raise_client_error( def test_update_tags_handle_invalid_resource_id( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, cfngin_context: CfnginContext, mocker: MockerFixture, ssm_stubber: Stubber, diff --git 
a/tests/unit/cfngin/hooks/staticsite/test_cleanup.py b/tests/unit/cfngin/hooks/staticsite/test_cleanup.py index 65931e18a..8e9b1aec6 100644 --- a/tests/unit/cfngin/hooks/staticsite/test_cleanup.py +++ b/tests/unit/cfngin/hooks/staticsite/test_cleanup.py @@ -16,10 +16,9 @@ if TYPE_CHECKING: from mypy_boto3_cloudformation.type_defs import OutputTypeDef - from pytest import LogCaptureFixture from pytest_mock import MockerFixture - from ....factories import MockCFNginContext + from ....factories import MockCfnginContext MODULE = "runway.cfngin.hooks.staticsite.cleanup" @@ -41,7 +40,7 @@ def test_get_replicated_function_names(expected: list[str], outputs: list[Output def test_warn( - caplog: LogCaptureFixture, cfngin_context: MockCFNginContext, mocker: MockerFixture + caplog: pytest.LogCaptureFixture, cfngin_context: MockCfnginContext, mocker: MockerFixture ) -> None: """Test warn.""" caplog.set_level(LogLevels.WARNING, MODULE) @@ -76,7 +75,7 @@ def test_warn( def test_warn_ignore_client_error( - caplog: LogCaptureFixture, cfngin_context: MockCFNginContext + caplog: pytest.LogCaptureFixture, cfngin_context: MockCfnginContext ) -> None: """Test warn ignore ClientError.""" caplog.set_level(LogLevels.WARNING, MODULE) diff --git a/tests/unit/cfngin/hooks/staticsite/test_upload_staticsite.py b/tests/unit/cfngin/hooks/staticsite/test_upload_staticsite.py index d05d7ee18..1e9a1a9d9 100644 --- a/tests/unit/cfngin/hooks/staticsite/test_upload_staticsite.py +++ b/tests/unit/cfngin/hooks/staticsite/test_upload_staticsite.py @@ -20,7 +20,7 @@ from runway.module.staticsite.options.models import RunwayStaticSiteExtraFileDataModel if TYPE_CHECKING: - from ....factories import MockCFNginContext + from ....factories import MockCfnginContext @pytest.mark.parametrize( @@ -97,7 +97,7 @@ def test_get_content_yaml() -> None: def test_get_content_unknown() -> None: """Get content unknown.""" - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 get_content(RunwayStaticSiteExtraFileDataModel(content={"a": 0}, name="")) @@ -131,7 +131,7 @@ def test_calculate_hash_of_extra_files( assert calculate_hash_of_extra_files([a]) != calculate_hash_of_extra_files([b]) -def test_sync_extra_files_json_content(cfngin_context: MockCFNginContext) -> None: +def test_sync_extra_files_json_content(cfngin_context: MockCfnginContext) -> None: """Test sync_extra_files json content is put in s3.""" s3_stub = cfngin_context.add_stubber("s3") @@ -155,7 +155,7 @@ def test_sync_extra_files_json_content(cfngin_context: MockCFNginContext) -> Non stub.assert_no_pending_responses() -def test_sync_extra_files_yaml_content(cfngin_context: MockCFNginContext) -> None: +def test_sync_extra_files_yaml_content(cfngin_context: MockCfnginContext) -> None: """Test sync_extra_files yaml content is put in s3.""" s3_stub = cfngin_context.add_stubber("s3") @@ -179,7 +179,7 @@ def test_sync_extra_files_yaml_content(cfngin_context: MockCFNginContext) -> Non stub.assert_no_pending_responses() -def test_sync_extra_files_empty_content(cfngin_context: MockCFNginContext) -> None: +def test_sync_extra_files_empty_content(cfngin_context: MockCfnginContext) -> None: """Test sync_extra_files empty content is not uploaded.""" s3_stub = cfngin_context.add_stubber("s3") @@ -196,7 +196,7 @@ def test_sync_extra_files_empty_content(cfngin_context: MockCFNginContext) -> No stub.assert_no_pending_responses() -def test_sync_extra_files_file_reference(cfngin_context: MockCFNginContext) -> None: +def test_sync_extra_files_file_reference(cfngin_context: 
MockCfnginContext) -> None: """Test sync_extra_files file is uploaded.""" s3_stub = cfngin_context.add_stubber("s3") @@ -222,7 +222,7 @@ def test_sync_extra_files_file_reference(cfngin_context: MockCFNginContext) -> N def test_sync_extra_files_file_reference_with_content_type( - cfngin_context: MockCFNginContext, + cfngin_context: MockCfnginContext, ) -> None: """Test sync_extra_files file is uploaded with the content type.""" s3_stub = cfngin_context.add_stubber("s3") @@ -245,7 +245,7 @@ def test_sync_extra_files_file_reference_with_content_type( stub.assert_no_pending_responses() -def test_sync_extra_files_hash_unchanged(cfngin_context: MockCFNginContext) -> None: +def test_sync_extra_files_hash_unchanged(cfngin_context: MockCfnginContext) -> None: """Test sync_extra_files upload is skipped if the has was unchanged.""" s3_stub = cfngin_context.add_stubber("s3") ssm_stub = cfngin_context.add_stubber("ssm") @@ -272,7 +272,7 @@ def test_sync_extra_files_hash_unchanged(cfngin_context: MockCFNginContext) -> N ssm_stub.assert_no_pending_responses() -def test_sync_extra_files_hash_updated(cfngin_context: MockCFNginContext) -> None: +def test_sync_extra_files_hash_updated(cfngin_context: MockCfnginContext) -> None: """Test sync_extra_files extra files hash is updated.""" s3_stub = cfngin_context.add_stubber("s3") ssm_stub = cfngin_context.add_stubber("ssm") @@ -306,7 +306,7 @@ def test_sync_extra_files_hash_updated(cfngin_context: MockCFNginContext) -> Non { "Bucket": "bucket", "Key": "test", - "Body": "test".encode(), + "Body": b"test", "ContentType": "text/plain", }, ) diff --git a/tests/unit/cfngin/hooks/staticsite/test_utils.py b/tests/unit/cfngin/hooks/staticsite/test_utils.py index 51096d196..c8829be4a 100644 --- a/tests/unit/cfngin/hooks/staticsite/test_utils.py +++ b/tests/unit/cfngin/hooks/staticsite/test_utils.py @@ -2,11 +2,11 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Dict, List, Optional, Union, cast +from typing import TYPE_CHECKING, Optional, Union, cast +from unittest.mock import Mock, call import igittigitt import pytest -from mock import Mock, call from runway.cfngin.hooks.staticsite.utils import ( calculate_hash_of_files, @@ -39,7 +39,7 @@ def test_calculate_hash_of_files(mocker: MockerFixture, tmp_path: Path) -> None: "directories", [None, [{"path": "./"}], [{"path": "./", "exclusions": ["foobar"]}]] ) def test_get_hash_of_files( - directories: Optional[List[Dict[str, Union[List[str], str]]]], + directories: Optional[list[dict[str, Union[list[str], str]]]], mocker: MockerFixture, tmp_path: Path, ) -> None: @@ -85,7 +85,7 @@ def test_get_hash_of_files( @pytest.mark.parametrize("additional_exclusions", [None, [], ["foo"], ["foo", "bar"]]) def test_get_ignorer( - additional_exclusions: Optional[List[str]], mocker: MockerFixture, tmp_path: Path + additional_exclusions: Optional[list[str]], mocker: MockerFixture, tmp_path: Path ) -> None: """Test get_ignorer.""" ignore_parser = mocker.patch(f"{MODULE}.igittigitt.IgnoreParser") diff --git a/tests/unit/cfngin/hooks/test_acm.py b/tests/unit/cfngin/hooks/test_acm.py index bc7c1a015..2f4563bd3 100644 --- a/tests/unit/cfngin/hooks/test_acm.py +++ b/tests/unit/cfngin/hooks/test_acm.py @@ -1,19 +1,18 @@ """Tests for runway.cfngin.hooks.acm.""" -# pyright: basic, reportUnknownArgumentType=none, reportUnknownVariableType=none +# pyright: reportUnknownArgumentType=none, reportUnknownVariableType=none # pyright: reportUnknownLambdaType=none from __future__ import annotations from datetime import datetime -from 
typing import TYPE_CHECKING, Any, Dict, NoReturn, Union, cast +from typing import TYPE_CHECKING, Any, Literal, NoReturn, cast +from unittest.mock import MagicMock import boto3 import pytest from botocore.exceptions import ClientError from botocore.stub import ANY, Stubber -from mock import MagicMock from troposphere.certificatemanager import Certificate as CertificateResource -from typing_extensions import Literal from runway.cfngin.exceptions import ( StackDoesNotExist, @@ -33,21 +32,24 @@ ChangeTypeDef, ResourceRecordSetTypeDef, ) - from pytest import MonkeyPatch - from ...factories import MockCFNginContext + from ...factories import MockCfnginContext STATUS = MutableMap( - **{ - "failed": FAILED, - "new": SubmittedStatus("creating new stack"), - "no": NO_CHANGE, - "recreate": SubmittedStatus("destroying stack for re-creation"), - "update": SubmittedStatus("updating existing stack"), - } + failed=FAILED, + new=SubmittedStatus("creating new stack"), + no=NO_CHANGE, + recreate=SubmittedStatus("destroying stack for re-creation"), + update=SubmittedStatus("updating existing stack"), ) +@pytest.fixture(autouse=True) +def sub_s3(cfngin_context: MockCfnginContext) -> None: + """Sub s3 for MockCfnginContext as this hook uses a ``cached_property`` that creates it.""" + cfngin_context.add_stubber("s3") + + def check_bool_is_true(val: Any) -> bool: """Check if a value is a true bool.""" if val and isinstance(val, bool): @@ -62,7 +64,7 @@ def check_bool_is_false(val: Any) -> bool: raise ValueError(f'Value should be "False"; got {val}') -def gen_certificate(**kwargs: Any) -> Dict[str, Any]: +def gen_certificate(**kwargs: Any) -> dict[str, Any]: """Generate a response to describe_certificate.""" data = { "CertificateArn": kwargs.pop("CertificateArn"), @@ -80,7 +82,7 @@ def gen_change( return {"Action": action, "ResourceRecordSet": record_set} -def gen_change_batch(changes: Any = ANY, comment: Any = ANY) -> Dict[str, Any]: +def gen_change_batch(changes: Any = ANY, comment: Any = ANY) -> dict[str, Any]: """Generate expected change batch.""" return {"Comment": comment, "Changes": changes} @@ -118,9 +120,9 @@ def gen_domain_validation_option(**kwargs: Any) -> DomainValidationTypeDef: def gen_record_set( use_resource_record: bool = False, **kwargs: Any -) -> Union[ResourceRecordSetTypeDef, ResourceRecordTypeDef]: +) -> ResourceRecordSetTypeDef | ResourceRecordTypeDef: """Generate a record set.""" - data: Dict[str, Any] = { + data: dict[str, Any] = { "Name": "placeholder_name", "Type": "CNAME", "Value": "placeholder_value", @@ -151,11 +153,11 @@ def gen_stack_resource(**kwargs: Any) -> StackResourceTypeDef: class TestCertificate: """Tests for runway.cfngin.hooks.acm.Certificate.""" - def test_attributes(self, cfngin_context: MockCFNginContext) -> None: + def test_attributes(self, cfngin_context: MockCfnginContext) -> None: """Test attributes set during __init__.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" result = Certificate( @@ -176,6 +178,7 @@ def test_attributes(self, cfngin_context: MockCFNginContext) -> None: assert result.properties["ValidationMethod"] == "DNS" # blueprint attributes + assert result.blueprint assert result.blueprint.VARIABLES["DomainName"] assert result.blueprint.VARIABLES["ValidateRecordTTL"] @@ -194,14 +197,15 @@ def test_attributes(self, 
cfngin_context: MockCFNginContext) -> None: assert not template.transform # stack attributes + assert result.stack assert result.stack.fqn == "test-stack-name" - assert result.stack._blueprint == result.blueprint + assert result.stack.blueprint == result.blueprint # type: ignore - def test_domain_changed(self, cfngin_context: MockCFNginContext) -> None: + def test_domain_changed(self, cfngin_context: MockCfnginContext) -> None: """Test for domain_changed.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" provider = MagicMock() @@ -241,11 +245,13 @@ def test_domain_changed(self, cfngin_context: MockCFNginContext) -> None: assert not cert.domain_changed() assert not cert.domain_changed() - def test_get_certificate(self, cfngin_context: MockCFNginContext, patch_time: None) -> None: + def test_get_certificate( + self, cfngin_context: MockCfnginContext, mock_sleep: None # noqa: ARG002 + ) -> None: """Test get_certificate.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" provider = MagicMock(cloudformation=boto3.client("cloudformation")) @@ -285,15 +291,15 @@ def test_get_certificate(self, cfngin_context: MockCFNginContext, patch_time: No @pytest.mark.parametrize("status", ["PENDING_VALIDATION", "SUCCESS", "FAILED"]) def test_get_validation_record( self, - cfngin_context: MockCFNginContext, - monkeypatch: MonkeyPatch, - patch_time: None, + cfngin_context: MockCfnginContext, + monkeypatch: pytest.MonkeyPatch, + mock_sleep: None, # noqa: ARG002 status: str, ) -> None: """Test get_validation_record.""" # setup context - acm_stubber = cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + acm_stubber = cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert_arn = "arn:aws:acm:us-east-1:012345678901:certificate/test" @@ -345,13 +351,13 @@ def test_get_validation_record( ("FAILED", "SUCCESS"), ], ) - def test_get_validation_record_status_mismatch( - self, cfngin_context: MockCFNginContext, check: str, found: str + def test_get_validation_record_status_mismatch( + self, cfngin_context: MockCfnginContext, check: str, found: str ) -> None: - """Test get get_validation_record with a mismatched record status.""" + """Test get_validation_record with a mismatched record status.""" # setup context - acm_stubber = cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + acm_stubber = cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert_arn = "arn:aws:acm:us-east-1:012345678901:certificate/test" @@ -373,17 +379,15 @@ def test_get_validation_record_status_mismatch( expected_request, ) - with acm_stubber, pytest.raises(ValueError) as excinfo: + with acm_stubber, pytest.raises(ValueError, match="No validations with status"): cert.get_validation_record(cert_arn=cert_arn, status=check) - - assert "No validations with status" in str(excinfo.value)
acm_stubber.assert_no_pending_responses() - def test_get_validation_record_gt_one(self, cfngin_context: MockCFNginContext) -> None: + def test_get_validation_record_gt_one(self, cfngin_context: MockCfnginContext) -> None: """Test get get_validation_record more than one result.""" # setup context - acm_stubber = cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + acm_stubber = cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert_arn = "arn:aws:acm:us-east-1:012345678901:certificate/test" @@ -408,17 +412,18 @@ def test_get_validation_record_gt_one(self, cfngin_context: MockCFNginContext) - expected_request, ) - with acm_stubber, pytest.raises(ValueError) as excinfo: + with ( + acm_stubber, + pytest.raises(ValueError, match="only one option is supported"), + ): cert.get_validation_record(cert_arn=cert_arn) - - assert "only one option is supported" in str(excinfo.value) acm_stubber.assert_no_pending_responses() - def test_put_record_set(self, cfngin_context: MockCFNginContext) -> None: + def test_put_record_set(self, cfngin_context: MockCfnginContext) -> None: """Test put_record.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - r53_stubber = cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + r53_stubber = cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert = Certificate( @@ -451,12 +456,12 @@ def test_put_record_set(self, cfngin_context: MockCFNginContext) -> None: r53_stubber.assert_no_pending_responses() def test_remove_validation_records( - self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch + self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch ) -> None: """Test remove_validation_records.""" # setup context - acm_stubber = cfngin_context.add_stubber("acm", "us-east-1") - r53_stubber = cfngin_context.add_stubber("route53", "us-east-1") + acm_stubber = cfngin_context.add_stubber("acm", region="us-east-1") + r53_stubber = cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert_arn = "arn:aws:acm:us-east-1:012345678901:certificate/test" @@ -510,19 +515,22 @@ def test_remove_validation_records( }, ) - with acm_stubber, r53_stubber, pytest.raises(ValueError) as excinfo: + with ( # noqa: PT012 + acm_stubber, + r53_stubber, + pytest.raises(ValueError, match="Must provide one of more record sets"), + ): assert not cert.remove_validation_records() cert.remove_validation_records() acm_stubber.assert_no_pending_responses() r53_stubber.assert_no_pending_responses() - assert str(excinfo.value) == "Must provide one of more record sets" - def test_update_record_set(self, cfngin_context: MockCFNginContext) -> None: + def test_update_record_set(self, cfngin_context: MockCfnginContext) -> None: """Test update_record_set.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - r53_stubber = cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + r53_stubber = cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert = Certificate( @@ -555,11 +563,13 @@ def test_update_record_set(self, cfngin_context: MockCFNginContext) -> None: assert not cert.update_record_set(cast("ResourceRecordTypeDef", gen_record_set())) 
r53_stubber.assert_no_pending_responses() - def test_deploy(self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch) -> None: + def test_deploy( + self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch + ) -> None: """Test deploy.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert_arn = "arn:aws:acm:us-east-1:012345678901:certificate/test" @@ -584,17 +594,17 @@ def test_deploy(self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatc "put_record_set", lambda x: None if x == "get_validation_record" else ValueError, ) - monkeypatch.setattr(cert, "_wait_for_stack", lambda x, last_status: None) + monkeypatch.setattr(cert, "_wait_for_stack", lambda _, last_status: None) # noqa: ARG005 assert cert.deploy() == expected def test_deploy_update( - self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch + self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch ) -> None: """Test deploy update stack.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert_arn = "arn:aws:acm:us-east-1:012345678901:certificate/test" @@ -621,17 +631,17 @@ def test_deploy_update( "update_record_set", lambda x: None if x == "get_validation_record" else ValueError, ) - monkeypatch.setattr(cert, "_wait_for_stack", lambda x, last_status: None) + monkeypatch.setattr(cert, "_wait_for_stack", lambda _, last_status: None) # noqa: ARG005 assert cert.deploy() == expected def test_deploy_no_change( - self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch + self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch ) -> None: """Test deploy no change.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert_arn = "arn:aws:acm:us-east-1:012345678901:certificate/test" @@ -649,11 +659,13 @@ def test_deploy_no_change( assert cert.deploy() == expected - def test_deploy_recreate(self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch): + def test_deploy_recreate( + self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch + ) -> None: """Test deploy with stack recreation.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert_arn = "arn:aws:acm:us-east-1:012345678901:certificate/test" @@ -685,12 +697,12 @@ def test_deploy_recreate(self, cfngin_context: MockCFNginContext, monkeypatch: M assert cert.deploy() == expected def test_deploy_domain_changed( - self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch + self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch ) -> None: """Test deploy domain changed.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - 
cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert = Certificate( @@ -704,12 +716,12 @@ def test_deploy_domain_changed( assert not cert.deploy() def test_deploy_error_destroy( - self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch + self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch ) -> None: """Test deploy with errors that result in destroy being called.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert_arn = "arn:aws:acm:us-east-1:012345678901:certificate/test" @@ -740,24 +752,30 @@ def test_deploy_error_destroy( ] ), ) - monkeypatch.setattr(cert, "destroy", lambda records, skip_r53: check_bool_is_true(skip_r53)) + monkeypatch.setattr( + cert, + "destroy", + lambda records, skip_r53: check_bool_is_true(skip_r53), # noqa: ARG005 + ) monkeypatch.setattr(cert, "_wait_for_stack", MagicMock(side_effect=StackFailed("test"))) assert not cert.deploy() # cert.r53_client.exceptions.InvalidChangeBatch assert not cert.deploy() # cert.r53_client.exceptions.NoSuchHostedZone monkeypatch.setattr( - cert, "destroy", lambda records, skip_r53: check_bool_is_false(skip_r53) + cert, + "destroy", + lambda records, skip_r53: check_bool_is_false(skip_r53), # noqa: ARG005 ) assert not cert.deploy() # StackFailed def test_deploy_error_no_destroy( - self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch + self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch ) -> None: """Test deploy with errors that don't result in destroy being called.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert = Certificate( @@ -775,11 +793,13 @@ def test_deploy_error_no_destroy( assert not cert.deploy() - def test_destroy(self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch) -> None: + def test_destroy( + self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch + ) -> None: """Test destroy.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert = Certificate( @@ -790,19 +810,19 @@ def test_destroy(self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPat ) # should only be called once monkeypatch.setattr(cert, "remove_validation_records", MagicMock(return_value=None)) - monkeypatch.setattr(cert, "destroy_stack", lambda wait: None) + monkeypatch.setattr(cert, "destroy_stack", lambda wait: None) # noqa: ARG005 assert cert.destroy() assert cert.destroy(skip_r53=True) assert cert.remove_validation_records.call_count == 1 # type: ignore def test_destroy_aws_errors( - self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch + self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch ) -> None: """Test destroy with errors from AWS.""" # setup context - 
cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert = Certificate( @@ -823,19 +843,19 @@ def test_destroy_aws_errors( ] ), ) - monkeypatch.setattr(cert, "destroy_stack", lambda wait: None) + monkeypatch.setattr(cert, "destroy_stack", lambda wait: None) # noqa: ARG005 assert cert.destroy() assert cert.destroy() assert cert.destroy() def test_destroy_raise_client_error( - self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch + self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch ) -> None: """Test destroy with ClientError raised.""" # setup context - cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" def build_client_error(msg: str) -> ClientError: @@ -848,10 +868,11 @@ def build_client_error(msg: str) -> ClientError: domain="example.com", hosted_zone_id="test", ) - monkeypatch.setattr(cert, "destroy_stack", lambda wait: None) + monkeypatch.setattr(cert, "destroy_stack", lambda wait: None) # noqa: ARG005 def raise_stack_not_exist(_records: Any) -> NoReturn: """Raise ClientError mimicking stack not existing.""" + assert cert.stack raise build_client_error(f"Stack with id {cert.stack.fqn} does not exist") def raise_other(_records: Any) -> NoReturn: @@ -877,8 +898,8 @@ def raise_other(_records: Any) -> NoReturn: ) def test_stage_methods( self, - cfngin_context: MockCFNginContext, - monkeypatch: MonkeyPatch, + cfngin_context: MockCfnginContext, + monkeypatch: pytest.MonkeyPatch, stage: str, expected: str, ) -> None: @@ -890,8 +911,8 @@ def test_stage_methods( """ # setup context - cfngin_context.add_stubber("acm", "us-east-1") - cfngin_context.add_stubber("route53", "us-east-1") + cfngin_context.add_stubber("acm", region="us-east-1") + cfngin_context.add_stubber("route53", region="us-east-1") cfngin_context.config.namespace = "test" cert = Certificate( diff --git a/tests/unit/cfngin/hooks/test_aws_lambda.py b/tests/unit/cfngin/hooks/test_aws_lambda.py index cd5bb774a..79ae4a2a6 100644 --- a/tests/unit/cfngin/hooks/test_aws_lambda.py +++ b/tests/unit/cfngin/hooks/test_aws_lambda.py @@ -14,13 +14,13 @@ import unittest from io import BytesIO as StringIO from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, Any, Optional, Union, cast +from unittest.mock import ANY, MagicMock, patch from zipfile import ZipFile import boto3 import pytest from botocore.exceptions import ClientError -from mock import ANY, MagicMock, patch from moto import mock_s3 from testfixtures.comparison import compare from testfixtures.shouldraise import ShouldRaise @@ -48,7 +48,6 @@ if TYPE_CHECKING: from mypy_boto3_s3.client import S3Client - from pytest import LogCaptureFixture, MonkeyPatch REGION = "us-east-1" ALL_FILES = ( @@ -72,7 +71,7 @@ class TestLambdaHooks(unittest.TestCase): @classmethod def temp_directory_with_files( - cls, files: Union[List[str], Tuple[str, ...]] = ALL_FILES + cls, files: Union[list[str], tuple[str, ...]] = ALL_FILES ) -> TempDirectory: """Create a temp directory with files.""" temp_dict = TempDirectory() @@ -87,7 +86,7 @@ def s3(self) -> S3Client: self._s3 = 
boto3.client("s3", region_name=REGION) return self._s3 - def assert_s3_zip_file_list(self, bucket: str, key: str, files: List[str]) -> None: + def assert_s3_zip_file_list(self, bucket: str, key: str, files: list[str]) -> None: """Assert s3 zip file list.""" object_info = self.s3.get_object(Bucket=bucket, Key=key) zip_data = StringIO(object_info["Body"].read()) @@ -96,7 +95,7 @@ def assert_s3_zip_file_list(self, bucket: str, key: str, files: List[str]) -> No with ZipFile(zip_data, "r") as zip_file: for zip_info in zip_file.infolist(): perms = (zip_info.external_attr & ZIP_PERMS_MASK) >> 16 - self.assertIn(perms, (0o755, 0o644), "ZIP member permission must be 755 or 644") + assert perms in (493, 420), "ZIP member permission must be 755 or 644" found_files.add(zip_info.filename) compare(found_files, set(files)) @@ -118,7 +117,7 @@ def setUp(self) -> None: ) self.provider = mock_provider(region="us-east-1") - def run_hook(self, **kwargs: Any) -> Dict[Any, Any]: + def run_hook(self, **kwargs: Any) -> dict[Any, Any]: """Run hook.""" real_kwargs = { "context": self.context, @@ -131,14 +130,14 @@ def run_hook(self, **kwargs: Any) -> Dict[Any, Any]: @mock_s3 def test_bucket_default(self) -> None: """Test bucket default.""" - self.assertIsNotNone(self.run_hook(functions={})) + assert self.run_hook(functions={}) is not None self.assert_s3_bucket("test") @mock_s3 def test_bucket_custom(self) -> None: """Test bucket custom.""" - self.assertIsNotNone(self.run_hook(bucket="custom", functions={})) + assert self.run_hook(bucket="custom", functions={}) is not None self.assert_s3_bucket("test", present=False) self.assert_s3_bucket("custom") @@ -152,12 +151,12 @@ def test_prefix(self) -> None: functions={"MyFunction": {"path": temp_dir.path + "/f1"}}, ) - self.assertIsNotNone(results) + assert results is not None code = results.get("MyFunction") - self.assertIsInstance(code, Code) + assert isinstance(code, Code) self.assert_s3_zip_file_list(code.S3Bucket, code.S3Key, F1_FILES) - self.assertTrue(code.S3Key.startswith("cloudformation-custom-resources/lambda-MyFunction-")) + assert code.S3Key.startswith("cloudformation-custom-resources/lambda-MyFunction-") @mock_s3 def test_prefix_missing(self) -> None: @@ -165,12 +164,12 @@ def test_prefix_missing(self) -> None: with self.temp_directory_with_files() as temp_dir: results = self.run_hook(functions={"MyFunction": {"path": temp_dir.path + "/f1"}}) - self.assertIsNotNone(results) + assert results is not None code = results.get("MyFunction") - self.assertIsInstance(code, Code) + assert isinstance(code, Code) self.assert_s3_zip_file_list(code.S3Bucket, code.S3Key, F1_FILES) - self.assertTrue(code.S3Key.startswith("lambda-MyFunction-")) + assert code.S3Key.startswith("lambda-MyFunction-") @mock_s3 def test_path_missing(self) -> None: @@ -191,10 +190,10 @@ def test_path_relative(self) -> None: ), ) - self.assertIsNotNone(results) + assert results is not None code = results.get("MyFunction") - self.assertIsInstance(code, Code) + assert isinstance(code, Code) self.assert_s3_zip_file_list(code.S3Bucket, code.S3Key, ["test.py"]) @mock_s3 @@ -213,10 +212,10 @@ def test_path_home_relative(self) -> None: results = self.run_hook(functions={"MyFunction": {"path": test_path}}) - self.assertIsNotNone(results) + assert results is not None code = results.get("MyFunction") - self.assertIsInstance(code, Code) + assert isinstance(code, Code) self.assert_s3_zip_file_list(code.S3Bucket, code.S3Key, ["test.py"]) @mock_s3 @@ -230,20 +229,20 @@ def test_multiple_functions(self) -> None: } ) 
- self.assertIsNotNone(results) + assert results is not None f1_code = results.get("MyFunction") - self.assertIsInstance(f1_code, Code) + assert isinstance(f1_code, Code) self.assert_s3_zip_file_list(f1_code.S3Bucket, f1_code.S3Key, F1_FILES) f2_code = results.get("OtherFunction") - self.assertIsInstance(f2_code, Code) + assert isinstance(f2_code, Code) self.assert_s3_zip_file_list(f2_code.S3Bucket, f2_code.S3Key, F2_FILES) @mock_s3 def test_patterns_invalid(self) -> None: """Test patterns invalid.""" - msg = "Invalid file patterns in key 'include': must be a string or " "list of strings" + msg = "Invalid file patterns in key 'include': must be a string or list of strings" with ShouldRaise(ValueError(msg)): self.run_hook( @@ -263,10 +262,10 @@ def test_patterns_include(self) -> None: } ) - self.assertIsNotNone(results) + assert results is not None code = results.get("MyFunction") - self.assertIsInstance(code, Code) + assert isinstance(code, Code) self.assert_s3_zip_file_list( code.S3Bucket, code.S3Key, @@ -292,10 +291,10 @@ def test_patterns_exclude(self) -> None: } ) - self.assertIsNotNone(results) + assert results is not None code = results.get("MyFunction") - self.assertIsInstance(code, Code) + assert isinstance(code, Code) self.assert_s3_zip_file_list( code.S3Bucket, code.S3Key, ["f1.py", "__init__.py", "test2/test.txt"] ) @@ -314,10 +313,10 @@ def test_patterns_include_exclude(self) -> None: } ) - self.assertIsNotNone(results) + assert results is not None code = results.get("MyFunction") - self.assertIsInstance(code, Code) + assert isinstance(code, Code) self.assert_s3_zip_file_list(code.S3Bucket, code.S3Key, ["f1.py", "__init__.py"]) @mock_s3 @@ -333,7 +332,7 @@ def test_patterns_exclude_all(self) -> None: functions={"MyFunction": {"path": temp_dir.path + "/f1", "exclude": ["**"]}} ) - self.assertIsNone(results) + assert results is None @mock_s3 def test_idempotence(self) -> None: @@ -348,10 +347,10 @@ def test_idempotence(self) -> None: previous = None for _ in range(2): results = self.run_hook(bucket=bucket_name, functions=functions) - self.assertIsNotNone(results) + assert results is not None code = results.get("MyFunction") - self.assertIsInstance(code, Code) + assert isinstance(code, Code) if not previous: previous = code.S3Key @@ -360,7 +359,7 @@ def test_idempotence(self) -> None: compare( previous, code.S3Key, - prefix="zipfile name should not be modified in " "repeated runs.", + prefix="zipfile name should not be modified in repeated runs.", ) def test_calculate_hash(self) -> None: @@ -375,13 +374,13 @@ def test_calculate_hash(self) -> None: with self.temp_directory_with_files() as temp_dir3: root = cast(str, temp_dir3.path) - with open(os.path.join(root, ALL_FILES[0]), "w", encoding="utf-8") as _file: + with (Path(root) / ALL_FILES[0]).open("w") as _file: _file.write("modified file data") hash3 = _calculate_hash(ALL_FILES, root) - self.assertEqual(hash1, hash2) - self.assertNotEqual(hash1, hash3) - self.assertNotEqual(hash2, hash3) + assert hash1 == hash2 + assert hash1 != hash3 + assert hash2 != hash3 def test_calculate_hash_diff_filename_same_contents(self) -> None: """Test calculate hash diff filename same contents.""" @@ -393,7 +392,7 @@ def test_calculate_hash_diff_filename_same_contents(self) -> None: temp_dir.write(file_name, b"data") hash1 = _calculate_hash([file1], root) hash2 = _calculate_hash([file2], root) - self.assertNotEqual(hash1, hash2) + assert hash1 != hash2 def test_calculate_hash_different_ordering(self) -> None: """Test calculate hash different 
ordering.""" @@ -409,11 +408,11 @@ def test_calculate_hash_different_ordering(self) -> None: temp_dir2.write(file_name, b"") hash1 = _calculate_hash(files1, root1) hash2 = _calculate_hash(files2, root2) - self.assertEqual(hash1, hash2) + assert hash1 == hash2 def test_select_bucket_region(self) -> None: """Test select bucket region.""" - tests: Tuple[Tuple[Tuple[Optional[str], Optional[str], Optional[str], str], str], ...] = ( + tests: tuple[tuple[tuple[Optional[str], Optional[str], Optional[str], str], str], ...] = ( (("myBucket", "us-east-1", "us-west-1", "eu-west-1"), "us-east-1"), (("myBucket", None, "us-west-1", "eu-west-1"), "eu-west-1"), ((None, "us-east-1", "us-west-1", "eu-west-1"), "us-west-1"), @@ -421,7 +420,7 @@ def test_select_bucket_region(self) -> None: ) for args, result in tests: - self.assertEqual(select_bucket_region(*args), result) # type: ignore + assert select_bucket_region(*args) == result # type: ignore @mock_s3 def test_follow_symlink_nonbool(self) -> None: @@ -441,10 +440,10 @@ def test_follow_symlink_true(self) -> None: results = self.run_hook( follow_symlinks=True, functions={"MyFunction": {"path": root2}} ) - self.assertIsNotNone(results) + assert results is not None code = results.get("MyFunction") - self.assertIsInstance(code, Code) + assert isinstance(code, Code) self.assert_s3_zip_file_list( code.S3Bucket, code.S3Key, @@ -478,10 +477,10 @@ def test_follow_symlink_false(self) -> None: results = self.run_hook( follow_symlinks=False, functions={"MyFunction": {"path": root2}} ) - self.assertIsNotNone(results) + assert results is not None code = results.get("MyFunction") - self.assertIsInstance(code, Code) + assert isinstance(code, Code) self.assert_s3_zip_file_list( code.S3Bucket, code.S3Key, @@ -506,10 +505,10 @@ def test_follow_symlink_omitted(self) -> None: root2 = temp_dir2.path os.symlink(root1 + "/f1", root2 + "/f3") results = self.run_hook(functions={"MyFunction": {"path": root2}}) - self.assertIsNotNone(results) + assert results is not None code = results.get("MyFunction") - self.assertIsInstance(code, Code) + assert isinstance(code, Code) self.assert_s3_zip_file_list( code.S3Bucket, code.S3Key, @@ -581,9 +580,9 @@ class TestDockerizePip: { "Target": "/var/task", "Source": ( - os.getcwd().replace("\\", "/") + str(Path.cwd()).replace("\\", "/") if platform.system() == "Windows" - else os.getcwd() + else str(Path.cwd()) ), "Type": "bind", "ReadOnly": False, @@ -596,7 +595,7 @@ def test_with_docker_file(self) -> None: client = make_fake_client() with TempDirectory() as tmp_dir: docker_file = tmp_dir.write("Dockerfile", b"") - dockerized_pip(os.getcwd(), client=client, docker_file=docker_file) + dockerized_pip(str(Path.cwd()), client=client, docker_file=docker_file) client.api.build.assert_called_with( path=tmp_dir.path, dockerfile="Dockerfile", forcerm=True @@ -617,7 +616,7 @@ def test_with_docker_image(self) -> None: """Test with docker_image provided.""" client = make_fake_client() image = "alpine" - dockerized_pip(os.getcwd(), client=client, docker_image=image) + dockerized_pip(str(Path.cwd()), client=client, docker_image=image) client.api.create_container.assert_called_with( detach=True, image=image, command=self.command, host_config=self.host_config @@ -632,7 +631,7 @@ def test_with_runtime(self) -> None: """Test with runtime provided.""" client = make_fake_client() runtime = "python3.8" - dockerized_pip(os.getcwd(), client=client, runtime=runtime) + dockerized_pip(str(Path.cwd()), client=client, runtime=runtime) 
client.api.create_container.assert_called_with( detach=True, @@ -651,7 +650,7 @@ def test_raises_invalid_config(self) -> None: client = make_fake_client() with pytest.raises(InvalidDockerizePipConfiguration): dockerized_pip( - os.getcwd(), + str(Path.cwd()), client=client, docker_file="docker_file", docker_image="docker_image", @@ -659,50 +658,44 @@ def test_raises_invalid_config(self) -> None: ) with pytest.raises(InvalidDockerizePipConfiguration): dockerized_pip( - os.getcwd(), + str(Path.cwd()), client=client, docker_file="docker_file", docker_image="docker_image", ) with pytest.raises(InvalidDockerizePipConfiguration): - dockerized_pip(os.getcwd(), client=client, docker_file="docker_file", runtime="runtime") + dockerized_pip( + str(Path.cwd()), client=client, docker_file="docker_file", runtime="runtime" + ) with pytest.raises(InvalidDockerizePipConfiguration): dockerized_pip( - os.getcwd(), + str(Path.cwd()), client=client, docker_image="docker_image", runtime="runtime", ) with pytest.raises(InvalidDockerizePipConfiguration): - dockerized_pip(os.getcwd(), client=client) + dockerized_pip(str(Path.cwd()), client=client) def test_raises_value_error_missing_dockerfile(self) -> None: """ValueError raised when provided Dockerfile is not found.""" client = make_fake_client() - with pytest.raises(ValueError) as excinfo: - dockerized_pip(os.getcwd(), client=client, docker_file="not-a-Dockerfile") - assert "docker_file" in str(excinfo.value) + with pytest.raises(ValueError, match=".*docker_file.*"): + dockerized_pip(str(Path.cwd()), client=client, docker_file="not-a-Dockerfile") def test_raises_value_error_runtime(self) -> None: """ValueError raised if runtime provided is not supported.""" client = make_fake_client() - with pytest.raises(ValueError) as excinfo: - dockerized_pip(os.getcwd(), client=client, runtime="node") - assert "node" in str(excinfo.value) + with pytest.raises(ValueError, match=".*node.*"): + dockerized_pip(str(Path.cwd()), client=client, runtime="node") class TestHandleRequirements: """Test handle_requirements.""" - PIPFILE = "\n".join( - [ - "[[source]]", - 'url = "https://pypi.org/simple"', - "verify_ssl = true", - 'name = "pypi"', - "[packages]", - "[dev-packages]", - ] + PIPFILE = ( + '[[source]]\nurl = "https://pypi.org/simple"\nverify_ssl = true\nname = "pypi"\n' + "[packages]\n[dev-packages]" ) REQUIREMENTS = "-i https://pypi.org/simple\n\n" @@ -715,11 +708,13 @@ def test_default(self) -> None: req_path = handle_requirements( package_root=cast(str, tmp_dir.path), dest_path=cast(str, tmp_dir.path), - requirements=cast(Dict[str, bool], find_requirements(cast(str, tmp_dir.path))), + requirements=cast(dict[str, bool], find_requirements(cast(str, tmp_dir.path))), ) - assert req_path == os.path.join(cast(str, tmp_dir.path), "requirements.txt") - assert not os.path.isfile(os.path.join(cast(str, tmp_dir.path), "Pipfile.lock")) + assert req_path == os.path.join( # noqa: PTH118 + cast(str, tmp_dir.path), "requirements.txt" + ) + assert not (Path(cast(str, tmp_dir.path)) / "Pipfile.lock").is_file() assert tmp_dir.read("requirements.txt") == expected def test_explicit_pipenv(self, tmp_path: Path) -> None: @@ -732,7 +727,7 @@ def test_explicit_pipenv(self, tmp_path: Path) -> None: req_path = handle_requirements( package_root=str(tmp_path), dest_path=str(tmp_path), - requirements=cast(Dict[str, bool], find_requirements(str(tmp_path))), + requirements=cast(dict[str, bool], find_requirements(str(tmp_path))), use_pipenv=True, ) assert req_path == str(requirements_txt) @@ -756,7 +751,7 
@@ def test_explicit_pipenv(self, tmp_path: Path) -> None: assert requirements_txt.read_text() == "\n".join(expected_text) + "\n" def test_frozen_pipenv( - self, caplog: LogCaptureFixture, monkeypatch: MonkeyPatch, tmp_path: Path + self, caplog: pytest.LogCaptureFixture, monkeypatch: pytest.MonkeyPatch, tmp_path: Path ) -> None: """Test use pipenv from Pyinstaller build.""" caplog.set_level(logging.ERROR, logger="runway.cfngin.hooks.aws_lambda") @@ -773,7 +768,7 @@ def test_frozen_pipenv( }, ) assert excinfo.value.code == 1 - assert ["pipenv can only be used with python installed from PyPi"] == caplog.messages + assert caplog.messages == ["pipenv can only be used with python installed from PyPi"] def test_implicit_pipenv(self, tmp_path: Path) -> None: """Test implicit use of pipenv.""" @@ -784,7 +779,7 @@ def test_implicit_pipenv(self, tmp_path: Path) -> None: req_path = handle_requirements( package_root=str(tmp_path), dest_path=str(tmp_path), - requirements=cast(Dict[str, bool], find_requirements(str(tmp_path))), + requirements=cast(dict[str, bool], find_requirements(str(tmp_path))), use_pipenv=True, ) assert req_path == str(requirements_txt) @@ -809,17 +804,16 @@ def test_implicit_pipenv(self, tmp_path: Path) -> None: def test_raise_not_implimented(self) -> None: """Test NotImplimentedError is raised when no requirements file.""" - with TempDirectory() as tmp_dir: - with pytest.raises(NotImplementedError): - handle_requirements( - package_root=cast(str, tmp_dir.path), - dest_path=cast(str, tmp_dir.path), - requirements={ - "requirements.txt": False, - "Pipfile": False, - "Pipfile.lock": False, - }, - ) + with TempDirectory() as tmp_dir, pytest.raises(NotImplementedError): + handle_requirements( + package_root=cast(str, tmp_dir.path), + dest_path=cast(str, tmp_dir.path), + requirements={ + "requirements.txt": False, + "Pipfile": False, + "Pipfile.lock": False, + }, + ) class TestShouldUseDocker: diff --git a/tests/unit/cfngin/hooks/test_base.py b/tests/unit/cfngin/hooks/test_base.py index f949137a5..f4047077d 100644 --- a/tests/unit/cfngin/hooks/test_base.py +++ b/tests/unit/cfngin/hooks/test_base.py @@ -5,9 +5,9 @@ import logging from typing import TYPE_CHECKING +from unittest.mock import MagicMock, call, patch import pytest -from mock import MagicMock, call, patch from runway.cfngin.exceptions import StackFailed from runway.cfngin.hooks.base import Hook, HookDeployAction, HookDestroyAction @@ -21,9 +21,8 @@ ) if TYPE_CHECKING: - from pytest import LogCaptureFixture, MonkeyPatch - from ...factories import MockCFNginContext + from ...factories import MockCfnginContext COMPLETE_W_REASON = CompleteStatus("test successful") @@ -31,7 +30,7 @@ class TestHook: """Tests for runway.cfngin.hooks.base.Hook.""" - def test_attributes(self, cfngin_context: MockCFNginContext) -> None: + def test_attributes(self, cfngin_context: MockCfnginContext) -> None: """Test attributes set during __init__.""" provider = MagicMock() args = {"tags": {"key": "val"}} @@ -45,18 +44,18 @@ def test_attributes(self, cfngin_context: MockCFNginContext) -> None: assert not result.stack assert result.stack_name == "stack" - def test_tags(self, cfngin_context: MockCFNginContext) -> None: + def test_tags(self, cfngin_context: MockCfnginContext) -> None: """Test tags property.""" cfngin_context.config.tags = {"context_tag": "val"} - hook = Hook(cfngin_context, MagicMock(), **{"tags": {"arg_tag": "val"}}) + hook = Hook(cfngin_context, MagicMock(), tags={"arg_tag": "val"}) assert hook.tags.to_dict() == [ {"Key": "arg_tag", 
"Value": "val"}, {"Key": "context_tag", "Value": "val"}, ] - def test_get_template_description(self, cfngin_context: MockCFNginContext) -> None: + def test_get_template_description(self, cfngin_context: MockCfnginContext) -> None: """Test for get_template_description.""" hook = Hook(cfngin_context, MagicMock()) @@ -70,7 +69,7 @@ def test_get_template_description(self, cfngin_context: MockCFNginContext) -> No MagicMock(return_value=COMPLETE), ) def test_deploy_stack( - self, cfngin_context: MockCFNginContext, caplog: LogCaptureFixture + self, cfngin_context: MockCfnginContext, caplog: pytest.LogCaptureFixture ) -> None: """Test for deploy_stack.""" hook = Hook(cfngin_context, MagicMock()) @@ -87,7 +86,7 @@ def test_deploy_stack( MagicMock(side_effect=[SUBMITTED, COMPLETE]), ) def test_deploy_stack_wait( - self, cfngin_context: MockCFNginContext, caplog: LogCaptureFixture + self, cfngin_context: MockCfnginContext, caplog: pytest.LogCaptureFixture ) -> None: """Test for deploy_stack with wait.""" hook = Hook(cfngin_context, MagicMock()) @@ -106,7 +105,7 @@ def test_deploy_stack_wait( MagicMock(side_effect=[SKIPPED]), ) def test_deploy_stack_wait_skipped( - self, cfngin_context: MockCFNginContext, caplog: LogCaptureFixture + self, cfngin_context: MockCfnginContext, caplog: pytest.LogCaptureFixture ) -> None: """Test for deploy_stack with wait and skip.""" hook = Hook(cfngin_context, MagicMock()) @@ -119,7 +118,7 @@ def test_deploy_stack_wait_skipped( assert caplog.records[0].message == f"{stack.name}:{SKIPPED.name}" @patch("runway.cfngin.hooks.base.HookDeployAction.run", MagicMock(side_effect=[FAILED])) - def test_deploy_stack_wait_failed(self, cfngin_context: MockCFNginContext) -> None: + def test_deploy_stack_wait_failed(self, cfngin_context: MockCfnginContext) -> None: """Test for deploy_stack with wait and skip.""" hook = Hook(cfngin_context, MagicMock()) stack = MagicMock() @@ -133,7 +132,7 @@ def test_deploy_stack_wait_failed(self, cfngin_context: MockCFNginContext) -> No MagicMock(side_effect=[SUBMITTED, COMPLETE_W_REASON]), ) def test_destroy_stack( - self, cfngin_context: MockCFNginContext, caplog: LogCaptureFixture + self, cfngin_context: MockCfnginContext, caplog: pytest.LogCaptureFixture ) -> None: """Test for destroy_stack with wait.""" hook = Hook(cfngin_context, MagicMock()) @@ -150,7 +149,7 @@ def test_destroy_stack( == f"{stack.name}:{COMPLETE_W_REASON.name} ({COMPLETE_W_REASON.reason})" ) - def test_wait_for_stack_till_reason(self, cfngin_context: MockCFNginContext) -> None: + def test_wait_for_stack_till_reason(self, cfngin_context: MockCfnginContext) -> None: """Test _wait_for_stack till_reason option.""" hook = Hook(cfngin_context, MagicMock()) stack = MagicMock(fqn="test-stack", name="stack") @@ -167,7 +166,7 @@ def test_wait_for_stack_till_reason(self, cfngin_context: MockCFNginContext) -> assert result.reason == "catch" def test_wait_for_stack_log_change( - self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch + self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch ) -> None: """Test _wait_for_stack log status change.""" hook = Hook(cfngin_context, MagicMock()) @@ -189,28 +188,28 @@ def test_wait_for_stack_log_change( mock_log.assert_has_calls([call(stack, new_status), call(stack, COMPLETE)]) assert mock_log.call_count == 2 - def test_post_deploy(self, cfngin_context: MockCFNginContext) -> None: + def test_post_deploy(self, cfngin_context: MockCfnginContext) -> None: """Test post_deploy.""" hook = Hook(cfngin_context, MagicMock()) with 
pytest.raises(NotImplementedError): hook.post_deploy() - def test_post_destroy(self, cfngin_context: MockCFNginContext) -> None: + def test_post_destroy(self, cfngin_context: MockCfnginContext) -> None: """Test post_destroy.""" hook = Hook(cfngin_context, MagicMock()) with pytest.raises(NotImplementedError): hook.post_destroy() - def test_pre_deploy(self, cfngin_context: MockCFNginContext) -> None: + def test_pre_deploy(self, cfngin_context: MockCfnginContext) -> None: """Test pre_deploy.""" hook = Hook(cfngin_context, MagicMock()) with pytest.raises(NotImplementedError): hook.pre_deploy() - def test_pre_destroy(self, cfngin_context: MockCFNginContext): + def test_pre_destroy(self, cfngin_context: MockCfnginContext) -> None: """Test pre_destroy.""" hook = Hook(cfngin_context, MagicMock()) @@ -221,21 +220,21 @@ def test_pre_destroy(self, cfngin_context: MockCFNginContext): class TestHookDeployAction: """Tests for runway.cfngin.hooks.base.HookDeployAction.""" - def test_provider(self, cfngin_context: MockCFNginContext) -> None: + def test_provider(self, cfngin_context: MockCfnginContext) -> None: """Test provider property.""" provider = MagicMock() obj = HookDeployAction(cfngin_context, provider) assert obj.provider == provider - def test_build_provider(self, cfngin_context: MockCFNginContext) -> None: + def test_build_provider(self, cfngin_context: MockCfnginContext) -> None: """Test build_provider.""" provider = MagicMock() obj = HookDeployAction(cfngin_context, provider) assert obj.build_provider() == provider - def test_run(self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch) -> None: + def test_run(self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch) -> None: """Test run.""" obj = HookDeployAction(cfngin_context, MagicMock()) monkeypatch.setattr(obj, "_launch_stack", lambda: "success") @@ -246,7 +245,7 @@ def test_run(self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch) class TestHookDestroyAction: """Tests for runway.cfngin.hooks.base.HookDestroyAction.""" - def test_run(self, cfngin_context: MockCFNginContext, monkeypatch: MonkeyPatch) -> None: + def test_run(self, cfngin_context: MockCfnginContext, monkeypatch: pytest.MonkeyPatch) -> None: """Test run.""" obj = HookDestroyAction(cfngin_context, MagicMock()) monkeypatch.setattr(obj, "_destroy_stack", lambda: "success") diff --git a/tests/unit/cfngin/hooks/test_cleanup_s3.py b/tests/unit/cfngin/hooks/test_cleanup_s3.py index 7c5b9422c..b9f9256cb 100644 --- a/tests/unit/cfngin/hooks/test_cleanup_s3.py +++ b/tests/unit/cfngin/hooks/test_cleanup_s3.py @@ -10,10 +10,10 @@ from runway.cfngin.hooks.cleanup_s3 import purge_bucket if TYPE_CHECKING: - from ...factories import MockCFNginContext + from ...factories import MockCfnginContext -def test_purge_bucket(cfngin_context: MockCFNginContext) -> None: +def test_purge_bucket(cfngin_context: MockCfnginContext) -> None: """Test purge_bucket.""" stub = cfngin_context.add_stubber("s3") @@ -24,7 +24,7 @@ def test_purge_bucket(cfngin_context: MockCFNginContext) -> None: stub.assert_no_pending_responses() -def test_purge_bucket_does_not_exist(cfngin_context: MockCFNginContext) -> None: +def test_purge_bucket_does_not_exist(cfngin_context: MockCfnginContext) -> None: """Test purge_bucket Bucket doesn't exist.""" stub = cfngin_context.add_stubber("s3") @@ -34,7 +34,7 @@ def test_purge_bucket_does_not_exist(cfngin_context: MockCFNginContext) -> None: stub.assert_no_pending_responses() -def test_purge_bucket_unhandled_exception(cfngin_context: 
MockCFNginContext) -> None: +def test_purge_bucket_unhandled_exception(cfngin_context: MockCfnginContext) -> None: """Test purge_bucket with unhandled exception.""" stub = cfngin_context.add_stubber("s3") diff --git a/tests/unit/cfngin/hooks/test_cleanup_ssm.py b/tests/unit/cfngin/hooks/test_cleanup_ssm.py index d62211c7d..9c2a5f5ac 100644 --- a/tests/unit/cfngin/hooks/test_cleanup_ssm.py +++ b/tests/unit/cfngin/hooks/test_cleanup_ssm.py @@ -7,10 +7,10 @@ from runway.cfngin.hooks.cleanup_ssm import delete_param if TYPE_CHECKING: - from ...factories import MockCFNginContext + from ...factories import MockCfnginContext -def test_delete_param(cfngin_context: MockCFNginContext) -> None: +def test_delete_param(cfngin_context: MockCfnginContext) -> None: """Test delete_param.""" stub = cfngin_context.add_stubber("ssm") @@ -19,7 +19,7 @@ def test_delete_param(cfngin_context: MockCFNginContext) -> None: assert delete_param(cfngin_context, parameter_name="foo") -def test_delete_param_not_found(cfngin_context: MockCFNginContext) -> None: +def test_delete_param_not_found(cfngin_context: MockCfnginContext) -> None: """Test delete_param.""" stub = cfngin_context.add_stubber("ssm") diff --git a/tests/unit/cfngin/hooks/test_ecs.py b/tests/unit/cfngin/hooks/test_ecs.py index 4bf886d57..4c6c33deb 100644 --- a/tests/unit/cfngin/hooks/test_ecs.py +++ b/tests/unit/cfngin/hooks/test_ecs.py @@ -1,6 +1,5 @@ """Tests for runway.cfngin.hooks.ecs.""" -# pyright: basic from __future__ import annotations from typing import TYPE_CHECKING @@ -9,15 +8,17 @@ from runway.cfngin.hooks.ecs import create_clusters if TYPE_CHECKING: + import pytest from mypy_boto3_ecs.type_defs import ClusterTypeDef - from pytest import LogCaptureFixture - from ...factories import MockCFNginContext + from ...factories import MockCfnginContext MODULE = "runway.cfngin.hooks.ecs" -def test_create_clusters(caplog: LogCaptureFixture, cfngin_context: MockCFNginContext) -> None: +def test_create_clusters( + caplog: pytest.LogCaptureFixture, cfngin_context: MockCfnginContext +) -> None: """Test create_clusters.""" caplog.set_level(LogLevels.DEBUG, MODULE) stub = cfngin_context.add_stubber("ecs") @@ -39,7 +40,7 @@ def test_create_clusters(caplog: LogCaptureFixture, cfngin_context: MockCFNginCo assert f"creating ECS cluster: {cluster}" in caplog.messages -def test_create_clusters_str(cfngin_context: MockCFNginContext) -> None: +def test_create_clusters_str(cfngin_context: MockCfnginContext) -> None: """Test create_clusters with ``clusters`` provided as str.""" stub = cfngin_context.add_stubber("ecs") cluster_name = "foo" diff --git a/tests/unit/cfngin/hooks/test_iam.py b/tests/unit/cfngin/hooks/test_iam.py index 410cc981c..666cca6f1 100644 --- a/tests/unit/cfngin/hooks/test_iam.py +++ b/tests/unit/cfngin/hooks/test_iam.py @@ -22,13 +22,13 @@ from pytest_mock import MockerFixture - from ...factories import MockCFNginContext + from ...factories import MockCfnginContext CREATE_DATE = datetime(2015, 1, 1) MODULE = "runway.cfngin.hooks.iam" -def test_create_ecs_service_role(cfngin_context: MockCFNginContext) -> None: +def test_create_ecs_service_role(cfngin_context: MockCfnginContext) -> None: """Test create_ecs_service_role.""" stub = cfngin_context.add_stubber("iam") @@ -64,7 +64,7 @@ def test_create_ecs_service_role(cfngin_context: MockCFNginContext) -> None: def test_create_ecs_service_role_already_exists( - cfngin_context: MockCFNginContext, + cfngin_context: MockCfnginContext, ) -> None: """Test create_ecs_service_role already exists.""" stub = 
cfngin_context.add_stubber("iam") @@ -86,7 +86,7 @@ def test_create_ecs_service_role_already_exists( def test_create_ecs_service_role_raise_client_error( - cfngin_context: MockCFNginContext, + cfngin_context: MockCfnginContext, ) -> None: """Test create_ecs_service_role raise ClientError.""" stub = cfngin_context.add_stubber("iam") @@ -99,7 +99,7 @@ def test_create_ecs_service_role_raise_client_error( def test_ensure_server_cert_exists( - cfngin_context: MockCFNginContext, mocker: MockerFixture, tmp_path: Path + cfngin_context: MockCfnginContext, mocker: MockerFixture, tmp_path: Path ) -> None: """Test ensure_server_cert_exists.""" cert_name = "foo" @@ -155,7 +155,7 @@ def test_ensure_server_cert_exists( def test_ensure_server_cert_exists_already_exists( - cfngin_context: MockCFNginContext, + cfngin_context: MockCfnginContext, ) -> None: """Test ensure_server_cert_exists already exists.""" cert_name = "foo" @@ -188,7 +188,7 @@ def test_ensure_server_cert_exists_already_exists( def test_ensure_server_cert_exists_no_prompt_no_parameters( - cfngin_context: MockCFNginContext, mocker: MockerFixture + cfngin_context: MockCfnginContext, mocker: MockerFixture ) -> None: """Test ensure_server_cert_exists no prompt, not parameters.""" mocker.patch( @@ -205,7 +205,7 @@ def test_ensure_server_cert_exists_no_prompt_no_parameters( def test_ensure_server_cert_exists_prompt_no( - cfngin_context: MockCFNginContext, mocker: MockerFixture + cfngin_context: MockCfnginContext, mocker: MockerFixture ) -> None: """Test ensure_server_cert_exists prompt input no.""" mocker.patch( diff --git a/tests/unit/cfngin/hooks/test_keypair.py b/tests/unit/cfngin/hooks/test_keypair.py index 5f7e0e198..08212cbef 100644 --- a/tests/unit/cfngin/hooks/test_keypair.py +++ b/tests/unit/cfngin/hooks/test_keypair.py @@ -6,10 +6,10 @@ import os import sys from contextlib import contextmanager -from typing import TYPE_CHECKING, Iterator, NamedTuple, Tuple +from typing import TYPE_CHECKING, NamedTuple +from unittest import mock import boto3 -import mock import pytest from moto import mock_ec2, mock_ssm @@ -18,6 +18,7 @@ from ..factories import mock_context if TYPE_CHECKING: + from collections.abc import Iterator from pathlib import Path from runway.context import CfnginContext @@ -45,7 +46,7 @@ def ssh_key(cfngin_fixtures: Path) -> SSHKey: ) -@pytest.fixture +@pytest.fixture() def context() -> CfnginContext: """Mock context.""" return mock_context(namespace="fake") @@ -62,9 +63,8 @@ def ec2(ssh_key: SSHKey) -> Iterator[None]: "fingerprint": ssh_key.fingerprint, "material": ssh_key.private_key.decode("ascii"), } - with mock.patch("moto.ec2.models.random_key_pair", side_effect=[key_pair]): - with mock_ec2(): - yield + with mock.patch("moto.ec2.models.random_key_pair", side_effect=[key_pair]), mock_ec2(): + yield @pytest.fixture(autouse=True) @@ -75,13 +75,12 @@ def ssm() -> Iterator[None]: @contextmanager -def mock_input(lines: Tuple[str, ...] = (), isatty: bool = True) -> Iterator[mock.MagicMock]: +def mock_input(lines: tuple[str, ...] 
= (), isatty: bool = True) -> Iterator[mock.MagicMock]: """Mock input.""" with mock.patch( "runway.cfngin.hooks.keypair.get_raw_input", side_effect=lines - ) as mock_get_raw_input: - with mock.patch.object(sys.stdin, "isatty", return_value=isatty): - yield mock_get_raw_input + ) as mock_get_raw_input, mock.patch.object(sys.stdin, "isatty", return_value=isatty): + yield mock_get_raw_input def assert_key_present(hook_result: KeyPairInfo, key_name: str, fingerprint: str) -> None: diff --git a/tests/unit/cfngin/hooks/test_route53.py b/tests/unit/cfngin/hooks/test_route53.py index 78b3e4609..e40a0737a 100644 --- a/tests/unit/cfngin/hooks/test_route53.py +++ b/tests/unit/cfngin/hooks/test_route53.py @@ -9,12 +9,12 @@ if TYPE_CHECKING: from pytest_mock import MockerFixture - from ...factories import MockCFNginContext + from ...factories import MockCfnginContext MODULE = "runway.cfngin.hooks.route53" -def test_create_domain(cfngin_context: MockCFNginContext, mocker: MockerFixture) -> None: +def test_create_domain(cfngin_context: MockCfnginContext, mocker: MockerFixture) -> None: """Test create_domain.""" domain = "foo" create_route53_zone = mocker.patch(f"{MODULE}.create_route53_zone", return_value="bar") diff --git a/tests/unit/cfngin/hooks/test_utils.py b/tests/unit/cfngin/hooks/test_utils.py index 0d1960ef3..1ba1ee152 100644 --- a/tests/unit/cfngin/hooks/test_utils.py +++ b/tests/unit/cfngin/hooks/test_utils.py @@ -1,14 +1,16 @@ """Tests for runway.cfngin.hooks.utils.""" -# pyright: basic, reportUnknownArgumentType=none, reportUnknownVariableType=none +# pyright: reportUnknownArgumentType=none, reportUnknownVariableType=none from __future__ import annotations import queue import unittest -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any, ClassVar +from unittest.mock import call, patch -from mock import call, patch +import pytest +from runway.cfngin.hooks.base import HookArgsBaseModel from runway.cfngin.hooks.protocols import CfnginHookProtocol from runway.cfngin.hooks.utils import handle_hooks from runway.config.models.cfngin import CfnginHookDefinitionModel @@ -16,7 +18,7 @@ from ..factories import mock_context, mock_provider if TYPE_CHECKING: - from mock import MagicMock + from unittest.mock import MagicMock HOOK_QUEUE = queue.Queue() @@ -33,30 +35,34 @@ def test_empty_hook_stage(self) -> None: """Test empty hook stage.""" hooks = [] handle_hooks("fake", hooks, self.provider, self.context) - self.assertTrue(HOOK_QUEUE.empty()) + assert HOOK_QUEUE.empty() def test_missing_required_hook(self) -> None: """Test missing required hook.""" hooks = [CfnginHookDefinitionModel(path="not.a.real.path", required=True)] - with self.assertRaises(ImportError): + with pytest.raises(ImportError): handle_hooks("missing", hooks, self.provider, self.context) def test_missing_required_hook_method(self) -> None: """Test missing required hook method.""" - with self.assertRaises(AttributeError): - hooks = [CfnginHookDefinitionModel(path="runway.cfngin.hooks.blah", required=True)] - handle_hooks("missing", hooks, self.provider, self.context) + with pytest.raises(AttributeError): + handle_hooks( + "missing", + [CfnginHookDefinitionModel(path="runway.cfngin.hooks.blah", required=True)], + self.provider, + self.context, + ) def test_missing_non_required_hook_method(self) -> None: """Test missing non required hook method.""" hooks = [CfnginHookDefinitionModel(path="runway.cfngin.hooks.blah", required=False)] handle_hooks("missing", hooks, self.provider, self.context) - 
self.assertTrue(HOOK_QUEUE.empty()) + assert HOOK_QUEUE.empty() def test_default_required_hook(self) -> None: """Test default required hook.""" - hooks = [CfnginHookDefinitionModel(**{"path": "runway.cfngin.hooks.blah"})] - with self.assertRaises(AttributeError): + hooks = [CfnginHookDefinitionModel(path="runway.cfngin.hooks.blah")] + with pytest.raises(AttributeError): handle_hooks("missing", hooks, self.provider, self.context) @patch("runway.cfngin.hooks.utils.load_object_from_string") @@ -79,8 +85,8 @@ def test_valid_hook(self, mock_load: MagicMock) -> None: [call(hooks[0].path, try_reload=True), call(hooks[1].path, try_reload=True)] ) good = HOOK_QUEUE.get_nowait() - self.assertEqual(good["provider"].region, "us-east-1") - with self.assertRaises(queue.Empty): + assert good["provider"].region == "us-east-1" + with pytest.raises(queue.Empty): HOOK_QUEUE.get_nowait() def test_valid_enabled_hook(self) -> None: @@ -94,8 +100,8 @@ def test_valid_enabled_hook(self) -> None: ] handle_hooks("missing", hooks, self.provider, self.context) good = HOOK_QUEUE.get_nowait() - self.assertEqual(good["provider"].region, "us-east-1") - with self.assertRaises(queue.Empty): + assert good["provider"].region == "us-east-1" + with pytest.raises(queue.Empty): HOOK_QUEUE.get_nowait() def test_valid_enabled_false_hook(self) -> None: @@ -108,7 +114,7 @@ def test_valid_enabled_false_hook(self) -> None: ) ] handle_hooks("missing", hooks, self.provider, self.context) - self.assertTrue(HOOK_QUEUE.empty()) + assert HOOK_QUEUE.empty() def test_context_provided_to_hook(self) -> None: """Test context provided to hook.""" @@ -128,7 +134,7 @@ def test_hook_failure(self) -> None: required=True, ) ] - with self.assertRaises(SystemExit): + with pytest.raises(SystemExit): handle_hooks("fail", hooks, self.provider, self.context) hooks = [ CfnginHookDefinitionModel( @@ -136,7 +142,7 @@ def test_hook_failure(self) -> None: required=True, ) ] - with self.assertRaises(Exception): + with pytest.raises(Exception): # noqa: B017, PT011 handle_hooks("fail", hooks, self.provider, self.context) hooks = [ CfnginHookDefinitionModel( @@ -159,9 +165,9 @@ def test_return_data_hook(self) -> None: ] handle_hooks("result", hooks, self.provider, self.context) - self.assertEqual(self.context.hook_data["my_hook_results"]["foo"], "bar") + assert self.context.hook_data["my_hook_results"]["foo"] == "bar" # Verify only the first hook resulted in stored data - self.assertEqual(list(self.context.hook_data.keys()), ["my_hook_results"]) + assert list(self.context.hook_data.keys()) == ["my_hook_results"] def test_return_data_hook_duplicate_key(self) -> None: """Test return data hook duplicate key.""" @@ -176,7 +182,7 @@ def test_return_data_hook_duplicate_key(self) -> None: ), ] - with self.assertRaises(KeyError): + with pytest.raises(KeyError): handle_hooks("result", hooks, self.provider, self.context) def test_resolve_lookups_in_args(self) -> None: @@ -190,63 +196,63 @@ def test_resolve_lookups_in_args(self) -> None: ] handle_hooks("lookups", hooks, self.provider, self.context) - self.assertEqual( - self.context.hook_data["my_hook_results"]["default_lookup"], "default_value" - ) + assert self.context.hook_data["my_hook_results"]["default_lookup"] == "default_value" class MockHook(CfnginHookProtocol): """Mock hook class.""" - args: Dict[str, Any] + ARGS_PARSER: ClassVar[type[HookArgsBaseModel]] = HookArgsBaseModel + + args: dict[str, Any] - def __init__(self, **kwargs: Any) -> None: + def __init__(self, **_kwargs: Any) -> None: """Instantiate class.""" - 
self.args = {} + self.args = {} # type: ignore - def post_deploy(self) -> Dict[str, str]: + def post_deploy(self) -> dict[str, str]: """Run during the **post_deploy** stage.""" return {"status": "success"} - def post_destroy(self) -> Dict[str, str]: + def post_destroy(self) -> dict[str, str]: """Run during the **post_destroy** stage.""" return {"status": "success"} - def pre_deploy(self) -> Dict[str, str]: + def pre_deploy(self) -> dict[str, str]: """Run during the **pre_deploy** stage.""" return {"status": "success"} - def pre_destroy(self) -> Dict[str, str]: + def pre_destroy(self) -> dict[str, str]: """Run during the **pre_destroy** stage.""" return {"status": "success"} -def mock_hook(*args: Any, **kwargs: Any) -> bool: +def mock_hook(*_args: Any, **kwargs: Any) -> bool: """Mock hook.""" HOOK_QUEUE.put(kwargs) return True -def fail_hook(*args: Any, **kwargs: Any) -> None: +def fail_hook(*_args: Any, **_kwargs: Any) -> None: """Fail hook.""" - return None + return -def exception_hook(*args: Any, **kwargs: Any) -> None: +def exception_hook(*_args: Any, **_kwargs: Any) -> None: """Exception hook.""" raise Exception -def context_hook(*args: Any, **kwargs: Any) -> bool: +def context_hook(*_args: Any, **kwargs: Any) -> bool: """Context hook.""" return "context" in kwargs -def result_hook(*args: Any, **kwargs: Any) -> Dict[str, str]: +def result_hook(*_args: Any, **_kwargs: Any) -> dict[str, str]: """Results hook.""" return {"foo": "bar"} -def kwargs_hook(*args: Any, **kwargs: Any) -> Any: +def kwargs_hook(*_args: Any, **kwargs: Any) -> Any: """Kwargs hook.""" return kwargs diff --git a/tests/unit/cfngin/lookups/handlers/test_ami.py b/tests/unit/cfngin/lookups/handlers/test_ami.py index 8e72ae8c8..86bc853da 100644 --- a/tests/unit/cfngin/lookups/handlers/test_ami.py +++ b/tests/unit/cfngin/lookups/handlers/test_ami.py @@ -10,7 +10,7 @@ from runway.cfngin.lookups.handlers.ami import AmiLookup, ImageNotFound if TYPE_CHECKING: - from ....factories import MockCFNginContext + from ....factories import MockCfnginContext REGION = "us-east-1" @@ -18,7 +18,7 @@ class TestAMILookup: """Tests for runway.cfngin.lookups.handlers.ami.AmiLookup.""" - def test_basic_lookup_single_image(self, cfngin_context: MockCFNginContext) -> None: + def test_basic_lookup_single_image(self, cfngin_context: MockCfnginContext) -> None: """Test basic lookup single image.""" executable_users = ["123456789012", "234567890123"] stubber = cfngin_context.add_stubber("ec2") @@ -55,7 +55,7 @@ def test_basic_lookup_single_image(self, cfngin_context: MockCFNginContext) -> N == image_id ) - def test_basic_lookup_with_region(self, cfngin_context: MockCFNginContext) -> None: + def test_basic_lookup_with_region(self, cfngin_context: MockCfnginContext) -> None: """Test basic lookup with region.""" stubber = cfngin_context.add_stubber("ec2", region="us-west-1") image_id = "ami-fffccc111" @@ -86,7 +86,7 @@ def test_basic_lookup_with_region(self, cfngin_context: MockCFNginContext) -> No == image_id ) - def test_basic_lookup_multiple_images(self, cfngin_context: MockCFNginContext) -> None: + def test_basic_lookup_multiple_images(self, cfngin_context: MockCfnginContext) -> None: """Test basic lookup multiple images.""" stubber = cfngin_context.add_stubber("ec2") image_id = "ami-fffccc111" @@ -137,7 +137,7 @@ def test_basic_lookup_multiple_images(self, cfngin_context: MockCFNginContext) - ) def test_basic_lookup_multiple_images_name_match( - self, cfngin_context: MockCFNginContext + self, cfngin_context: MockCfnginContext ) -> None: """Test 
basic lookup multiple images name match.""" stubber = cfngin_context.add_stubber("ec2") @@ -178,7 +178,7 @@ def test_basic_lookup_multiple_images_name_match( == image_id ) - def test_basic_lookup_no_matching_images(self, cfngin_context: MockCFNginContext) -> None: + def test_basic_lookup_no_matching_images(self, cfngin_context: MockCfnginContext) -> None: """Test basic lookup no matching images.""" stubber = cfngin_context.add_stubber("ec2") stubber.add_response("describe_images", {"Images": []}) @@ -189,7 +189,7 @@ def test_basic_lookup_no_matching_images(self, cfngin_context: MockCFNginContext ) def test_basic_lookup_no_matching_images_from_name( - self, cfngin_context: MockCFNginContext + self, cfngin_context: MockCfnginContext ) -> None: """Test basic lookup no matching images from name.""" stubber = cfngin_context.add_stubber("ec2") diff --git a/tests/unit/cfngin/lookups/handlers/test_awslambda.py b/tests/unit/cfngin/lookups/handlers/test_awslambda.py index afc21fb57..c68f4255d 100644 --- a/tests/unit/cfngin/lookups/handlers/test_awslambda.py +++ b/tests/unit/cfngin/lookups/handlers/test_awslambda.py @@ -3,32 +3,26 @@ from __future__ import annotations from typing import TYPE_CHECKING +from unittest.mock import Mock import pytest -from mock import Mock from troposphere.awslambda import Code, Content -from runway.cfngin.exceptions import CfnginOnlyLookupError from runway.cfngin.hooks.awslambda.base_classes import AwsLambdaHook from runway.cfngin.hooks.awslambda.models.responses import AwsLambdaHookDeployResponse from runway.cfngin.lookups.handlers.awslambda import AwsLambdaLookup -from runway.config import CfnginConfig -from runway.config.models.cfngin import ( - CfnginConfigDefinitionModel, - CfnginHookDefinitionModel, -) from runway.lookups.handlers.base import LookupHandler if TYPE_CHECKING: from pytest_mock import MockerFixture - from runway.context import CfnginContext, RunwayContext + from runway.context import CfnginContext MODULE = "runway.cfngin.lookups.handlers.awslambda" QUERY = "test::foo=bar" -@pytest.fixture(scope="function") +@pytest.fixture() def hook_data() -> AwsLambdaHookDeployResponse: """Fixture for hook response data.""" return AwsLambdaHookDeployResponse( @@ -51,7 +45,8 @@ def test_get_deployment_package_data(self, hook_data: AwsLambdaHookDeployRespons data_key = "test.key" assert ( AwsLambdaLookup.get_deployment_package_data( - Mock(hook_data={data_key: hook_data.dict(by_alias=True)}), data_key + Mock(hook_data={data_key: hook_data.model_dump(by_alias=True)}), + data_key, ) == hook_data ) @@ -64,18 +59,16 @@ def test_get_deployment_package_data_set_hook_data( ) -> None: """Test get_deployment_package_data set hook_data when it's missing.""" data_key = "test.key" - hook = Mock(plan=Mock(return_value=hook_data.dict(by_alias=True))) + hook = Mock(plan=Mock(return_value=hook_data.model_dump(by_alias=True))) init_hook_class = mocker.patch.object(AwsLambdaLookup, "init_hook_class", return_value=hook) - get_required_hook_definition = mocker.patch.object( - AwsLambdaLookup, "get_required_hook_definition", return_value="hook_def" + get_hook_definition = mocker.patch.object( + AwsLambdaLookup, "get_hook_definition", return_value="hook_def" ) assert AwsLambdaLookup.get_deployment_package_data(cfngin_context, data_key) == hook_data - get_required_hook_definition.assert_called_once_with(cfngin_context.config, data_key) - init_hook_class.assert_called_once_with( - cfngin_context, get_required_hook_definition.return_value - ) + 
get_hook_definition.assert_called_once_with(cfngin_context.config, data_key) + init_hook_class.assert_called_once_with(cfngin_context, get_hook_definition.return_value) hook.plan.assert_called_once_with() - assert cfngin_context.hook_data[data_key] == hook_data.dict(by_alias=True) + assert cfngin_context.hook_data[data_key] == hook_data.model_dump(by_alias=True) def test_get_deployment_package_data_raise_type_error(self) -> None: """Test get_deployment_package_data.""" @@ -85,56 +78,6 @@ def test_get_deployment_package_data_raise_type_error(self) -> None: ) assert "expected AwsLambdaHookDeployResponseTypedDict, not " in str(excinfo.value) - def test_get_required_hook_definition(self) -> None: - """Test get_required_hook_definition.""" - data_key = "test.data" - expected_hook = CfnginHookDefinitionModel(data_key=data_key, path="foo.bar") - config = CfnginConfig( - CfnginConfigDefinitionModel( - namespace="test", - pre_deploy=[ - expected_hook, - CfnginHookDefinitionModel(data_key="foo", path="foo"), - ], - pre_destroy=[CfnginHookDefinitionModel(data_key=data_key, path="pre_destroy")], - post_deploy=[CfnginHookDefinitionModel(data_key=data_key, path="post_deploy")], - post_destroy=[CfnginHookDefinitionModel(data_key=data_key, path="post_destroy")], - ) - ) - assert AwsLambdaLookup.get_required_hook_definition(config, data_key) == expected_hook - - def test_get_required_hook_definition_raise_value_error_more_than_one(self) -> None: - """Test get_required_hook_definition raise ValueError for more than one.""" - data_key = "test.data" - expected_hook = CfnginHookDefinitionModel(data_key=data_key, path="foo.bar") - config = CfnginConfig( - CfnginConfigDefinitionModel( - namespace="test", - pre_deploy=[expected_hook, expected_hook], - ) - ) - with pytest.raises(ValueError) as excinfo: - assert not AwsLambdaLookup.get_required_hook_definition(config, data_key) - assert str(excinfo.value) == f"more than one hook definition found with data_key {data_key}" - - def test_get_required_hook_definition_raise_value_error_none(self) -> None: - """Test get_required_hook_definition raise ValueError none found.""" - data_key = "test.data" - config = CfnginConfig( - CfnginConfigDefinitionModel( - namespace="test", - pre_deploy=[ - CfnginHookDefinitionModel(data_key="foo", path="foo"), - ], - pre_destroy=[CfnginHookDefinitionModel(data_key=data_key, path="pre_destroy")], - post_deploy=[CfnginHookDefinitionModel(data_key=data_key, path="post_deploy")], - post_destroy=[CfnginHookDefinitionModel(data_key=data_key, path="post_destroy")], - ) - ) - with pytest.raises(ValueError) as excinfo: - assert not AwsLambdaLookup.get_required_hook_definition(config, data_key) - assert str(excinfo.value) == f"no hook definition found with data_key {data_key}" - def test_handle(self, mocker: MockerFixture) -> None: """Test handle.""" context = Mock() @@ -154,11 +97,6 @@ def test_handle(self, mocker: MockerFixture) -> None: ) mock_format_results.assert_not_called() - def test_handle_raise_cfngin_only_lookup_error(self, runway_context: RunwayContext) -> None: - """Test handle raise CfnginOnlyLookupError.""" - with pytest.raises(CfnginOnlyLookupError): - AwsLambdaLookup.handle("test", runway_context) - def test_init_hook_class(self, mocker: MockerFixture) -> None: """Test init_hook_class.""" context = Mock() @@ -219,21 +157,21 @@ def test_handle(self, hook_data: AwsLambdaHookDeployResponse, mocker: MockerFixt LookupHandler, "format_results", return_value="success" ) mock_handle = mocker.patch.object(AwsLambdaLookup, "handle", 
return_value=hook_data) - result = AwsLambdaLookup.Code.handle(QUERY, context, "arg", foo="bar") + result = AwsLambdaLookup.Code.handle(QUERY, context, foo="bar") assert isinstance(result, Code) assert not hasattr(result, "ImageUri") assert result.S3Bucket == hook_data.bucket_name assert result.S3Key == hook_data.object_key assert result.S3ObjectVersion == hook_data.object_version_id assert not hasattr(result, "ZipFile") - mock_handle.assert_called_once_with(QUERY, context, "arg", foo="bar") + mock_handle.assert_called_once_with(QUERY, context, foo="bar") mock_format_results.assert_not_called() def test_type_name(self) -> None: """Test TYPE_NAME.""" assert ( - AwsLambdaLookup.Code.TYPE_NAME - == f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.Code.__name__}" + f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.Code.__name__}" + == AwsLambdaLookup.Code.TYPE_NAME ) @@ -247,18 +185,15 @@ def test_handle(self, hook_data: AwsLambdaHookDeployResponse, mocker: MockerFixt LookupHandler, "format_results", return_value="success" ) mock_handle = mocker.patch.object(AwsLambdaLookup, "handle", return_value=hook_data) - assert ( - AwsLambdaLookup.CodeSha256.handle(QUERY, context, "arg", foo="bar") - == hook_data.code_sha256 - ) - mock_handle.assert_called_once_with(QUERY, context, "arg", foo="bar") + assert AwsLambdaLookup.CodeSha256.handle(QUERY, context, foo="bar") == hook_data.code_sha256 + mock_handle.assert_called_once_with(QUERY, context, foo="bar") mock_format_results.assert_not_called() def test_type_name(self) -> None: """Test TYPE_NAME.""" assert ( - AwsLambdaLookup.CodeSha256.TYPE_NAME - == f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.CodeSha256.__name__}" + f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.CodeSha256.__name__}" + == AwsLambdaLookup.CodeSha256.TYPE_NAME ) @@ -273,17 +208,17 @@ def test_handle(self, hook_data: AwsLambdaHookDeployResponse, mocker: MockerFixt ) mock_handle = mocker.patch.object(AwsLambdaLookup, "handle", return_value=hook_data) assert ( - AwsLambdaLookup.CompatibleArchitectures.handle(QUERY, context, "arg", foo="bar") + AwsLambdaLookup.CompatibleArchitectures.handle(QUERY, context, foo="bar") == mock_format_results.return_value ) - mock_handle.assert_called_once_with(QUERY, context, "arg", foo="bar") + mock_handle.assert_called_once_with(QUERY, context, foo="bar") mock_format_results.assert_called_once_with(hook_data.compatible_architectures, foo="bar") def test_type_name(self) -> None: """Test TYPE_NAME.""" assert ( - AwsLambdaLookup.CompatibleArchitectures.TYPE_NAME - == f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.CompatibleArchitectures.__name__}" + f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.CompatibleArchitectures.__name__}" + == AwsLambdaLookup.CompatibleArchitectures.TYPE_NAME ) @@ -298,17 +233,17 @@ def test_handle(self, hook_data: AwsLambdaHookDeployResponse, mocker: MockerFixt ) mock_handle = mocker.patch.object(AwsLambdaLookup, "handle", return_value=hook_data) assert ( - AwsLambdaLookup.CompatibleRuntimes.handle(QUERY, context, "arg", foo="bar") + AwsLambdaLookup.CompatibleRuntimes.handle(QUERY, context, foo="bar") == mock_format_results.return_value ) - mock_handle.assert_called_once_with(QUERY, context, "arg", foo="bar") + mock_handle.assert_called_once_with(QUERY, context, foo="bar") mock_format_results.assert_called_once_with(hook_data.compatible_runtimes, foo="bar") def test_type_name(self) -> None: """Test TYPE_NAME.""" assert ( - AwsLambdaLookup.CompatibleRuntimes.TYPE_NAME - == 
f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.CompatibleRuntimes.__name__}" + f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.CompatibleRuntimes.__name__}" + == AwsLambdaLookup.CompatibleRuntimes.TYPE_NAME ) @@ -322,20 +257,20 @@ def test_handle(self, hook_data: AwsLambdaHookDeployResponse, mocker: MockerFixt LookupHandler, "format_results", return_value="success" ) mock_handle = mocker.patch.object(AwsLambdaLookup, "handle", return_value=hook_data) - result = AwsLambdaLookup.Content.handle(QUERY, context, "arg", foo="bar") + result = AwsLambdaLookup.Content.handle(QUERY, context, foo="bar") assert isinstance(result, Content) assert not hasattr(result, "ImageUri") assert result.S3Bucket == hook_data.bucket_name assert result.S3Key == hook_data.object_key assert result.S3ObjectVersion == hook_data.object_version_id - mock_handle.assert_called_once_with(QUERY, context, "arg", foo="bar") + mock_handle.assert_called_once_with(QUERY, context, foo="bar") mock_format_results.assert_not_called() def test_type_name(self) -> None: """Test TYPE_NAME.""" assert ( - AwsLambdaLookup.Content.TYPE_NAME - == f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.Content.__name__}" + f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.Content.__name__}" + == AwsLambdaLookup.Content.TYPE_NAME ) @@ -350,17 +285,17 @@ def test_handle(self, hook_data: AwsLambdaHookDeployResponse, mocker: MockerFixt ) mock_handle = mocker.patch.object(AwsLambdaLookup, "handle", return_value=hook_data) assert ( - AwsLambdaLookup.LicenseInfo.handle(QUERY, context, "arg", foo="bar") + AwsLambdaLookup.LicenseInfo.handle(QUERY, context, foo="bar") == mock_format_results.return_value ) - mock_handle.assert_called_once_with(QUERY, context, "arg", foo="bar") + mock_handle.assert_called_once_with(QUERY, context, foo="bar") mock_format_results.assert_called_once_with(hook_data.license, foo="bar") def test_type_name(self) -> None: """Test TYPE_NAME.""" assert ( - AwsLambdaLookup.LicenseInfo.TYPE_NAME - == f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.LicenseInfo.__name__}" + f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.LicenseInfo.__name__}" + == AwsLambdaLookup.LicenseInfo.TYPE_NAME ) @@ -374,15 +309,15 @@ def test_handle(self, hook_data: AwsLambdaHookDeployResponse, mocker: MockerFixt LookupHandler, "format_results", return_value="success" ) mock_handle = mocker.patch.object(AwsLambdaLookup, "handle", return_value=hook_data) - assert AwsLambdaLookup.Runtime.handle(QUERY, context, "arg", foo="bar") == hook_data.runtime - mock_handle.assert_called_once_with(QUERY, context, "arg", foo="bar") + assert AwsLambdaLookup.Runtime.handle(QUERY, context, foo="bar") == hook_data.runtime + mock_handle.assert_called_once_with(QUERY, context, foo="bar") mock_format_results.assert_not_called() def test_type_name(self) -> None: """Test TYPE_NAME.""" assert ( - AwsLambdaLookup.Runtime.TYPE_NAME - == f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.Runtime.__name__}" + f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.Runtime.__name__}" + == AwsLambdaLookup.Runtime.TYPE_NAME ) @@ -396,18 +331,15 @@ def test_handle(self, hook_data: AwsLambdaHookDeployResponse, mocker: MockerFixt LookupHandler, "format_results", return_value="success" ) mock_handle = mocker.patch.object(AwsLambdaLookup, "handle", return_value=hook_data) - assert ( - AwsLambdaLookup.S3Bucket.handle(QUERY, context, "arg", foo="bar") - == hook_data.bucket_name - ) - mock_handle.assert_called_once_with(QUERY, context, "arg", foo="bar") + assert AwsLambdaLookup.S3Bucket.handle(QUERY, context, foo="bar") == 
hook_data.bucket_name + mock_handle.assert_called_once_with(QUERY, context, foo="bar") mock_format_results.assert_not_called() def test_type_name(self) -> None: """Test TYPE_NAME.""" assert ( - AwsLambdaLookup.S3Bucket.TYPE_NAME - == f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.S3Bucket.__name__}" + f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.S3Bucket.__name__}" + == AwsLambdaLookup.S3Bucket.TYPE_NAME ) @@ -421,17 +353,15 @@ def test_handle(self, hook_data: AwsLambdaHookDeployResponse, mocker: MockerFixt LookupHandler, "format_results", return_value="success" ) mock_handle = mocker.patch.object(AwsLambdaLookup, "handle", return_value=hook_data) - assert ( - AwsLambdaLookup.S3Key.handle(QUERY, context, "arg", foo="bar") == hook_data.object_key - ) - mock_handle.assert_called_once_with(QUERY, context, "arg", foo="bar") + assert AwsLambdaLookup.S3Key.handle(QUERY, context, foo="bar") == hook_data.object_key + mock_handle.assert_called_once_with(QUERY, context, foo="bar") mock_format_results.assert_not_called() def test_type_name(self) -> None: """Test TYPE_NAME.""" assert ( - AwsLambdaLookup.S3Key.TYPE_NAME - == f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.S3Key.__name__}" + f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.S3Key.__name__}" + == AwsLambdaLookup.S3Key.TYPE_NAME ) @@ -446,15 +376,15 @@ def test_handle(self, hook_data: AwsLambdaHookDeployResponse, mocker: MockerFixt ) mock_handle = mocker.patch.object(AwsLambdaLookup, "handle", return_value=hook_data) assert ( - AwsLambdaLookup.S3ObjectVersion.handle(QUERY, context, "arg", foo="bar") + AwsLambdaLookup.S3ObjectVersion.handle(QUERY, context, foo="bar") == hook_data.object_version_id ) - mock_handle.assert_called_once_with(QUERY, context, "arg", foo="bar") + mock_handle.assert_called_once_with(QUERY, context, foo="bar") mock_format_results.assert_not_called() def test_type_name(self) -> None: """Test TYPE_NAME.""" assert ( - AwsLambdaLookup.S3ObjectVersion.TYPE_NAME - == f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.S3ObjectVersion.__name__}" + f"{AwsLambdaLookup.TYPE_NAME}.{AwsLambdaLookup.S3ObjectVersion.__name__}" + == AwsLambdaLookup.S3ObjectVersion.TYPE_NAME ) diff --git a/tests/unit/cfngin/lookups/handlers/test_default.py b/tests/unit/cfngin/lookups/handlers/test_default.py index 04f88b34e..ea77f9783 100644 --- a/tests/unit/cfngin/lookups/handlers/test_default.py +++ b/tests/unit/cfngin/lookups/handlers/test_default.py @@ -1,9 +1,9 @@ """Tests for runway.cfngin.lookups.handlers.default.""" -# pyright: basic import unittest +from unittest.mock import MagicMock -from mock import MagicMock +import pytest from runway.cfngin.lookups.handlers.default import DefaultLookup from runway.context import CfnginContext @@ -31,6 +31,5 @@ def test_env_var_missing(self) -> None: def test_invalid_value(self) -> None: """Test invalid value.""" - with self.assertRaises(ValueError): - value = "env_var:fallback" - DefaultLookup.handle(value, provider=self.provider, context=self.context) + with pytest.raises(ValueError): # noqa: PT011 + DefaultLookup.handle("env_var:fallback", provider=self.provider, context=self.context) diff --git a/tests/unit/cfngin/lookups/handlers/test_dynamodb.py b/tests/unit/cfngin/lookups/handlers/test_dynamodb.py index 14c44b7ec..c0600680f 100644 --- a/tests/unit/cfngin/lookups/handlers/test_dynamodb.py +++ b/tests/unit/cfngin/lookups/handlers/test_dynamodb.py @@ -1,16 +1,15 @@ """Tests for runway.cfngin.lookups.handlers.dynamodb.""" -# pyright: basic from __future__ import annotations -from typing import 
TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any import pytest from runway.cfngin.lookups.handlers.dynamodb import DynamodbLookup, QueryDataModel if TYPE_CHECKING: - from ....factories import MockCFNginContext + from ....factories import MockCfnginContext GET_ITEM_RESPONSE = { "Item": { @@ -57,7 +56,7 @@ class TestDynamoDBHandler: ) def test_handle( self, - cfngin_context: MockCFNginContext, + cfngin_context: MockCfnginContext, expected_projection: str, expected_result: str, query: str, @@ -74,7 +73,7 @@ def test_handle( assert DynamodbLookup.handle(query, context=cfngin_context) == expected_result stubber.assert_no_pending_responses() - def test_handle_client_error(self, cfngin_context: MockCFNginContext) -> None: + def test_handle_client_error(self, cfngin_context: MockCfnginContext) -> None: """Test handle ClientError.""" stubber = cfngin_context.add_stubber("dynamodb") expected_params = { @@ -87,21 +86,20 @@ def test_handle_client_error(self, cfngin_context: MockCFNginContext) -> None: expected_params=expected_params, ) query = "TestTable@FakeKey:TestVal.TestMap[M].String1" - with stubber, pytest.raises(ValueError) as excinfo: + with ( + stubber, + pytest.raises(ValueError, match="The DynamoDB lookup '.*' encountered an error: .*"), + ): DynamodbLookup.handle(query, context=cfngin_context) stubber.assert_no_pending_responses() - assert str(excinfo.value).startswith( - f"The DynamoDB lookup '{query}' encountered an error: " - ) - def test_handle_empty_table_name(self, cfngin_context: MockCFNginContext) -> None: + def test_handle_empty_table_name(self, cfngin_context: MockCfnginContext) -> None: """Test handle with empty table_name.""" query = "@TestKey:TestVal.TestMap[M].String1" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="Query '.*' doesn't match regex:"): DynamodbLookup.handle(query, context=cfngin_context) - assert str(excinfo.value).startswith(f"Query '{query}' doesn't match regex:") - def test_handle_invalid_partition_key(self, cfngin_context: MockCFNginContext) -> None: + def test_handle_invalid_partition_key(self, cfngin_context: MockCfnginContext) -> None: """Test handle with invalid partition key.""" stubber = cfngin_context.add_stubber("dynamodb") expected_params = { @@ -116,14 +114,19 @@ def test_handle_invalid_partition_key(self, cfngin_context: MockCFNginContext) - expected_params=expected_params, ) - with stubber, pytest.raises(ValueError) as excinfo: + with ( + stubber, + pytest.raises( + ValueError, + match="No DynamoDB record matched the partition key: FakeKey", + ), + ): DynamodbLookup.handle( "TestTable@FakeKey:TestVal.TestMap[M].String1", context=cfngin_context ) stubber.assert_no_pending_responses() - assert str(excinfo.value) == "No DynamoDB record matched the partition key: FakeKey" - def test_handle_invalid_partition_value(self, cfngin_context: MockCFNginContext) -> None: + def test_handle_invalid_partition_value(self, cfngin_context: MockCfnginContext) -> None: """Test handle with invalid partition value.""" stubber = cfngin_context.add_stubber("dynamodb") expected_params = { @@ -131,18 +134,21 @@ def test_handle_invalid_partition_value(self, cfngin_context: MockCFNginContext) "Key": {"TestKey": {"S": "FakeVal"}}, "ProjectionExpression": "TestKey,TestMap,String1", } - empty_response: Dict[str, Any] = {"ResponseMetadata": {}} + empty_response: dict[str, Any] = {"ResponseMetadata": {}} stubber.add_response("get_item", empty_response, expected_params) - with stubber, pytest.raises(ValueError) as excinfo: + 
with ( + stubber, + pytest.raises( + ValueError, + match="The DynamoDB record could not be found using the following: " + "{'TestKey': {'S': 'FakeVal'}}", + ), + ): DynamodbLookup.handle( "TestTable@TestKey:FakeVal.TestMap[M].String1", context=cfngin_context ) - assert ( - str(excinfo.value) == "The DynamoDB record could not be found using the following: " - "{'TestKey': {'S': 'FakeVal'}}" - ) - def test_handle_list(self, cfngin_context: MockCFNginContext) -> None: + def test_handle_list(self, cfngin_context: MockCfnginContext) -> None: """Test handle return list.""" stubber = cfngin_context.add_stubber("dynamodb") expected_params = { @@ -157,16 +163,13 @@ def test_handle_list(self, cfngin_context: MockCFNginContext) -> None: ) == ["ListVal1", "ListVal2"] stubber.assert_no_pending_responses() - def test_handle_missing_table_name(self, cfngin_context: MockCFNginContext) -> None: + def test_handle_missing_table_name(self, cfngin_context: MockCfnginContext) -> None: """Test handle missing table_name.""" query = "TestKey:TestVal.TestMap[M].String1" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="'.*' missing delimiter for DynamoDB Table name:"): DynamodbLookup.handle(query, context=cfngin_context) - assert str(excinfo.value).startswith( - f"'{query}' missing delimiter for DynamoDB Table name:" - ) - def test_handle_number(self, cfngin_context: MockCFNginContext) -> None: + def test_handle_number(self, cfngin_context: MockCfnginContext) -> None: """Test handle return number.""" stubber = cfngin_context.add_stubber("dynamodb") expected_params = { @@ -186,7 +189,7 @@ def test_handle_number(self, cfngin_context: MockCFNginContext) -> None: ) stubber.assert_no_pending_responses() - def test_handle_table_not_found(self, cfngin_context: MockCFNginContext) -> None: + def test_handle_table_not_found(self, cfngin_context: MockCfnginContext) -> None: """Test handle DDB Table not found.""" stubber = cfngin_context.add_stubber("dynamodb") expected_params = { @@ -200,20 +203,21 @@ def test_handle_table_not_found(self, cfngin_context: MockCFNginContext) -> None service_error_code=service_error_code, expected_params=expected_params, ) - with stubber, pytest.raises(ValueError) as excinfo: + with ( + stubber, + pytest.raises(ValueError, match="Can't find the DynamoDB table: FakeTable"), + ): DynamodbLookup.handle( "FakeTable@TestKey:TestVal.TestMap[M].String1", context=cfngin_context ) stubber.assert_no_pending_responses() - assert str(excinfo.value) == "Can't find the DynamoDB table: FakeTable" - def test_handle_unsupported_data_type(self, cfngin_context: MockCFNginContext) -> None: + def test_handle_unsupported_data_type(self, cfngin_context: MockCfnginContext) -> None: """Test handle with unsupported data type.""" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="CFNgin does not support looking up the data type: B"): DynamodbLookup.handle( "TestTable@TestKey:FakeVal.TestStringSet[B]", context=cfngin_context ) - assert str(excinfo.value) == "CFNgin does not support looking up the data type: B" class TestQueryDataModel: @@ -228,7 +232,7 @@ class TestQueryDataModel: ("TestVal[S]", {"S": "TestVal"}), ], ) - def test_item_key(self, expected: Dict[str, Any], value: str) -> None: + def test_item_key(self, expected: dict[str, Any], value: str) -> None: """Test item_key.""" assert QueryDataModel( attribute="", @@ -245,8 +249,8 @@ def test_item_key_no_match(self) -> None: partition_key_value="TestVal[L]", table_name="", ) - with 
pytest.raises(ValueError) as excinfo: + with pytest.raises( + ValueError, + match="Partition key value '.*' doesn't match regex: .*", + ): assert obj.item_key - assert str(excinfo.value).startswith( - f"Partition key value '{obj.partition_key_value}' doesn't match regex:" - ) diff --git a/tests/unit/cfngin/lookups/handlers/test_envvar.py b/tests/unit/cfngin/lookups/handlers/test_envvar.py index f5d011d5a..c7e3f6703 100644 --- a/tests/unit/cfngin/lookups/handlers/test_envvar.py +++ b/tests/unit/cfngin/lookups/handlers/test_envvar.py @@ -1,9 +1,10 @@ """Tests for runway.cfngin.lookups.handlers.envvar.""" -# pyright: basic import os import unittest +import pytest + from runway.cfngin.lookups.handlers.envvar import EnvvarLookup @@ -20,9 +21,9 @@ def setUp(self) -> None: def test_valid_envvar(self) -> None: """Test valid envvar.""" value = EnvvarLookup.handle(self.testkey) - self.assertEqual(value, self.testval) + assert value == self.testval def test_invalid_envvar(self) -> None: """Test invalid envvar.""" - with self.assertRaises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 EnvvarLookup.handle(self.invalidtestkey) diff --git a/tests/unit/cfngin/lookups/handlers/test_file.py b/tests/unit/cfngin/lookups/handlers/test_file.py index 93612faba..79e798123 100644 --- a/tests/unit/cfngin/lookups/handlers/test_file.py +++ b/tests/unit/cfngin/lookups/handlers/test_file.py @@ -156,13 +156,8 @@ def test_handle_raise_validation_error(self) -> None: def test_handle_raise_value_error(self) -> None: """Test handle raise ValueError.""" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="Query 'foo' doesn't match regex: "): FileLookup.handle("foo") - assert ( - str(excinfo.value) == "Query 'foo' doesn't match regex: " - "^(?P[base64|json|json-parameterized|parameterized|" - "parameterized-b64|plain|yaml|yaml-parameterized]:.+$)" - ) def test_handle_yaml(self, tmp_path: Path) -> None: """Test handle yaml.""" diff --git a/tests/unit/cfngin/lookups/handlers/test_hook_data.py b/tests/unit/cfngin/lookups/handlers/test_hook_data.py index 7b4b9fd46..bd50703b9 100644 --- a/tests/unit/cfngin/lookups/handlers/test_hook_data.py +++ b/tests/unit/cfngin/lookups/handlers/test_hook_data.py @@ -12,13 +12,13 @@ from runway.variables import Variable if TYPE_CHECKING: - from ....factories import MockCFNginContext + from ....factories import MockCfnginContext class TestHookDataLookup: """Tests for runway.cfngin.lookups.handlers.hook_data.HookDataLookup.""" - def test_handle(self, cfngin_context: MockCFNginContext) -> None: + def test_handle(self, cfngin_context: MockCfnginContext) -> None: """Test handle with simple usage.""" cfngin_context.set_hook_data("fake_hook", {"nested": {"result": "good"}}) var_top = Variable("test", "${hook_data fake_hook}", variable_type="cfngin") @@ -31,7 +31,7 @@ def test_handle(self, cfngin_context: MockCFNginContext) -> None: assert var_top.value == {"nested": {"result": "good"}} assert var_nested.value == "good" - def test_default(self, cfngin_context: MockCFNginContext) -> None: + def test_default(self, cfngin_context: MockCfnginContext) -> None: """Test handle with a default value.""" cfngin_context.set_hook_data("fake_hook", {"nested": {"result": "good"}}) var_top = Variable( @@ -48,7 +48,7 @@ def test_default(self, cfngin_context: MockCFNginContext) -> None: assert var_top.value == "something" assert var_nested.value == "something" - def test_not_found(self, cfngin_context: MockCFNginContext) -> None: + def test_not_found(self, cfngin_context: 
MockCfnginContext) -> None: """Test value not found and no default.""" variable = Variable("test", "${hook_data fake_hook.bad.result}", variable_type="cfngin") with pytest.raises(FailedVariableLookup) as err: @@ -59,7 +59,7 @@ def test_not_found(self, cfngin_context: MockCFNginContext) -> None: ) assert "Could not find a value for" in str(err.value.__cause__) - def test_troposphere(self, cfngin_context: MockCFNginContext) -> None: + def test_troposphere(self, cfngin_context: MockCfnginContext) -> None: """Test with troposphere object like returned from lambda hook.""" bucket = "test-bucket" s3_key = "lambda_functions/my_function" diff --git a/tests/unit/cfngin/lookups/handlers/test_kms.py b/tests/unit/cfngin/lookups/handlers/test_kms.py index 8a3b9497d..cbcfc06da 100644 --- a/tests/unit/cfngin/lookups/handlers/test_kms.py +++ b/tests/unit/cfngin/lookups/handlers/test_kms.py @@ -12,7 +12,7 @@ from runway.cfngin.lookups.handlers.kms import KmsLookup if TYPE_CHECKING: - from ....factories import MockCFNginContext + from ....factories import MockCfnginContext SECRET = "my secret" @@ -20,7 +20,7 @@ class TestKMSHandler: """Tests for runway.cfngin.lookups.handlers.kms.KmsLookup.""" - def test_handle(self, cfngin_context: MockCFNginContext) -> None: + def test_handle(self, cfngin_context: MockCfnginContext) -> None: """Test handle.""" stubber = cfngin_context.add_stubber("kms") stubber.add_response( @@ -34,7 +34,7 @@ def test_handle(self, cfngin_context: MockCFNginContext) -> None: stubber.assert_no_pending_responses() @pytest.mark.parametrize("template", ["${region}@${blob}", "${blob}::region=${region}"]) - def test_handle_with_region(self, cfngin_context: MockCFNginContext, template: str) -> None: + def test_handle_with_region(self, cfngin_context: MockCfnginContext, template: str) -> None: """Test handle with region.""" region = "us-west-2" query = string.Template(template).substitute({"blob": SECRET, "region": region}) diff --git a/tests/unit/cfngin/lookups/handlers/test_output.py b/tests/unit/cfngin/lookups/handlers/test_output.py index bb540a5e3..d2fa6592e 100644 --- a/tests/unit/cfngin/lookups/handlers/test_output.py +++ b/tests/unit/cfngin/lookups/handlers/test_output.py @@ -3,9 +3,9 @@ from __future__ import annotations from typing import TYPE_CHECKING +from unittest.mock import MagicMock import pytest -from mock import MagicMock from runway._logging import LogLevels from runway.cfngin.exceptions import StackDoesNotExist @@ -17,10 +17,9 @@ from ...factories import generate_definition if TYPE_CHECKING: - from pytest import LogCaptureFixture from pytest_mock import MockerFixture - from ....factories import MockCFNginContext + from ....factories import MockCfnginContext MODULE = "runway.cfngin.lookups.handlers.output" @@ -61,7 +60,7 @@ def test_dependencies_not_resolved(self) -> None: ("stack-name.foo::default=bar", "bar"), ], ) - def test_handle(self, cfngin_context: MockCFNginContext, expected: str, provided: str) -> None: + def test_handle(self, cfngin_context: MockCfnginContext, expected: str, provided: str) -> None: """Test handle.""" stack = Stack(definition=generate_definition("stack-name"), context=cfngin_context) stack.set_outputs({"Output": "output-val"}) @@ -70,7 +69,7 @@ def test_handle(self, cfngin_context: MockCFNginContext, expected: str, provided @pytest.mark.parametrize("provided", ["stack-name.MissingOutput", "stack-name::MissingOutput"]) def test_handle_raise_output_does_not_exist( - self, cfngin_context: MockCFNginContext, provided: str + self, cfngin_context: 
MockCfnginContext, provided: str ) -> None: """Test handle raise OutputDoesNotExist.""" stack = Stack(definition=generate_definition("stack-name"), context=cfngin_context) @@ -85,7 +84,7 @@ def test_handle_raise_output_does_not_exist( @pytest.mark.parametrize("provided", ["stack-name.Output", "stack-name::Output"]) def test_handle_raise_stack_does_not_exist( - self, cfngin_context: MockCFNginContext, provided: str + self, cfngin_context: MockCfnginContext, provided: str ) -> None: """Test handle raise StackDoesNotExist.""" with pytest.raises( @@ -94,7 +93,7 @@ def test_handle_raise_stack_does_not_exist( ): OutputLookup.handle(provided, context=cfngin_context) - def test_legacy_parse(self, caplog: LogCaptureFixture, mocker: MockerFixture) -> None: + def test_legacy_parse(self, caplog: pytest.LogCaptureFixture, mocker: MockerFixture) -> None: """Test legacy_parse.""" query = "foo" caplog.set_level(LogLevels.WARNING, MODULE) diff --git a/tests/unit/cfngin/lookups/handlers/test_rxref.py b/tests/unit/cfngin/lookups/handlers/test_rxref.py index 524b33e38..43bb4a62d 100644 --- a/tests/unit/cfngin/lookups/handlers/test_rxref.py +++ b/tests/unit/cfngin/lookups/handlers/test_rxref.py @@ -3,18 +3,17 @@ from __future__ import annotations from typing import TYPE_CHECKING +from unittest.mock import Mock import pytest -from mock import Mock from runway._logging import LogLevels from runway.cfngin.lookups.handlers.rxref import RxrefLookup if TYPE_CHECKING: - from pytest import LogCaptureFixture from pytest_mock import MockerFixture - from ....factories import MockCFNginContext + from ....factories import MockCfnginContext MODULE = "runway.cfngin.lookups.handlers.rxref" @@ -35,7 +34,7 @@ class TestRxrefLookup: ) def test_handle( self, - cfngin_context: MockCFNginContext, + cfngin_context: MockCfnginContext, expected: str, mocker: MockerFixture, provided: str, @@ -48,7 +47,7 @@ def test_handle( assert RxrefLookup.handle(provided, context=cfngin_context, provider=provider) cfn.handle.assert_called_once_with(expected, context=cfngin_context, provider=provider) - def test_legacy_parse(self, caplog: LogCaptureFixture, mocker: MockerFixture) -> None: + def test_legacy_parse(self, caplog: pytest.LogCaptureFixture, mocker: MockerFixture) -> None: """Test legacy_parse.""" query = "foo" caplog.set_level(LogLevels.WARNING, MODULE) diff --git a/tests/unit/cfngin/lookups/handlers/test_split.py b/tests/unit/cfngin/lookups/handlers/test_split.py index aea2141bd..68beb1386 100644 --- a/tests/unit/cfngin/lookups/handlers/test_split.py +++ b/tests/unit/cfngin/lookups/handlers/test_split.py @@ -1,8 +1,9 @@ """Tests for runway.cfngin.lookups.handlers.split.""" -# pyright: basic import unittest +import pytest + from runway.cfngin.lookups.handlers.split import SplitLookup @@ -24,5 +25,5 @@ def test_multi_character_split(self) -> None: def test_invalid_value_split(self) -> None: """Test invalid value split.""" value = ",:a,b,c" - with self.assertRaises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 SplitLookup.handle(value) diff --git a/tests/unit/cfngin/lookups/handlers/test_xref.py b/tests/unit/cfngin/lookups/handlers/test_xref.py index dd7472058..ec389d604 100644 --- a/tests/unit/cfngin/lookups/handlers/test_xref.py +++ b/tests/unit/cfngin/lookups/handlers/test_xref.py @@ -2,8 +2,7 @@ # pyright: basic, reportUnknownArgumentType=none, reportUnknownVariableType=none import unittest - -from mock import MagicMock +from unittest.mock import MagicMock from runway.cfngin.lookups.handlers.xref import XrefLookup @@ -24,8 
+23,8 @@ def test_xref_handler(self) -> None: provider=self.provider, context=self.context, ) - self.assertEqual(value, "Test Output") - self.assertEqual(self.context.get_fqn.call_count, 0) + assert value == "Test Output" + assert self.context.get_fqn.call_count == 0 args = self.provider.get_output.call_args - self.assertEqual(args[0][0], "fully-qualified-stack-name") - self.assertEqual(args[0][1], "SomeOutput") + assert args[0][0] == "fully-qualified-stack-name" + assert args[0][1] == "SomeOutput" diff --git a/tests/unit/cfngin/providers/aws/test_default.py b/tests/unit/cfngin/providers/aws/test_default.py index c03e7a094..37f8eeb35 100644 --- a/tests/unit/cfngin/providers/aws/test_default.py +++ b/tests/unit/cfngin/providers/aws/test_default.py @@ -1,25 +1,23 @@ """Tests for runway.cfngin.providers.aws.default.""" -# pyright: basic from __future__ import annotations import copy import locale -import os.path import random import string import threading import unittest +from contextlib import suppress from datetime import datetime from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional +from typing import TYPE_CHECKING, Any, Optional +from unittest.mock import MagicMock, patch import boto3 import pytest from botocore.exceptions import ClientError, UnStubbedResponseError from botocore.stub import Stubber -from mock import MagicMock, patch -from typing_extensions import Literal from runway.cfngin import exceptions from runway.cfngin.actions.diff import DictValue @@ -48,6 +46,7 @@ StackTypeDef, ) from pytest_mock import MockerFixture + from typing_extensions import Literal from runway.core.providers.aws.type_defs import TagSetTypeDef @@ -59,7 +58,7 @@ def random_string(length: int = 12) -> str: length: The # of characters to use in the random string. 
""" - return "".join([random.choice(string.ascii_letters) for _ in range(length)]) + return "".join([random.choice(string.ascii_letters) for _ in range(length)]) # noqa: S311 def generate_describe_stacks_stack( @@ -105,18 +104,16 @@ def generate_describe_stacks_stack( def generate_get_template( - file_name: str = "cfn_template.json", stages_available: Optional[List[str]] = None -) -> Dict[str, Any]: + file_name: str = "cfn_template.json", stages_available: list[str] | None = None +) -> dict[str, Any]: """Generate get template.""" - fixture_dir = os.path.join(os.path.dirname(__file__), "../../fixtures") - with open(os.path.join(fixture_dir, file_name), "r", encoding="utf-8") as _file: - return { - "StagesAvailable": stages_available or ["Original"], - "TemplateBody": _file.read(), - } + return { + "StagesAvailable": stages_available or ["Original"], + "TemplateBody": (Path(__file__).parent.parent.parent / "fixtures" / file_name).read_text(), + } -def generate_stack_object(stack_name: str, outputs: Optional[Dict[str, Any]] = None) -> MagicMock: +def generate_stack_object(stack_name: str, outputs: Optional[dict[str, Any]] = None) -> MagicMock: """Generate stack object.""" mock_stack = MagicMock(["name", "fqn", "blueprint"]) if not outputs: @@ -148,9 +145,9 @@ def generate_resource_change(replacement: bool = True) -> ChangeTypeDef: def generate_change_set_response( status: str, execution_status: str = "AVAILABLE", - changes: Optional[List[Dict[str, Any]]] = None, + changes: Optional[list[dict[str, Any]]] = None, status_reason: str = "FAKE", -) -> Dict[str, Any]: +) -> dict[str, Any]: """Generate change set response.""" return { "ChangeSetName": "string", @@ -182,7 +179,7 @@ def generate_change( resource_type: str = "EC2::Instance", replacement: str = "False", requires_recreation: str = "Never", -) -> Dict[str, Any]: +) -> dict[str, Any]: """Generate a minimal change for a changeset.""" return { "Type": "Resource", @@ -225,9 +222,9 @@ def test_requires_replacement(self) -> None: generate_resource_change(), ] replacement = requires_replacement(changeset) - self.assertEqual(len(replacement), 2) + assert len(replacement) == 2 for resource in replacement: - self.assertEqual(resource.get("ResourceChange", {}).get("Replacement"), "True") + assert resource.get("ResourceChange", {}).get("Replacement") == "True" def test_summarize_params_diff(self) -> None: """Test summarize params diff.""" @@ -242,54 +239,39 @@ def test_summarize_params_diff(self) -> None: added_param, removed_param, ] - self.assertEqual(summarize_params_diff([]), "") - self.assertEqual( - summarize_params_diff(params_diff), - "\n".join( - [ - "Parameters Added: ParamC", - "Parameters Removed: ParamD", - "Parameters Modified: ParamB\n", - ] - ), + assert summarize_params_diff([]) == "" + assert ( + summarize_params_diff(params_diff) + == "Parameters Added: ParamC\nParameters Removed: ParamD\nParameters Modified: ParamB\n" ) only_modified_params_diff = [modified_param] - self.assertEqual( - summarize_params_diff(only_modified_params_diff), - "Parameters Modified: ParamB\n", - ) + assert summarize_params_diff(only_modified_params_diff) == "Parameters Modified: ParamB\n" only_added_params_diff = [added_param] - self.assertEqual( - summarize_params_diff(only_added_params_diff), "Parameters Added: ParamC\n" - ) + assert summarize_params_diff(only_added_params_diff) == "Parameters Added: ParamC\n" only_removed_params_diff = [removed_param] - self.assertEqual( - summarize_params_diff(only_removed_params_diff), - "Parameters Removed: 
ParamD\n", - ) + assert summarize_params_diff(only_removed_params_diff) == "Parameters Removed: ParamD\n" def test_ask_for_approval(self) -> None: """Test ask for approval.""" get_input_path = "runway.cfngin.ui.get_raw_input" with patch(get_input_path, return_value="y"): - self.assertIsNone(ask_for_approval([], [], False)) + assert ask_for_approval([], [], False) is None for v in ("n", "N", "x", "\n"): - with patch(get_input_path, return_value=v): - with self.assertRaises(exceptions.CancelExecution): - ask_for_approval([], []) + with patch(get_input_path, return_value=v), pytest.raises(exceptions.CancelExecution): + ask_for_approval([], []) with patch(get_input_path, side_effect=["v", "n"]) as mock_get_input: with patch( "runway.cfngin.providers.aws.default.output_full_changeset" ) as mock_full_changeset: - with self.assertRaises(exceptions.CancelExecution): + with pytest.raises(exceptions.CancelExecution): ask_for_approval([], [], True) - self.assertEqual(mock_full_changeset.call_count, 1) - self.assertEqual(mock_get_input.call_count, 2) + assert mock_full_changeset.call_count == 1 + assert mock_get_input.call_count == 2 def test_ask_for_approval_with_params_diff(self) -> None: """Test ask for approval with params diff.""" @@ -299,21 +281,20 @@ def test_ask_for_approval_with_params_diff(self) -> None: DictValue("ParamB", "param-b-old-value", "param-b-new-value-delta"), ] with patch(get_input_path, return_value="y"): - self.assertIsNone(ask_for_approval([], params_diff, False)) + assert ask_for_approval([], params_diff, False) is None for v in ("n", "N", "x", "\n"): - with patch(get_input_path, return_value=v): - with self.assertRaises(exceptions.CancelExecution): - ask_for_approval([], params_diff) + with patch(get_input_path, return_value=v), pytest.raises(exceptions.CancelExecution): + ask_for_approval([], params_diff) with patch(get_input_path, side_effect=["v", "n"]) as mock_get_input: with patch( "runway.cfngin.providers.aws.default.output_full_changeset" ) as mock_full_changeset: - with self.assertRaises(exceptions.CancelExecution): + with pytest.raises(exceptions.CancelExecution): ask_for_approval([], params_diff, True) - self.assertEqual(mock_full_changeset.call_count, 1) - self.assertEqual(mock_get_input.call_count, 2) + assert mock_full_changeset.call_count == 1 + assert mock_get_input.call_count == 2 @patch("runway.cfngin.providers.aws.default.format_params_diff") @patch("runway.cfngin.providers.aws.default.yaml.safe_dump") @@ -327,22 +308,20 @@ def test_output_full_changeset( for v in ["y", "v", "Y", "V"]: with patch(get_input_path, return_value=v) as prompt: - self.assertIsNone( - output_full_changeset(full_changeset=[], params_diff=[], fqn=None) - ) - self.assertEqual(prompt.call_count, 1) + assert output_full_changeset(full_changeset=[], params_diff=[], fqn=None) is None + assert prompt.call_count == 1 safe_dump_counter += 1 - self.assertEqual(mock_safe_dump.call_count, safe_dump_counter) - self.assertEqual(patched_format.call_count, 0) + assert mock_safe_dump.call_count == safe_dump_counter + assert patched_format.call_count == 0 for v in ["n", "N"]: with patch(get_input_path, return_value=v) as prompt: output_full_changeset(full_changeset=[], params_diff=[], answer=None, fqn=None) - self.assertEqual(prompt.call_count, 1) - self.assertEqual(mock_safe_dump.call_count, safe_dump_counter) - self.assertEqual(patched_format.call_count, 0) + assert prompt.call_count == 1 + assert mock_safe_dump.call_count == safe_dump_counter + assert patched_format.call_count == 0 - with 
self.assertRaises(exceptions.CancelExecution): + with pytest.raises(exceptions.CancelExecution): output_full_changeset(full_changeset=[], params_diff=[], answer="x", fqn=None) output_full_changeset( @@ -352,8 +331,8 @@ def test_output_full_changeset( fqn=None, ) safe_dump_counter += 1 - self.assertEqual(mock_safe_dump.call_count, safe_dump_counter) - self.assertEqual(patched_format.call_count, 1) + assert mock_safe_dump.call_count == safe_dump_counter + assert patched_format.call_count == 1 def test_wait_till_change_set_complete_success(self) -> None: """Test wait till change set complete success.""" @@ -374,9 +353,8 @@ def test_wait_till_change_set_complete_failed(self) -> None: self.stubber.add_response( "describe_change_set", generate_change_set_response("CREATE_PENDING") ) - with self.stubber: - with self.assertRaises(exceptions.ChangesetDidNotStabilize): - wait_till_change_set_complete(self.cfn, "FAKEID", try_count=2, sleep_time=0.1) + with self.stubber, pytest.raises(exceptions.ChangesetDidNotStabilize): + wait_till_change_set_complete(self.cfn, "FAKEID", try_count=2, sleep_time=0.1) def test_create_change_set_stack_did_not_change(self) -> None: """Test create change set stack did not change.""" @@ -391,15 +369,14 @@ def test_create_change_set_stack_did_not_change(self) -> None: "delete_change_set", {}, expected_params={"ChangeSetName": "CHANGESETID"} ) - with self.stubber: - with self.assertRaises(exceptions.StackDidNotChange): - create_change_set( - cfn_client=self.cfn, - fqn="my-fake-stack", - template=Template(url="http://fake.template.url.com/"), - parameters=[], - tags=[], - ) + with self.stubber, pytest.raises(exceptions.StackDidNotChange): + create_change_set( + cfn_client=self.cfn, + fqn="my-fake-stack", + template=Template(url="http://fake.template.url.com/"), + parameters=[], + tags=[], + ) def test_create_change_set_unhandled_failed_status(self) -> None: """Test create change set unhandled failed status.""" @@ -410,15 +387,14 @@ def test_create_change_set_unhandled_failed_status(self) -> None: generate_change_set_response("FAILED", status_reason="Some random bad thing."), ) - with self.stubber: - with self.assertRaises(exceptions.UnhandledChangeSetStatus): - create_change_set( - cfn_client=self.cfn, - fqn="my-fake-stack", - template=Template(url="http://fake.template.url.com/"), - parameters=[], - tags=[], - ) + with self.stubber, pytest.raises(exceptions.UnhandledChangeSetStatus): + create_change_set( + cfn_client=self.cfn, + fqn="my-fake-stack", + template=Template(url="http://fake.template.url.com/"), + parameters=[], + tags=[], + ) def test_create_change_set_bad_execution_status(self) -> None: """Test create change set bad execution status.""" @@ -429,28 +405,27 @@ def test_create_change_set_bad_execution_status(self) -> None: generate_change_set_response(status="CREATE_COMPLETE", execution_status="UNAVAILABLE"), ) - with self.stubber: - with self.assertRaises(exceptions.UnableToExecuteChangeSet): - create_change_set( - cfn_client=self.cfn, - fqn="my-fake-stack", - template=Template(url="http://fake.template.url.com/"), - parameters=[], - tags=[], - ) + with self.stubber, pytest.raises(exceptions.UnableToExecuteChangeSet): + create_change_set( + cfn_client=self.cfn, + fqn="my-fake-stack", + template=Template(url="http://fake.template.url.com/"), + parameters=[], + tags=[], + ) def test_generate_cloudformation_args(self) -> None: """Test generate cloudformation args.""" stack_name = "mystack" template_url = "http://fake.s3url.com/blah.json" template_body = 
'{"fake_body": "woot"}' - std_args: Dict[str, Any] = { + std_args: dict[str, Any] = { "stack_name": stack_name, "parameters": [], "tags": [], "template": Template(url=template_url), } - std_return: Dict[str, Any] = { + std_return: dict[str, Any] = { "StackName": stack_name, "Parameters": [], "Tags": [], @@ -458,24 +433,24 @@ def test_generate_cloudformation_args(self) -> None: "TemplateURL": template_url, } result = generate_cloudformation_args(**std_args) - self.assertEqual(result, std_return) + assert result == std_return result = generate_cloudformation_args(service_role="FakeRole", **std_args) service_role_result = copy.deepcopy(std_return) service_role_result["RoleARN"] = "FakeRole" - self.assertEqual(result, service_role_result) + assert result == service_role_result result = generate_cloudformation_args(change_set_name="MyChanges", **std_args) change_set_result = copy.deepcopy(std_return) change_set_result["ChangeSetName"] = "MyChanges" - self.assertEqual(result, change_set_result) + assert result == change_set_result # Check stack policy stack_policy = Template(body="{}") result = generate_cloudformation_args(stack_policy=stack_policy, **std_args) stack_policy_result = copy.deepcopy(std_return) stack_policy_result["StackPolicyBody"] = "{}" - self.assertEqual(result, stack_policy_result) + assert result == stack_policy_result # If not TemplateURL is provided, use TemplateBody std_args["template"] = Template(body=template_body) @@ -483,7 +458,7 @@ def test_generate_cloudformation_args(self) -> None: del template_body_result["TemplateURL"] template_body_result["TemplateBody"] = template_body result = generate_cloudformation_args(**std_args) - self.assertEqual(result, template_body_result) + assert result == template_body_result class TestProvider: @@ -581,8 +556,8 @@ def test_create_stack_no_changeset(self) -> None: """Test create_stack, no changeset, template url.""" stack_name = "fake_stack" template = Template(url="http://fake.template.url.com/") - parameters: List[Any] = [] - tags: List[Any] = [] + parameters: list[Any] = [] + tags: list[Any] = [] expected_args = generate_cloudformation_args(stack_name, parameters, tags, template) expected_args["EnableTerminationProtection"] = False @@ -605,8 +580,8 @@ def test_create_stack_with_changeset( template = Template( body=template_path.read_text(encoding=locale.getpreferredencoding(do_setlocale=False)) ) - parameters: List[Any] = [] - tags: List[Any] = [] + parameters: list[Any] = [] + tags: list[Any] = [] changeset_id = "CHANGESETID" @@ -643,7 +618,7 @@ def test_destroy_stack(self) -> None: self.stubber.add_response("delete_stack", {}, stack) with self.stubber: - self.assertIsNone(self.provider.destroy_stack(stack)) # type: ignore + assert self.provider.destroy_stack(stack) is None # type: ignore self.stubber.assert_no_pending_responses() def test_get_stack_stack_does_not_exist(self) -> None: @@ -656,9 +631,8 @@ def test_get_stack_stack_does_not_exist(self) -> None: expected_params={"StackName": stack_name}, ) - with self.assertRaises(exceptions.StackDoesNotExist): - with self.stubber: - self.provider.get_stack(stack_name) + with pytest.raises(exceptions.StackDoesNotExist), self.stubber: + self.provider.get_stack(stack_name) def test_get_stack_stack_exists(self) -> None: """Test get stack stack exists.""" @@ -671,7 +645,7 @@ def test_get_stack_stack_exists(self) -> None: with self.stubber: response = self.provider.get_stack(stack_name) - self.assertEqual(response["StackName"], stack_name) + assert response["StackName"] == stack_name def 
test_select_destroy_method(self) -> None: """Test select destroy method.""" @@ -679,7 +653,7 @@ def test_select_destroy_method(self) -> None: [{"force_interactive": False}, self.provider.noninteractive_destroy_stack], [{"force_interactive": True}, self.provider.interactive_destroy_stack], ]: - self.assertEqual(self.provider.select_destroy_method(**i[0]), i[1]) # type: ignore + assert self.provider.select_destroy_method(**i[0]) == i[1] # type: ignore def test_select_update_method(self) -> None: """Test select update method.""" @@ -701,7 +675,7 @@ def test_select_update_method(self) -> None: self.provider.interactive_update_stack, ], ]: - self.assertEqual(self.provider.select_update_method(**i[0]), i[1]) # type: ignore + assert self.provider.select_update_method(**i[0]) == i[1] # type: ignore def test_prepare_stack_for_update_completed(self) -> None: """Test prepare stack for update completed.""" @@ -709,42 +683,39 @@ def test_prepare_stack_for_update_completed(self) -> None: stack_name = "MockStack" stack = generate_describe_stacks_stack(stack_name, stack_status="UPDATE_COMPLETE") - self.assertTrue(self.provider.prepare_stack_for_update(stack, [])) + assert self.provider.prepare_stack_for_update(stack, []) def test_prepare_stack_for_update_in_progress(self) -> None: """Test prepare stack for update in progress.""" stack_name = "MockStack" stack = generate_describe_stacks_stack(stack_name, stack_status="UPDATE_IN_PROGRESS") - with self.assertRaises(exceptions.StackUpdateBadStatus) as raised: - with self.stubber: - self.provider.prepare_stack_for_update(stack, []) + with self.stubber, pytest.raises(exceptions.StackUpdateBadStatus) as raised: + self.provider.prepare_stack_for_update(stack, []) - self.assertIn("in-progress", str(raised.exception)) + assert "in-progress" in str(raised.value) def test_prepare_stack_for_update_non_recreatable(self) -> None: """Test prepare stack for update non recreatable.""" stack_name = "MockStack" stack = generate_describe_stacks_stack(stack_name, stack_status="REVIEW_IN_PROGRESS") - with self.assertRaises(exceptions.StackUpdateBadStatus) as raised: - with self.stubber: - self.provider.prepare_stack_for_update(stack, []) + with pytest.raises(exceptions.StackUpdateBadStatus) as raised, self.stubber: + self.provider.prepare_stack_for_update(stack, []) - self.assertIn("Unsupported state", str(raised.exception)) + assert "Unsupported state" in str(raised.value) def test_prepare_stack_for_update_disallowed(self) -> None: """Test prepare stack for update disallowed.""" stack_name = "MockStack" stack = generate_describe_stacks_stack(stack_name, stack_status="ROLLBACK_COMPLETE") - with self.assertRaises(exceptions.StackUpdateBadStatus) as raised: - with self.stubber: - self.provider.prepare_stack_for_update(stack, []) + with pytest.raises(exceptions.StackUpdateBadStatus) as raised, self.stubber: + self.provider.prepare_stack_for_update(stack, []) - self.assertIn("re-creation is disabled", str(raised.exception)) + assert "re-creation is disabled" in str(raised.value) # Ensure we point out to the user how to enable re-creation - self.assertIn("--recreate-failed", str(raised.exception)) + assert "--recreate-failed" in str(raised.value) def test_prepare_stack_for_update_bad_tags(self) -> None: """Test prepare stack for update bad tags.""" @@ -753,13 +724,12 @@ def test_prepare_stack_for_update_bad_tags(self) -> None: self.provider.recreate_failed = True - with self.assertRaises(exceptions.StackUpdateBadStatus) as raised: - with self.stubber: -
self.provider.prepare_stack_for_update( - stack, tags=[{"Key": "cfngin_namespace", "Value": "test"}] - ) + with pytest.raises(exceptions.StackUpdateBadStatus) as raised, self.stubber: + self.provider.prepare_stack_for_update( + stack, tags=[{"Key": "cfngin_namespace", "Value": "test"}] + ) - self.assertIn("tags differ", str(raised.exception).lower()) + assert "tags differ" in str(raised.value).lower() def test_prepare_stack_for_update_recreate(self) -> None: """Test prepare stack for update recreate.""" @@ -771,7 +741,7 @@ def test_prepare_stack_for_update_recreate(self) -> None: self.provider.recreate_failed = True with self.stubber: - self.assertFalse(self.provider.prepare_stack_for_update(stack, [])) + assert not self.provider.prepare_stack_for_update(stack, []) def test_noninteractive_changeset_update_no_stack_policy(self) -> None: """Test noninteractive changeset update no stack policy.""" @@ -825,7 +795,7 @@ def test_noninteractive_destroy_stack_termination_protected(self) -> None: """Test noninteractive_destroy_stack with termination protection.""" self.stubber.add_client_error("delete_stack") - with self.stubber, self.assertRaises(ClientError): + with self.stubber, pytest.raises(ClientError): self.provider.noninteractive_destroy_stack("fake-stack") self.stubber.assert_no_pending_responses() @@ -866,8 +836,8 @@ def test_get_stack_changes_update(self, mock_output_full_cs: MagicMock) -> None: expected_outputs = { "FakeOutput": "" } - self.assertEqual(self.provider.get_outputs(stack_name), expected_outputs) - self.assertEqual(result, expected_outputs) + assert self.provider.get_outputs(stack_name) == expected_outputs + assert result == expected_outputs @patch("runway.cfngin.providers.aws.default.output_full_changeset") def test_get_stack_changes_create(self, mock_output_full_cs: MagicMock) -> None: @@ -945,9 +915,8 @@ def test_tail_stack_retry_on_missing_stack(self) -> None: try: self.provider.tail_stack(stack, threading.Event()) except ClientError as exc: - self.assertEqual( - exc.response.get("ResponseMetadata", {}).get("attempt"), - MAX_TAIL_RETRIES, + assert ( # noqa: PT017 + exc.response.get("ResponseMetadata", {}).get("attempt") == MAX_TAIL_RETRIES ) def test_tail_stack_retry_on_missing_stack_eventual_success(self) -> None: @@ -957,14 +926,13 @@ def test_tail_stack_retry_on_missing_stack_eventual_success(self) -> None: stack.fqn = f"my-namespace-{stack_name}" default.TAIL_RETRY_SLEEP = 0.01 - default.GET_EVENTS_SLEEP = 0.01 - received_events: List[Any] = [] + received_events: list[Any] = [] def mock_log_func(event: Any) -> None: received_events.append(event) - def valid_event_response(stack: Stack, event_id: str) -> Dict[str, Any]: + def valid_event_response(stack: Stack, event_id: str) -> dict[str, Any]: return { "StackEvents": [ { @@ -992,18 +960,15 @@ def valid_event_response(stack: Stack, event_id: str) -> Dict[str, Any]: self.stubber.add_response("describe_stack_events", valid_event_response(stack, "Event1")) - with self.stubber: - try: - self.provider.tail_stack(stack, threading.Event(), log_func=mock_log_func) - except UnStubbedResponseError: - # Eventually we run out of responses - could not happen in - # regular execution - # normally this would just be dealt with when the threads were - # shutdown, but doing so here is a little difficult because - # we can't control the `tail_stack` loop - pass + with self.stubber, suppress(UnStubbedResponseError): + # Eventually we run out of responses - could not happen in + # regular execution + # normally this would just be
dealt with when the threads were + # shutdown, but doing so here is a little difficult because + # we can't control the `tail_stack` loop + self.provider.tail_stack(stack, threading.Event(), log_func=mock_log_func) - self.assertEqual(received_events[0]["EventId"], "Event1") + assert received_events[0]["EventId"] == "Event1" def test_update_termination_protection(self) -> None: """Test update_termination_protection.""" @@ -1061,7 +1026,7 @@ def test_interactive_destroy_stack(self, patched_input: MagicMock) -> None: self.stubber.add_response("delete_stack", {}, stack) with self.stubber: - self.assertIsNone(self.provider.interactive_destroy_stack(stack_name)) + assert self.provider.interactive_destroy_stack(stack_name) is None self.stubber.assert_no_pending_responses() @patch("runway.cfngin.providers.aws.default.Provider.update_termination_protection") @@ -1088,15 +1053,14 @@ def test_destroy_stack_canceled(self, patched_input: MagicMock) -> None: """Test destroy stack canceled.""" patched_input.return_value = "n" - with self.assertRaises(exceptions.CancelExecution): - stack = {"StackName": "MockStack"} - self.provider.destroy_stack(stack) # type: ignore + with pytest.raises(exceptions.CancelExecution): + self.provider.destroy_stack({"StackName": "MockStack"}) # type: ignore def test_successful_init(self) -> None: """Test successful init.""" replacements = True provider = Provider(self.session, interactive=True, replacements_only=replacements) - self.assertEqual(provider.replacements_only, replacements) + assert provider.replacements_only == replacements @patch("runway.cfngin.providers.aws.default.Provider.update_termination_protection") @patch("runway.cfngin.providers.aws.default.ask_for_approval") @@ -1175,7 +1139,7 @@ def test_select_destroy_method(self) -> None: [{"force_interactive": False}, self.provider.interactive_destroy_stack], [{"force_interactive": True}, self.provider.interactive_destroy_stack], ]: - self.assertEqual(self.provider.select_destroy_method(**i[0]), i[1]) # type: ignore + assert self.provider.select_destroy_method(**i[0]) == i[1] # type: ignore def test_select_update_method(self) -> None: """Test select update method.""" @@ -1197,7 +1161,7 @@ def test_select_update_method(self) -> None: self.provider.interactive_update_stack, ], ]: - self.assertEqual(self.provider.select_update_method(**i[0]), i[1]) # type: ignore + assert self.provider.select_update_method(**i[0]) == i[1] # type: ignore @patch("runway.cfngin.providers.aws.default.output_full_changeset") @patch("runway.cfngin.providers.aws.default.output_summary") diff --git a/tests/unit/cfngin/test_cfngin.py b/tests/unit/cfngin/test_cfngin.py index 3850fa66e..b5051dd7f 100644 --- a/tests/unit/cfngin/test_cfngin.py +++ b/tests/unit/cfngin/test_cfngin.py @@ -4,9 +4,9 @@ import shutil from typing import TYPE_CHECKING +from unittest.mock import Mock, call import pytest -from mock import Mock, call from yaml.constructor import ConstructorError from runway.cfngin.cfngin import CFNgin @@ -31,7 +31,7 @@ def copy_basic_fixtures(cfngin_fixtures: Path, tmp_path: Path) -> None: copy_fixture(src=cfngin_fixtures / "configs" / "basic.yml", dest=tmp_path / "basic.yml") -@pytest.fixture(scope="function") +@pytest.fixture() def patch_safehaven(mocker: MockerFixture) -> Mock: """Patch SafeHaven.""" mock_haven = mocker.patch("runway.cfngin.cfngin.SafeHaven") diff --git a/tests/unit/cfngin/test_dag.py b/tests/unit/cfngin/test_dag.py index 1bac39934..09b146854 100644 --- a/tests/unit/cfngin/test_dag.py +++ b/tests/unit/cfngin/test_dag.py 
@@ -2,7 +2,7 @@ # pyright: basic import threading -from typing import Any, List +from typing import Any import pytest @@ -65,7 +65,7 @@ def test_walk(empty_dag: DAG) -> None: # b and c should be executed at the same time. dag.from_dict({"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []}) - nodes: List[Any] = [] + nodes: list[Any] = [] def walk_func(node: Any) -> bool: nodes.append(node) @@ -204,7 +204,7 @@ def test_threaded_walker(empty_dag: DAG) -> None: dag.from_dict({"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []}) lock = threading.Lock() # Protects nodes from concurrent access - nodes: List[Any] = [] + nodes: list[Any] = [] def walk_func(node: Any) -> bool: with lock: diff --git a/tests/unit/cfngin/test_environment.py b/tests/unit/cfngin/test_environment.py index a57ad3ef1..9ad4c5c1f 100644 --- a/tests/unit/cfngin/test_environment.py +++ b/tests/unit/cfngin/test_environment.py @@ -1,7 +1,7 @@ """Tests for runway.cfngin.environment.""" -# pyright: basic, reportUnnecessaryIsInstance=none -import unittest +# pyright: reportUnnecessaryIsInstance=none +import pytest from runway.cfngin.environment import parse_environment @@ -27,27 +27,27 @@ """ -class TestEnvironment(unittest.TestCase): +class TestEnvironment: """Tests for runway.cfngin.environment.""" def test_simple_key_value_parsing(self) -> None: """Test simple key value parsing.""" parsed_env = parse_environment(TEST_ENV) - self.assertTrue(isinstance(parsed_env, dict)) - self.assertEqual(parsed_env["key1"], "value1") - self.assertEqual(parsed_env["key2"], "value2") - self.assertEqual(parsed_env["key3"], "some:complex::value") - self.assertEqual(parsed_env["key4"], ":otherValue:") - self.assertEqual(parsed_env["key5"], "@value") - self.assertEqual(len(parsed_env), 5) + assert isinstance(parsed_env, dict) + assert parsed_env["key1"] == "value1" + assert parsed_env["key2"] == "value2" + assert parsed_env["key3"] == "some:complex::value" + assert parsed_env["key4"] == ":otherValue:" + assert parsed_env["key5"] == "@value" + assert len(parsed_env) == 5 def test_simple_key_value_parsing_exception(self) -> None: """Test simple key value parsing exception.""" - with self.assertRaises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 parse_environment(TEST_ERROR_ENV) def test_blank_value(self) -> None: """Test blank value.""" env = """key1:""" parsed = parse_environment(env) - self.assertEqual(parsed["key1"], "") + assert not parsed["key1"] diff --git a/tests/unit/cfngin/test_exceptions.py b/tests/unit/cfngin/test_exceptions.py index 2c6498612..6a79f25c4 100644 --- a/tests/unit/cfngin/test_exceptions.py +++ b/tests/unit/cfngin/test_exceptions.py @@ -3,7 +3,7 @@ from __future__ import annotations from pathlib import Path -from typing import TYPE_CHECKING, List, Optional, Union +from typing import TYPE_CHECKING, Optional, Union import pytest @@ -51,7 +51,7 @@ class TestInvalidConfig: [("error", "error"), (["error0", "error1"], "error0\nerror1")], ) def test___init__( - self, errors: Union[str, List[Union[Exception, str]]], expected_msg: str + self, errors: Union[str, list[Union[Exception, str]]], expected_msg: str ) -> None: """Test __init__.""" obj = InvalidConfig(errors) diff --git a/tests/unit/cfngin/test_plan.py b/tests/unit/cfngin/test_plan.py index 4895d668f..78620fe6d 100644 --- a/tests/unit/cfngin/test_plan.py +++ b/tests/unit/cfngin/test_plan.py @@ -1,16 +1,17 @@ """Tests for runway.cfngin.plan.""" -# pyright: basic +# ruff: noqa: SLF001 from __future__ import annotations import json -import os import shutil import tempfile 
import unittest -from typing import TYPE_CHECKING, Any, Dict, List, Optional +from pathlib import Path +from typing import TYPE_CHECKING, Any +from unittest import mock -import mock +import pytest from runway.cfngin.dag import walk from runway.cfngin.exceptions import ( @@ -49,23 +50,23 @@ def setUp(self) -> None: def test_status(self) -> None: """Test status.""" - self.assertFalse(self.step.submitted) - self.assertFalse(self.step.completed) + assert not self.step.submitted + assert not self.step.completed self.step.submit() - self.assertEqual(self.step.status, SUBMITTED) - self.assertTrue(self.step.submitted) - self.assertFalse(self.step.completed) + assert self.step.status == SUBMITTED + assert self.step.submitted + assert not self.step.completed self.step.complete() - self.assertEqual(self.step.status, COMPLETE) - self.assertNotEqual(self.step.status, SUBMITTED) - self.assertTrue(self.step.submitted) - self.assertTrue(self.step.completed) + assert self.step.status == COMPLETE + assert self.step.status != SUBMITTED + assert self.step.submitted + assert self.step.completed - self.assertNotEqual(self.step.status, True) - self.assertNotEqual(self.step.status, False) - self.assertNotEqual(self.step.status, "banana") + assert self.step.status is not True + assert self.step.status is not False + assert self.step.status != "banana" def test_from_stack_name(self) -> None: """Return step from step name.""" @@ -73,21 +74,21 @@ def test_from_stack_name(self) -> None: stack_name = "test-stack" result = Step.from_stack_name(stack_name, context) - self.assertIsInstance(result, Step) - self.assertEqual(stack_name, result.stack.name) + assert isinstance(result, Step) + assert stack_name == result.stack.name def test_from_persistent_graph(self) -> None: """Return list of steps from graph dict.""" context = mock_context() - graph_dict: Dict[str, Any] = {"stack1": [], "stack2": ["stack1"]} + graph_dict: dict[str, Any] = {"stack1": [], "stack2": ["stack1"]} result = Step.from_persistent_graph(graph_dict, context) - self.assertEqual(2, len(result)) - self.assertIsInstance(result, list) + assert len(result) == 2 + assert isinstance(result, list) for step in result: - self.assertIsInstance(step, Step) - self.assertIn(step.stack.name, graph_dict.keys()) + assert isinstance(step, Step) + assert step.stack.name in graph_dict class TestGraph(unittest.TestCase): @@ -96,7 +97,7 @@ class TestGraph(unittest.TestCase): def setUp(self) -> None: """Run before tests.""" self.context = mock_context() - self.graph_dict: Dict[str, Any] = {"stack1": [], "stack2": ["stack1"]} + self.graph_dict: dict[str, Any] = {"stack1": [], "stack2": ["stack1"]} self.graph_dict_expected = {"stack1": set(), "stack2": {"stack1"}} self.steps = Step.from_persistent_graph(self.graph_dict, self.context) @@ -105,9 +106,9 @@ def test_add_steps(self) -> None: graph = Graph() graph.add_steps(self.steps) - self.assertEqual(self.steps, list(graph.steps.values())) - self.assertEqual([step.name for step in self.steps], list(graph.steps.keys())) - self.assertEqual(self.graph_dict_expected, graph.to_dict()) + assert self.steps == list(graph.steps.values()) + assert [step.name for step in self.steps] == list(graph.steps.keys()) + assert self.graph_dict_expected == graph.to_dict() def test_pop(self) -> None: """Test pop.""" @@ -116,31 +117,31 @@ def test_pop(self) -> None: stack2 = next(step for step in self.steps if step.name == "stack2") - self.assertEqual(stack2, graph.pop(stack2)) - self.assertEqual({"stack1": set()}, graph.to_dict()) + assert stack2 == 
graph.pop(stack2) + assert graph.to_dict() == {"stack1": set()} def test_dumps(self) -> None: """Test dumps.""" graph = Graph() graph.add_steps(self.steps) - self.assertEqual(json.dumps(self.graph_dict), graph.dumps()) + assert json.dumps(self.graph_dict) == graph.dumps() def test_from_dict(self) -> None: """Test from dict.""" graph = Graph.from_dict(self.graph_dict, self.context) - self.assertIsInstance(graph, Graph) - self.assertEqual([step.name for step in self.steps], list(graph.steps.keys())) - self.assertEqual(self.graph_dict_expected, graph.to_dict()) + assert isinstance(graph, Graph) + assert [step.name for step in self.steps] == list(graph.steps.keys()) + assert self.graph_dict_expected == graph.to_dict() def test_from_steps(self) -> None: """Test from steps.""" graph = Graph.from_steps(self.steps) - self.assertEqual(self.steps, list(graph.steps.values())) - self.assertEqual([step.name for step in self.steps], list(graph.steps.keys())) - self.assertEqual(self.graph_dict_expected, graph.to_dict()) + assert self.steps == list(graph.steps.values()) + assert [step.name for step in self.steps] == list(graph.steps.keys()) + assert self.graph_dict_expected == graph.to_dict() class TestPlan(unittest.TestCase): @@ -156,7 +157,7 @@ class FakeLookup(LookupHandler): """False Lookup.""" @classmethod - def handle(cls, value: str, *__args: Any, **__kwargs: Any) -> str: # type: ignore + def handle(cls, _value: str, *__args: Any, **__kwargs: Any) -> str: # type: ignore """Perform the lookup.""" return "test" @@ -177,7 +178,7 @@ def test_plan(self) -> None: graph = Graph.from_steps([Step(vpc, fn=None), Step(bastion, fn=None)]) plan = Plan(description="Test", graph=graph) - self.assertEqual(plan.graph.to_dict(), {"bastion-1": {"vpc-1"}, "vpc-1": set()}) + assert plan.graph.to_dict() == {"bastion-1": {"vpc-1"}, "vpc-1": set()} def test_plan_reverse(self) -> None: """Test plan reverse.""" @@ -191,8 +192,8 @@ def test_plan_reverse(self) -> None: # order is different between python2/3 so can't compare dicts result_graph_dict = plan.graph.to_dict() - self.assertEqual(set(), result_graph_dict.get("bastion-1")) - self.assertEqual({"bastion-1"}, result_graph_dict.get("vpc-1")) + assert set() == result_graph_dict.get("bastion-1") + assert {"bastion-1"} == result_graph_dict.get("vpc-1") def test_plan_targeted(self) -> None: """Test plan targeted.""" @@ -206,7 +207,7 @@ def test_plan_targeted(self) -> None: graph = Graph.from_steps([Step(vpc, fn=None), Step(bastion, fn=None)]) plan = Plan(description="Test", graph=graph, context=context) - self.assertEqual({vpc.name: set()}, plan.graph.to_dict()) + assert plan.graph.to_dict() == {vpc.name: set()} def test_execute_plan(self) -> None: """Test execute plan.""" @@ -220,13 +221,13 @@ def test_execute_plan(self) -> None: removed = Stack(definition=generate_definition("removed", 1, requires=[]), context=context) context._persistent_graph = Graph.from_steps([Step(removed)]) - calls: List[str] = [] + calls: list[str] = [] - def _launch_stack(stack: Stack, status: Optional[Status] = None) -> Status: + def _launch_stack(stack: Stack, status: Status | None = None) -> Status: # noqa: ARG001 calls.append(stack.fqn) return COMPLETE - def _destroy_stack(stack: Stack, status: Optional[Status] = None) -> Status: + def _destroy_stack(stack: Stack, status: Status | None = None) -> Status: # noqa: ARG001 calls.append(stack.fqn) return COMPLETE @@ -242,17 +243,17 @@ def _destroy_stack(stack: Stack, status: Optional[Status] = None) -> Status: plan.execute(walk) # the order these are 
appended changes between python2/3 - self.assertIn("namespace-vpc-1", calls) - self.assertIn("namespace-bastion-1", calls) - self.assertIn("namespace-removed-1", calls) + assert "namespace-vpc-1" in calls + assert "namespace-bastion-1" in calls + assert "namespace-removed-1" in calls context.put_persistent_graph.assert_called() # order is different between python2/3 so can't compare dicts result_graph_dict = context.persistent_graph.to_dict() # type: ignore - self.assertEqual(2, len(result_graph_dict)) - self.assertEqual(set(), result_graph_dict.get("vpc-1")) - self.assertEqual({"vpc-1"}, result_graph_dict.get("bastion-1")) - self.assertIsNone(result_graph_dict.get("namespace-removed-1")) + assert len(result_graph_dict) == 2 + assert set() == result_graph_dict.get("vpc-1") + assert {"vpc-1"} == result_graph_dict.get("bastion-1") + assert result_graph_dict.get("namespace-removed-1") is None def test_execute_plan_no_persist(self) -> None: """Test execute plan with no persistent graph.""" @@ -264,9 +265,9 @@ def test_execute_plan_no_persist(self) -> None: context=context, ) - calls: List[str] = [] + calls: list[str] = [] - def _launch_stack(stack: Stack, status: Optional[Status] = None) -> Status: + def _launch_stack(stack: Stack, status: Status | None = None) -> Status: # noqa: ARG001 calls.append(stack.fqn) return COMPLETE @@ -275,7 +276,7 @@ def _launch_stack(stack: Stack, status: Optional[Status] = None) -> Status: plan.execute(walk) - self.assertEqual(calls, ["namespace-vpc-1", "namespace-bastion-1"]) + assert calls == ["namespace-vpc-1", "namespace-bastion-1"] context.put_persistent_graph.assert_not_called() def test_execute_plan_locked(self) -> None: @@ -287,14 +288,13 @@ def test_execute_plan_locked(self) -> None: """ vpc = Stack(definition=generate_definition("vpc", 1), context=self.context) bastion = Stack( - definition=generate_definition("bastion", 1, requires=[vpc.name]), - locked=True, + definition=generate_definition("bastion", 1, locked=True, requires=[vpc.name]), context=self.context, ) - calls: List[str] = [] + calls: list[str] = [] - def fn(stack: Stack, status: Optional[Status] = None) -> Status: + def fn(stack: Stack, status: Status | None = None) -> Status: # noqa: ARG001 calls.append(stack.fqn) return COMPLETE @@ -302,7 +302,7 @@ def fn(stack: Stack, status: Optional[Status] = None) -> Status: plan = Plan(description="Test", graph=graph) plan.execute(walk) - self.assertEqual(calls, ["namespace-vpc-1", "namespace-bastion-1"]) + assert calls == ["namespace-vpc-1", "namespace-bastion-1"] def test_execute_plan_filtered(self) -> None: """Test execute plan filtered.""" @@ -316,9 +316,9 @@ def test_execute_plan_filtered(self) -> None: context=self.context, ) - calls: List[str] = [] + calls: list[str] = [] - def fn(stack: Stack, status: Optional[Status] = None) -> Status: + def fn(stack: Stack, status: Status | None = None) -> Status: # noqa: ARG001 calls.append(stack.fqn) return COMPLETE @@ -329,7 +329,7 @@ def fn(stack: Stack, status: Optional[Status] = None) -> Status: plan = Plan(context=context, description="Test", graph=graph) plan.execute(walk) - self.assertEqual(calls, ["namespace-vpc-1", "namespace-db-1"]) + assert calls == ["namespace-vpc-1", "namespace-db-1"] def test_execute_plan_exception(self) -> None: """Test execute plan exception.""" @@ -339,9 +339,9 @@ def test_execute_plan_exception(self) -> None: context=self.context, ) - calls: List[str] = [] + calls: list[str] = [] - def fn(stack: Stack, status: Optional[Status] = None) -> Status: + def fn(stack: Stack, 
status: Status | None = None) -> Status: # noqa: ARG001 calls.append(stack.fqn) if stack.name == vpc_step.name: raise ValueError("Boom") @@ -353,11 +353,11 @@ def fn(stack: Stack, status: Optional[Status] = None) -> Status: graph = Graph.from_steps([vpc_step, bastion_step]) plan = Plan(description="Test", graph=graph) - with self.assertRaises(PlanFailed): + with pytest.raises(PlanFailed): plan.execute(walk) - self.assertEqual(calls, ["namespace-vpc-1"]) - self.assertEqual(vpc_step.status, FAILED) + assert calls == ["namespace-vpc-1"] + assert vpc_step.status == FAILED def test_execute_plan_skipped(self) -> None: """Test execute plan skipped.""" @@ -367,9 +367,9 @@ def test_execute_plan_skipped(self) -> None: context=self.context, ) - calls: List[str] = [] + calls: list[str] = [] - def fn(stack: Stack, status: Optional[Status] = None) -> Status: + def fn(stack: Stack, status: Status | None = None) -> Status: # noqa: ARG001 calls.append(stack.fqn) if stack.fqn == vpc_step.name: return SKIPPED @@ -382,7 +382,7 @@ def fn(stack: Stack, status: Optional[Status] = None) -> Status: plan = Plan(description="Test", graph=graph) plan.execute(walk) - self.assertEqual(calls, ["namespace-vpc-1", "namespace-bastion-1"]) + assert calls == ["namespace-vpc-1", "namespace-bastion-1"] def test_execute_plan_failed(self) -> None: """Test execute plan failed.""" @@ -393,9 +393,9 @@ def test_execute_plan_failed(self) -> None: ) db = Stack(definition=generate_definition("db", 1), context=self.context) - calls: List[str] = [] + calls: list[str] = [] - def fn(stack: Stack, status: Optional[Status] = None) -> Status: + def fn(stack: Stack, status: Status | None = None) -> Status: # noqa: ARG001 calls.append(stack.fqn) if stack.name == vpc_step.name: return FAILED @@ -407,12 +407,12 @@ def fn(stack: Stack, status: Optional[Status] = None) -> Status: graph = Graph.from_steps([vpc_step, bastion_step, db_step]) plan = Plan(description="Test", graph=graph) - with self.assertRaises(PlanFailed): + with pytest.raises(PlanFailed): plan.execute(walk) calls.sort() - self.assertEqual(calls, ["namespace-db-1", "namespace-vpc-1"]) + assert calls == ["namespace-db-1", "namespace-vpc-1"] def test_execute_plan_cancelled(self) -> None: """Test execute plan cancelled.""" @@ -422,9 +422,9 @@ def test_execute_plan_cancelled(self) -> None: context=self.context, ) - calls: List[str] = [] + calls: list[str] = [] - def fn(stack: Stack, status: Optional[Status] = None) -> Status: + def fn(stack: Stack, status: Status | None = None) -> Status: # noqa: ARG001 calls.append(stack.fqn) if stack.fqn == vpc_step.name: raise CancelExecution @@ -437,7 +437,7 @@ def fn(stack: Stack, status: Optional[Status] = None) -> Status: plan = Plan(description="Test", graph=graph) plan.execute(walk) - self.assertEqual(calls, ["namespace-vpc-1", "namespace-bastion-1"]) + assert calls == ["namespace-vpc-1", "namespace-bastion-1"] def test_execute_plan_graph_locked(self) -> None: """Test execute plan with locked persistent graph.""" @@ -445,7 +445,7 @@ def test_execute_plan_graph_locked(self) -> None: context._persistent_graph = Graph.from_dict({"stack1": []}, context) context._persistent_graph_lock_code = "1111" plan = Plan(description="Test", graph=Graph(), context=context) - with self.assertRaises(PersistentGraphLocked): + with pytest.raises(PersistentGraphLocked): plan.execute() def test_build_graph_missing_dependency(self) -> None: @@ -455,12 +455,12 @@ def test_build_graph_missing_dependency(self) -> None: context=self.context, ) - with 
self.assertRaises(GraphError) as expected: + with pytest.raises(GraphError) as expected: Graph.from_steps([Step(bastion)]) message_starts = "Error detected when adding 'vpc-1' as a dependency of 'bastion-1':" message_contains = "dependent node vpc-1 does not exist" - self.assertTrue(str(expected.exception).startswith(message_starts)) - self.assertTrue(message_contains in str(expected.exception)) + assert str(expected.value).startswith(message_starts) + assert message_contains in str(expected.value) def test_build_graph_cyclic_dependencies(self) -> None: """Test build graph cyclic dependencies.""" @@ -474,19 +474,19 @@ def test_build_graph_cyclic_dependencies(self) -> None: context=self.context, ) - with self.assertRaises(GraphError) as expected: + with pytest.raises(GraphError) as expected: Graph.from_steps([Step(vpc), Step(db), Step(app)]) message = ( "Error detected when adding 'db-1' " "as a dependency of 'app-1': graph is " "not acyclic" ) - self.assertEqual(str(expected.exception), message) + assert str(expected.value) == message def test_dump(self) -> None: """Test dump.""" - requires: List[str] = [] - steps: List[Step] = [] + requires: list[str] = [] + steps: list[Step] = [] for i in range(5): overrides = { @@ -515,9 +515,6 @@ def test_dump(self) -> None: plan.dump(directory=tmp_dir, context=self.context) for step in plan.steps: - template_path = os.path.join( - tmp_dir, stack_template_key_name(step.stack.blueprint) # type: ignore - ) - self.assertTrue(os.path.isfile(template_path)) + assert (Path(tmp_dir) / stack_template_key_name(step.stack.blueprint)).is_file() finally: shutil.rmtree(tmp_dir) diff --git a/tests/unit/cfngin/test_stack.py b/tests/unit/cfngin/test_stack.py index fbc0a0801..56ce53990 100644 --- a/tests/unit/cfngin/test_stack.py +++ b/tests/unit/cfngin/test_stack.py @@ -1,115 +1,261 @@ """Tests for runway.cfngin.stack.""" -# pyright: basic -import unittest -from typing import Any +from __future__ import annotations -from mock import MagicMock +from typing import TYPE_CHECKING, Any, ClassVar +from unittest.mock import Mock + +import pytest from runway.cfngin.lookups.registry import ( register_lookup_handler, unregister_lookup_handler, ) from runway.cfngin.stack import Stack -from runway.config import CfnginConfig -from runway.context import CfnginContext +from runway.config import CfnginStackDefinitionModel from runway.lookups.handlers.base import LookupHandler -from .factories import generate_definition +if TYPE_CHECKING: + from collections.abc import Iterator + from pathlib import Path + from pytest_mock import MockerFixture -class TestStack(unittest.TestCase): - """Tests for runway.cfngin.stack.Stack.""" + from ..factories import MockCfnginContext - def setUp(self) -> None: - """Run before tests.""" - self.sd = {"name": "test"} - self.config = CfnginConfig.parse_obj({"namespace": "namespace"}) - self.context = CfnginContext(config=self.config) - self.stack = Stack(definition=generate_definition("vpc", 1), context=self.context) +MODULE = "runway.cfngin.stack" - class FakeLookup(LookupHandler): - """False Lookup.""" - @classmethod - def handle(cls, value: str, *__args: Any, **__kwargs: Any) -> str: # type: ignore - """Perform the lookup.""" - return "test" +@pytest.fixture(autouse=True, scope="module") +def fake_lookup() -> Iterator[None]: + """Register a fake lookup handler for testing.""" - register_lookup_handler("noop", FakeLookup) + class FakeLookup(LookupHandler): + """False Lookup.""" - def tearDown(self) -> None: - """Run after tests.""" - 
unregister_lookup_handler("noop") - return super().tearDown() + TYPE_NAME: ClassVar[str] = "fake" - def test_stack_requires(self) -> None: - """Test stack requires.""" - definition = generate_definition( - base_name="vpc", - stack_id=1, - variables={ - "Var1": "${noop fakeStack3::FakeOutput}", - "Var2": ( - "some.template.value:${output fakeStack2.FakeOutput}:" - "${output fakeStack.FakeOutput}" - ), - "Var3": "${output fakeStack.FakeOutput}," "${output fakeStack2.FakeOutput}", - }, - requires=["fakeStack"], + @classmethod + def handle(cls, value: str, *__args: Any, **__kwargs: Any) -> str: # type: ignore # noqa: ARG003 + """Perform the lookup.""" + return "test" + + register_lookup_handler(FakeLookup.TYPE_NAME, FakeLookup) + yield + unregister_lookup_handler(FakeLookup.TYPE_NAME) + + +def generate_stack_definition( + base_name: str, stack_id: Any = None, **overrides: Any +) -> CfnginStackDefinitionModel: + """Generate stack definition.""" + definition: dict[str, Any] = { + "name": f"{base_name}-{stack_id}" if stack_id else base_name, + "class_path": f"tests.unit.cfngin.fixtures.mock_blueprints.{base_name.upper()}", + "requires": [], + } + definition.update(overrides) + return CfnginStackDefinitionModel(**definition) + + +class TestStack: + """Test Stack.""" + + @pytest.mark.parametrize( + "enabled, expected", + [ + (False, False), + (True, True), + ("${default !::true}", True), + ("${default !::false}", False), + ], + ) + def test_enabled( + self, cfngin_context: MockCfnginContext, enabled: bool | str, expected: str + ) -> None: + """Test enabled.""" + assert ( + Stack( + definition=generate_stack_definition(base_name="vpc", enabled=enabled), + context=cfngin_context, + ).enabled + is expected + ) + + @pytest.mark.parametrize( + "locked, expected", + [ + (False, False), + (True, True), + ("${default !::true}", True), + ("${default !::false}", False), + ], + ) + def test_locked( + self, cfngin_context: MockCfnginContext, expected: str, locked: bool | str + ) -> None: + """Test locked.""" + assert ( + Stack( + definition=generate_stack_definition(base_name="vpc", locked=locked), + context=cfngin_context, + ).locked + is expected + ) + + @pytest.mark.parametrize( + "protected, expected", + [ + (False, False), + (True, True), + ("${default !::true}", True), + ("${default !::false}", False), + ], + ) + def test_protected( + self, cfngin_context: MockCfnginContext, expected: str, protected: bool | str + ) -> None: + """Test protected.""" + assert ( + Stack( + definition=generate_stack_definition(base_name="vpc", protected=protected), + context=cfngin_context, + ).protected + is expected + ) + + def test_required_by(self, cfngin_context: MockCfnginContext) -> None: + """Test required_by.""" + stack = Stack( + definition=generate_stack_definition( + base_name="vpc", + required_by=["fakeStack0"], + variables={"Param1": "${output fakeStack.FakeOutput}"}, + ), + context=cfngin_context, + ) + assert stack.required_by == {"fakeStack0"} + + def test_requires(self, cfngin_context: MockCfnginContext) -> None: + """Test requires.""" + stack = Stack( + definition=generate_stack_definition( + base_name="vpc", + variables={ + "Var1": "${fake fakeStack2::FakeOutput}", + "Var2": ( + "some.template.value:${output fakeStack1.FakeOutput}:" + "${output fakeStack0.FakeOutput}" + ), + "Var3": "${output fakeStack0.FakeOutput},${output fakeStack1.FakeOutput}", + }, + requires=["fakeStack0"], + ), + context=cfngin_context, + ) + assert len(stack.requires) == 2 + assert "fakeStack0" in stack.requires + assert 
"fakeStack1" in stack.requires + + def test_requires_cyclic_dependency(self, cfngin_context: MockCfnginContext) -> None: + """Test requires cyclic dependency.""" + stack = Stack( + definition=generate_stack_definition( + base_name="vpc", + variables={"Var1": "${output vpc.FakeOutput}"}, + ), + context=cfngin_context, + ) + with pytest.raises(ValueError, match="has a circular reference"): + assert stack.requires + + def test_resolve(self, cfngin_context: MockCfnginContext, mocker: MockerFixture) -> None: + """Test resolve.""" + mock_resolve_variables = mocker.patch(f"{MODULE}.resolve_variables") + mock_provider = Mock() + stack = Stack( + definition=generate_stack_definition(base_name="vpc"), + context=cfngin_context, + ) + stack._blueprint = Mock() + assert not stack.resolve(cfngin_context, mock_provider) + mock_resolve_variables.assert_called_once_with( + stack.variables, cfngin_context, mock_provider ) - stack = Stack(definition=definition, context=self.context) - self.assertEqual(len(stack.requires), 2) - self.assertIn("fakeStack", stack.requires) - self.assertIn("fakeStack2", stack.requires) - - def test_stack_requires_circular_ref(self) -> None: - """Test stack requires circular ref.""" - definition = generate_definition( - base_name="vpc", - stack_id=1, - variables={"Var1": "${output vpc-1.FakeOutput}"}, + stack._blueprint.resolve_variables.assert_called_once_with(stack.variables) + + def test_set_outputs(self, cfngin_context: MockCfnginContext) -> None: + """Test set_outputs.""" + stack = Stack( + definition=generate_stack_definition(base_name="vpc"), + context=cfngin_context, ) - stack = Stack(definition=definition, context=self.context) - with self.assertRaises(ValueError): - stack.requires - - def test_stack_cfn_parameters(self) -> None: - """Test stack cfn parameters.""" - definition = generate_definition( - base_name="vpc", - stack_id=1, - variables={"Param1": "${output fakeStack.FakeOutput}"}, + assert not stack.outputs + outputs = {"foo": "bar"} + assert not stack.set_outputs(outputs) + assert stack.outputs == outputs + + def test_stack_policy(self, cfngin_context: MockCfnginContext, tmp_path: Path) -> None: + """Test stack_policy.""" + stack_policy_path = tmp_path / "stack_policy.json" + stack_policy_path.write_text("success") + assert ( + Stack( + definition=generate_stack_definition( + base_name="vpc", stack_policy_path=stack_policy_path + ), + context=cfngin_context, + ).stack_policy + == "success" + ) + + def test_stack_policy_not_provided(self, cfngin_context: MockCfnginContext) -> None: + """Test stack_policy.""" + assert not Stack( + definition=generate_stack_definition(base_name="vpc"), + context=cfngin_context, + ).stack_policy + + def test_tags(self, cfngin_context: MockCfnginContext) -> None: + """Test tags.""" + cfngin_context.config.tags = {"environment": "prod"} + assert Stack( + definition=generate_stack_definition( + base_name="vpc", tags={"app": "graph", "environment": "stage"} + ), + context=cfngin_context, + ).tags == {"app": "graph", "environment": "stage"} + + def test_tags_default(self, cfngin_context: MockCfnginContext) -> None: + """Test tags.""" + cfngin_context.config.tags = {"environment": "prod"} + assert Stack( + definition=generate_stack_definition(base_name="vpc"), + context=cfngin_context, + ).tags == {"environment": "prod"} + + @pytest.mark.parametrize( + "termination_protection, expected", + [ + (False, False), + (True, True), + ("${default !::true}", True), + ("${default !::false}", False), + ], + ) + def test_termination_protection( + 
self, + cfngin_context: MockCfnginContext, + expected: str, + termination_protection: bool | str, + ) -> None: + """Test termination_protection.""" + assert ( + Stack( + definition=generate_stack_definition( + base_name="vpc", termination_protection=termination_protection + ), + context=cfngin_context, + ).termination_protection + is expected ) - stack = Stack(definition=definition, context=self.context) - stack._blueprint = MagicMock() - stack._blueprint.parameter_values = { - "Param2": "Some Resolved Value", - } - param = stack.parameter_values["Param2"] - self.assertEqual(param, "Some Resolved Value") - - def test_stack_tags_default(self) -> None: - """Test stack tags default.""" - self.config.tags = {"environment": "prod"} - definition = generate_definition(base_name="vpc", stack_id=1) - stack = Stack(definition=definition, context=self.context) - self.assertEqual(stack.tags, {"environment": "prod"}) - - def test_stack_tags_override(self) -> None: - """Test stack tags override.""" - self.config.tags = {"environment": "prod"} - definition = generate_definition(base_name="vpc", stack_id=1, tags={"environment": "stage"}) - stack = Stack(definition=definition, context=self.context) - self.assertEqual(stack.tags, {"environment": "stage"}) - - def test_stack_tags_extra(self) -> None: - """Test stack tags extra.""" - self.config.tags = {"environment": "prod"} - definition = generate_definition(base_name="vpc", stack_id=1, tags={"app": "graph"}) - stack = Stack(definition=definition, context=self.context) - self.assertEqual(stack.tags, {"environment": "prod", "app": "graph"}) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/unit/cfngin/test_tokenize_userdata.py b/tests/unit/cfngin/test_tokenize_userdata.py index 125bc83d9..ec08288cd 100644 --- a/tests/unit/cfngin/test_tokenize_userdata.py +++ b/tests/unit/cfngin/test_tokenize_userdata.py @@ -16,8 +16,8 @@ def test_tokenize(self) -> None: user_data = ["field0", 'Ref("SshKey")', "field1", 'Fn::GetAtt("Blah", "Woot")'] user_data_dump = yaml.dump(user_data) parts = cf_tokenize(user_data_dump) - self.assertIsInstance(parts[1], dict) - self.assertIsInstance(parts[3], dict) - self.assertEqual(parts[1]["Ref"], "SshKey") # type: ignore - self.assertEqual(parts[3]["Fn::GetAtt"], ["Blah", "Woot"]) # type: ignore - self.assertEqual(len(parts), 5) + assert isinstance(parts[1], dict) + assert isinstance(parts[3], dict) + assert parts[1]["Ref"] == "SshKey" # type: ignore + assert parts[3]["Fn::GetAtt"] == ["Blah", "Woot"] # type: ignore + assert len(parts) == 5 diff --git a/tests/unit/cfngin/test_utils.py b/tests/unit/cfngin/test_utils.py index 455de2a78..df9d95c1e 100644 --- a/tests/unit/cfngin/test_utils.py +++ b/tests/unit/cfngin/test_utils.py @@ -10,10 +10,10 @@ import tempfile import unittest from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, cast +from typing import TYPE_CHECKING, Any, cast +from unittest import mock import boto3 -import mock import pytest from botocore.exceptions import ClientError from botocore.stub import Stubber @@ -40,7 +40,6 @@ from runway.config.models.cfngin import GitCfnginPackageSourceDefinitionModel if TYPE_CHECKING: - from pytest import LogCaptureFixture from pytest_mock import MockerFixture AWS_REGIONS = [ @@ -59,7 +58,7 @@ MODULE = "runway.cfngin.utils" -def mock_create_cache_directories(self: Any, **kwargs: Any) -> int: +def mock_create_cache_directories(self: Any, **kwargs: Any) -> int: # noqa: ARG001 """Mock create cache directories. 
Don't actually need the directories created in testing @@ -78,7 +77,7 @@ def test_ensure_s3_bucket() -> None: stubber.assert_no_pending_responses() -def test_ensure_s3_bucket_forbidden(caplog: LogCaptureFixture) -> None: +def test_ensure_s3_bucket_forbidden(caplog: pytest.LogCaptureFixture) -> None: """Test ensure_s3_bucket.""" caplog.set_level(logging.ERROR, logger=MODULE) s3_client = boto3.client("s3") @@ -143,7 +142,7 @@ def test_ensure_s3_bucket_not_found_persist_graph() -> None: stubber.assert_no_pending_responses() -def test_ensure_s3_bucket_persist_graph(caplog: LogCaptureFixture) -> None: +def test_ensure_s3_bucket_persist_graph(caplog: pytest.LogCaptureFixture) -> None: """Test ensure_s3_bucket.""" caplog.set_level(logging.WARNING, logger=MODULE) s3_client = boto3.client("s3") @@ -156,7 +155,7 @@ def test_ensure_s3_bucket_persist_graph(caplog: LogCaptureFixture) -> None: assert not caplog.messages -def test_ensure_s3_bucket_persist_graph_mfa_delete(caplog: LogCaptureFixture) -> None: +def test_ensure_s3_bucket_persist_graph_mfa_delete(caplog: pytest.LogCaptureFixture) -> None: """Test ensure_s3_bucket.""" caplog.set_level(logging.WARNING, logger=MODULE) s3_client = boto3.client("s3") @@ -180,7 +179,7 @@ def test_ensure_s3_bucket_persist_graph_mfa_delete(caplog: LogCaptureFixture) -> "versioning_response", [{"Status": "Disabled"}, {"Status": "Suspended"}, {}] ) def test_ensure_s3_bucket_persist_graph_versioning_not_enabled( - caplog: LogCaptureFixture, versioning_response: Dict[str, Any] + caplog: pytest.LogCaptureFixture, versioning_response: dict[str, Any] ) -> None: """Test ensure_s3_bucket.""" caplog.set_level(logging.WARNING, logger=MODULE) @@ -196,7 +195,7 @@ def test_ensure_s3_bucket_persist_graph_versioning_not_enabled( ) -def test_ensure_s3_bucket_raise_client_error(caplog: LogCaptureFixture) -> None: +def test_ensure_s3_bucket_raise_client_error(caplog: pytest.LogCaptureFixture) -> None: """Test ensure_s3_bucket.""" caplog.set_level(logging.ERROR, logger=MODULE) s3_client = boto3.client("s3") @@ -217,13 +216,13 @@ def test_read_value_from_path_abs(tmp_path: Path) -> None: def test_read_value_from_path_dir(tmp_path: Path) -> None: """Test read_value_from_path directory.""" - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 read_value_from_path(f"file://{tmp_path.absolute()}") def test_read_value_from_path_not_exist(tmp_path: Path) -> None: """Test read_value_from_path does not exist.""" - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 read_value_from_path(f"file://{(tmp_path / 'something.txt').absolute()}") @@ -271,7 +270,7 @@ def setUp(self) -> None: # Create a tar file using the temporary directory with tarfile.open(self.tmp_path / self.tar_file, "w") as tar: - tar.add(self.tmp_path, arcname=os.path.basename(self.tmp_path)) + tar.add(self.tmp_path, arcname=os.path.basename(self.tmp_path)) # noqa: PTH119 def tearDown(self) -> None: """Tear down test case.""" @@ -281,7 +280,7 @@ def test_cf_safe_name(self) -> None: """Test cf safe name.""" tests = (("abc-def", "AbcDef"), ("GhI", "GhI"), ("jKlm.noP", "JKlmNoP")) for test in tests: - self.assertEqual(cf_safe_name(test[0]), test[1]) + assert cf_safe_name(test[0]) == test[1] def test_camel_to_snake(self) -> None: """Test camel to snake.""" @@ -292,7 +291,7 @@ def test_camel_to_snake(self) -> None: ("testtemplate", "testtemplate"), ) for test in tests: - self.assertEqual(camel_to_snake(test[0]), test[1]) + assert camel_to_snake(test[0]) == test[1] def 
test_yaml_to_ordered_dict(self) -> None: """Test yaml to ordered dict.""" @@ -304,27 +303,27 @@ def test_yaml_to_ordered_dict(self) -> None: path: foo1.bar1 """ config = yaml_to_ordered_dict(raw_config) - self.assertEqual(list(config["pre_deploy"].keys())[0], "hook2") - self.assertEqual(config["pre_deploy"]["hook2"]["path"], "foo.bar") + assert next(iter(config["pre_deploy"].keys())) == "hook2" + assert config["pre_deploy"]["hook2"]["path"] == "foo.bar" def test_get_client_region(self) -> None: """Test get client region.""" regions = ["us-east-1", "us-west-1", "eu-west-1", "sa-east-1"] for region in regions: client = boto3.client("s3", region_name=region) - self.assertEqual(get_client_region(client), region) + assert get_client_region(client) == region def test_get_s3_endpoint(self) -> None: """Test get s3 endpoint.""" endpoint_url = "https://example.com" client = boto3.client("s3", region_name="us-east-1", endpoint_url=endpoint_url) - self.assertEqual(get_s3_endpoint(client), endpoint_url) + assert get_s3_endpoint(client) == endpoint_url def test_s3_bucket_location_constraint(self) -> None: """Test s3 bucket location constraint.""" tests = (("us-east-1", ""), ("us-west-1", "us-west-1")) for region, result in tests: - self.assertEqual(s3_bucket_location_constraint(region), result) + assert s3_bucket_location_constraint(region) == result def test_parse_cloudformation_template(self) -> None: """Test parse cloudformation template.""" @@ -362,52 +361,52 @@ def test_parse_cloudformation_template(self) -> None: } }, } - self.assertEqual(parse_cloudformation_template(template), parsed_template) + assert parse_cloudformation_template(template) == parsed_template - def test_is_within_directory(self): + def test_is_within_directory(self) -> None: """Test is within directory.""" directory = Path("my_directory") # Assert if the target is within the directory. target = "my_directory/sub_directory/file.txt" - self.assertTrue(is_within_directory(directory, target)) + assert is_within_directory(directory, target) # Assert if the target is NOT within the directory. target = "other_directory/file.txt" - self.assertFalse(is_within_directory(directory, target)) + assert not is_within_directory(directory, target) # Assert if the target is the directory. 
target = "my_directory" - self.assertTrue(is_within_directory(directory, target)) + assert is_within_directory(directory, target) - def test_safe_tar_extract_all_within(self): + def test_safe_tar_extract_all_within(self) -> None: """Test when all tar file contents are within the specified directory.""" path = self.tmp_path / "my_directory" with tarfile.open(self.tmp_path / self.tar_file, "r") as tar: - self.assertIsNone(safe_tar_extract(tar, path)) + assert safe_tar_extract(tar, path) is None - def test_safe_tar_extract_path_traversal(self): + def test_safe_tar_extract_path_traversal(self) -> None: """Test when a tar file tries to go outside the specified area.""" with tarfile.open(self.tmp_path / self.tar_file, "r") as tar: for member in tar.getmembers(): member.name = f"../{member.name}" path = self.tmp_path / "my_directory" - with self.assertRaises(Exception) as context: + with pytest.raises(Exception) as context: # noqa: PT011 safe_tar_extract(tar, path) - self.assertEqual(str(context.exception), "Attempted Path Traversal in Tar File") + assert str(context.exception) == "Attempted Path Traversal in Tar File" # type: ignore - def test_extractors(self): + def test_extractors(self) -> None: """Test extractors.""" - self.assertEqual(Extractor(Path("test.zip")).archive, Path("test.zip")) - self.assertEqual(TarExtractor().extension, ".tar") - self.assertEqual(TarGzipExtractor().extension, ".tar.gz") - self.assertEqual(ZipExtractor().extension, ".zip") + assert Extractor(Path("test.zip")).archive == Path("test.zip") + assert TarExtractor().extension == ".tar" + assert TarGzipExtractor().extension == ".tar.gz" + assert ZipExtractor().extension == ".zip" for i in [TarExtractor(), ZipExtractor(), ZipExtractor()]: i.set_archive(Path("/tmp/foo")) - self.assertEqual(i.archive.name.endswith(i.extension), True) # type: ignore + assert i.archive.name.endswith(i.extension) is True # type: ignore - def test_SourceProcessor_helpers(self): + def test_SourceProcessor_helpers(self) -> None: # noqa: N802 """Test SourceProcessor helpers.""" with mock.patch.object( SourceProcessor, @@ -416,37 +415,34 @@ def test_SourceProcessor_helpers(self): ): sp = SourceProcessor(cache_dir=self.tmp_path, sources={}) # type: ignore - self.assertEqual( - sp.sanitize_git_path("git@github.com:foo/bar.git"), - "git_github.com_foo_bar", + assert sp.sanitize_git_path("git@github.com:foo/bar.git") == "git_github.com_foo_bar" + assert ( + sp.sanitize_uri_path("http://example.com/foo/bar.gz@1") + == "http___example.com_foo_bar.gz_1" ) - self.assertEqual( - sp.sanitize_uri_path("http://example.com/foo/bar.gz@1"), - "http___example.com_foo_bar.gz_1", + assert ( + sp.sanitize_git_path("git@github.com:foo/bar.git", "v1") + == "git_github.com_foo_bar-v1" ) - self.assertEqual( - sp.sanitize_git_path("git@github.com:foo/bar.git", "v1"), - "git_github.com_foo_bar-v1", - ) - self.assertEqual( + assert ( sp.determine_git_ls_remote_ref( GitCfnginPackageSourceDefinitionModel(branch="foo", uri="test") - ), - "refs/heads/foo", + ) + == "refs/heads/foo" ) - for i in [cast(Dict[str, Any], {}), {"tag": "foo"}, {"commit": "1234"}]: - self.assertEqual( + for i in [cast(dict[str, Any], {}), {"tag": "foo"}, {"commit": "1234"}]: + assert ( sp.determine_git_ls_remote_ref( GitCfnginPackageSourceDefinitionModel(uri="git@foo", **i) - ), - "HEAD", + ) + == "HEAD" ) - self.assertEqual( + assert ( sp.git_ls_remote( "https://github.com/remind101/stacker.git", "refs/heads/release-1.0" - ), - "857b4834980e582874d70feef77bb064b60762d1", + ) + == 
"857b4834980e582874d70feef77bb064b60762d1" ) bad_configs = [ @@ -457,29 +453,28 @@ def test_SourceProcessor_helpers(self): {"uri": "x", "commit": "1234", "branch": "x"}, ] for i in bad_configs: - with self.assertRaises(ValidationError): + with pytest.raises(ValidationError): sp.determine_git_ref(GitCfnginPackageSourceDefinitionModel(**i)) - self.assertEqual( + assert ( sp.determine_git_ref( GitCfnginPackageSourceDefinitionModel( - uri="https://github.com/remind101/stacker.git", - branch="release-1.0", + uri="https://github.com/remind101/stacker.git", branch="release-1.0" ) - ), - "857b4834980e582874d70feef77bb064b60762d1", + ) + == "857b4834980e582874d70feef77bb064b60762d1" ) - self.assertEqual( + assert ( sp.determine_git_ref( - GitCfnginPackageSourceDefinitionModel(**{"uri": "git@foo", "commit": "1234"}) - ), - "1234", + GitCfnginPackageSourceDefinitionModel(uri="git@foo", commit="1234") + ) + == "1234" ) - self.assertEqual( + assert ( sp.determine_git_ref( - GitCfnginPackageSourceDefinitionModel(**{"uri": "git@foo", "tag": "v1.0.0"}) - ), - "v1.0.0", + GitCfnginPackageSourceDefinitionModel(uri="git@foo", tag="v1.0.0") + ) + == "v1.0.0" ) @@ -498,26 +493,28 @@ def setUp(self) -> None: """Run before tests.""" self.counter = 0 - def _works_immediately(self, a: Any, b: Any, x: Any = None, y: Any = None) -> List[Any]: + def _works_immediately(self, a: Any, b: Any, x: Any = None, y: Any = None) -> list[Any]: """Works immediately.""" self.counter += 1 return [a, b, x, y] - def _works_second_attempt(self, a: Any, b: Any, x: Any = None, y: Any = None) -> List[Any]: + def _works_second_attempt(self, a: Any, b: Any, x: Any = None, y: Any = None) -> list[Any]: """Works second_attempt.""" self.counter += 1 if self.counter == 2: return [a, b, x, y] raise Exception("Broke.") - def _second_raises_exception2(self, a: Any, b: Any, x: Any = None, y: Any = None) -> List[Any]: + def _second_raises_exception2(self, a: Any, b: Any, x: Any = None, y: Any = None) -> list[Any]: """Second raises exception2.""" self.counter += 1 if self.counter == 2: return [a, b, x, y] raise MockException("Broke.") - def _throws_exception2(self, a: Any, b: Any, x: Any = None, y: Any = None) -> List[Any]: + def _throws_exception2( + self, a: Any, b: Any, x: Any = None, y: Any = None # noqa: ARG002 + ) -> list[Any]: """Throws exception2.""" self.counter += 1 raise MockException("Broke.") diff --git a/tests/unit/config/components/runway/test_base.py b/tests/unit/config/components/runway/test_base.py index 29a93461c..71836e5ff 100644 --- a/tests/unit/config/components/runway/test_base.py +++ b/tests/unit/config/components/runway/test_base.py @@ -4,9 +4,9 @@ from __future__ import annotations from typing import TYPE_CHECKING, Any +from unittest.mock import MagicMock, call import pytest -from mock import MagicMock, call from pydantic import Extra from runway.config.components.runway import RunwayVariablesDefinition @@ -15,7 +15,6 @@ from runway.exceptions import UnresolvedVariable if TYPE_CHECKING: - from pytest import MonkeyPatch from ....factories import MockRunwayContext @@ -95,7 +94,8 @@ def test_default(self) -> None: obj = SampleConfigComponentDefinition(data) assert obj._data == data assert obj.data == data.dict() - assert not obj._vars and isinstance(obj._vars, dict) + assert not obj._vars + assert isinstance(obj._vars, dict) def test_get(self) -> None: """Test get.""" @@ -118,7 +118,7 @@ def test_getattr(self, runway_context: MockRunwayContext) -> None: with pytest.raises(AttributeError): assert not obj.missing - def 
test_getitem(self, monkeypatch: MonkeyPatch) -> None: + def test_getitem(self, monkeypatch: pytest.MonkeyPatch) -> None: """Test __getitem__.""" mock_getattr = MagicMock(side_effect=["val", AttributeError]) monkeypatch.setattr(SampleConfigComponentDefinition, "__getattr__", mock_getattr) diff --git a/tests/unit/config/components/runway/test_deployment_def.py b/tests/unit/config/components/runway/test_deployment_def.py index 55ffd6fd5..077a36956 100644 --- a/tests/unit/config/components/runway/test_deployment_def.py +++ b/tests/unit/config/components/runway/test_deployment_def.py @@ -1,7 +1,7 @@ """Test runway.config.components.runway._deployment_dev.""" # pyright: basic -from typing import Any, Dict, List +from typing import Any import pytest @@ -71,7 +71,7 @@ class TestRunwayDeploymentDefinition: ), ], ) - def test_menu_entry(self, data: Dict[str, Any], expected: str) -> None: + def test_menu_entry(self, data: dict[str, Any], expected: str) -> None: """Test menu_entry.""" assert RunwayDeploymentDefinition.parse_obj(data).menu_entry == expected @@ -112,19 +112,19 @@ def test_modules_setter_not_list(self) -> None: def test_models_setter_invalid_list_item(self) -> None: """Test modules.setter when list item is now supported.""" + obj = RunwayDeploymentDefinition.parse_obj({"regions": ["us-east-1"]}) with pytest.raises(TypeError): - obj = RunwayDeploymentDefinition.parse_obj({"regions": ["us-east-1"]}) obj.modules = [RunwayModuleDefinitionModel(path="./"), "invalid"] # type: ignore def test_parse_obj(self) -> None: """Test parse_obj.""" - data: Dict[str, Any] = {"name": "test", "modules": [], "regions": ["us-east-1"]} + data: dict[str, Any] = {"name": "test", "modules": [], "regions": ["us-east-1"]} obj = RunwayDeploymentDefinition.parse_obj(data) assert obj._data.dict(exclude_unset=True) == data def test_parse_obj_list(self) -> None: """Test parse_obj list.""" - data: List[Dict[str, Any]] = [{"name": "test", "modules": [], "regions": ["us-east-1"]}] + data: list[dict[str, Any]] = [{"name": "test", "modules": [], "regions": ["us-east-1"]}] result = RunwayDeploymentDefinition.parse_obj(data) assert isinstance(result, list) diff --git a/tests/unit/config/components/runway/test_module_def.py b/tests/unit/config/components/runway/test_module_def.py index 561f5453a..d6a98922f 100644 --- a/tests/unit/config/components/runway/test_module_def.py +++ b/tests/unit/config/components/runway/test_module_def.py @@ -2,7 +2,7 @@ # pyright: basic from pathlib import Path -from typing import Any, Dict +from typing import Any import pytest @@ -46,11 +46,11 @@ def test_child_modules_setter_not_list(self) -> None: def test_child_modules_setter_invalid_list_item(self) -> None: """Test child_modules.setter when list item is now supported.""" + obj = RunwayModuleDefinition.parse_obj({"path": "./"}) with pytest.raises(TypeError): - obj = RunwayModuleDefinition.parse_obj({"path": "./"}) - obj.child_modules = [ # type: ignore + obj.child_modules = [ RunwayModuleDefinitionModel(path="./"), - "invalid", + "invalid", # type: ignore ] @pytest.mark.parametrize( @@ -80,7 +80,7 @@ def test_child_modules_setter_invalid_list_item(self) -> None: ), ], ) - def test_is_parent(self, data: Dict[str, Any], expected: bool) -> None: + def test_is_parent(self, data: dict[str, Any], expected: bool) -> None: """Test is_parent.""" assert RunwayModuleDefinition.parse_obj(data).is_parent is expected @@ -114,7 +114,7 @@ def test_is_parent(self, data: Dict[str, Any], expected: bool) -> None: ), ], ) - def test_menu_entry(self, data: Dict[str, 
Any], expected: str) -> None: + def test_menu_entry(self, data: dict[str, Any], expected: str) -> None: """Test menu entry.""" assert RunwayModuleDefinition.parse_obj(data).menu_entry == expected diff --git a/tests/unit/config/components/runway/test_variables_def.py b/tests/unit/config/components/runway/test_variables_def.py index 3920a7418..2b800a105 100644 --- a/tests/unit/config/components/runway/test_variables_def.py +++ b/tests/unit/config/components/runway/test_variables_def.py @@ -17,7 +17,7 @@ def test_init_no_file(self, cd_tmp_path: Path) -> None: """Test init with no file.""" assert not RunwayVariablesDefinition.parse_obj({"sys_path": cd_tmp_path}) - @pytest.mark.parametrize("filename", ("runway.variables.yml", "runway.variables.yaml")) + @pytest.mark.parametrize("filename", ["runway.variables.yml", "runway.variables.yaml"]) def test_init_autofind_file(self, cd_tmp_path: Path, filename: str) -> None: """Test init autofind file.""" data = {"key": "val"} diff --git a/tests/unit/config/models/cfngin/test_cfngin.py b/tests/unit/config/models/cfngin/test_cfngin.py index 8441ac0c7..b78e18930 100644 --- a/tests/unit/config/models/cfngin/test_cfngin.py +++ b/tests/unit/config/models/cfngin/test_cfngin.py @@ -112,8 +112,10 @@ def test_resolve_path_fields(self) -> None: cfngin_cache_dir="./cache", # type: ignore sys_path="./something", # type: ignore ) - assert obj.cfngin_cache_dir and obj.cfngin_cache_dir.is_absolute() - assert obj.sys_path and obj.sys_path.is_absolute() + assert obj.cfngin_cache_dir + assert obj.cfngin_cache_dir.is_absolute() + assert obj.sys_path + assert obj.sys_path.is_absolute() def test_required_fields(self) -> None: """Test required fields.""" @@ -138,14 +140,15 @@ def test_validate_unique_stack_names(self) -> None: def test_validate_unique_stack_names_invalid(self) -> None: """Test _validate_unique_stack_names.""" with pytest.raises(ValidationError) as excinfo: - data = { - "namespace": "test", - "stacks": [ - {"name": "stack0", "class_path": "stack0"}, - {"name": "stack0", "class_path": "stack0"}, - ], - } - CfnginConfigDefinitionModel.parse_obj(data) + CfnginConfigDefinitionModel.parse_obj( + { + "namespace": "test", + "stacks": [ + {"name": "stack0", "class_path": "stack0"}, + {"name": "stack0", "class_path": "stack0"}, + ], + } + ) errors = excinfo.value.errors() assert len(errors) == 1 assert errors[0]["loc"] == ("stacks",) diff --git a/tests/unit/config/models/cfngin/test_package_sources.py b/tests/unit/config/models/cfngin/test_package_sources.py index e2dee3452..cb22fbbe0 100644 --- a/tests/unit/config/models/cfngin/test_package_sources.py +++ b/tests/unit/config/models/cfngin/test_package_sources.py @@ -1,7 +1,6 @@ """Test runway.config.models.cfngin._package_sources.""" # pyright: basic -from typing import Dict, List import pytest from pydantic import ValidationError @@ -90,7 +89,7 @@ def test_required_fields(self) -> None: {"field": "tag", "value": "v1.0.0"}, ], ) - def test_validate_one_ref(self, ref: Dict[str, str]) -> None: + def test_validate_one_ref(self, ref: dict[str, str]) -> None: """Test _validate_one_ref.""" data = {"uri": "something", ref["field"]: ref["value"]} assert GitCfnginPackageSourceDefinitionModel.parse_obj(data)[ref["field"]] == ref["value"] @@ -117,7 +116,7 @@ def test_validate_one_ref(self, ref: Dict[str, str]) -> None: ], ], ) - def test_validate_one_ref_invalid(self, refs: List[Dict[str, str]]) -> None: + def test_validate_one_ref_invalid(self, refs: list[dict[str, str]]) -> None: """Test _validate_one_ref invalid values.""" 
data = {"uri": "something", **{ref["field"]: ref["value"] for ref in refs}} with pytest.raises(ValidationError) as excinfo: diff --git a/tests/unit/config/models/runway/options/test_cdk.py b/tests/unit/config/models/runway/options/test_cdk.py index 13b979d7c..372234f50 100644 --- a/tests/unit/config/models/runway/options/test_cdk.py +++ b/tests/unit/config/models/runway/options/test_cdk.py @@ -10,7 +10,8 @@ class TestRunwayCdkModuleOptionsDataModel: def test_init_default(self) -> None: """Test init default.""" obj = RunwayCdkModuleOptionsDataModel() - assert not obj.build_steps and isinstance(obj.build_steps, list) + assert not obj.build_steps + assert isinstance(obj.build_steps, list) assert not obj.skip_npm_ci def test_init_extra(self) -> None: diff --git a/tests/unit/config/models/runway/options/test_serverless.py b/tests/unit/config/models/runway/options/test_serverless.py index 5d53e02c3..126249897 100644 --- a/tests/unit/config/models/runway/options/test_serverless.py +++ b/tests/unit/config/models/runway/options/test_serverless.py @@ -16,8 +16,10 @@ class TestRunwayServerlessModuleOptionsDataModel: def test_init_default(self) -> None: """Test init default values.""" obj = RunwayServerlessModuleOptionsDataModel() - assert not obj.args and isinstance(obj.args, list) - assert not obj.extend_serverless_yml and isinstance(obj.extend_serverless_yml, dict) + assert not obj.args + assert isinstance(obj.args, list) + assert not obj.extend_serverless_yml + assert isinstance(obj.extend_serverless_yml, dict) assert obj.promotezip == RunwayServerlessPromotezipOptionDataModel() assert obj.skip_npm_ci is False diff --git a/tests/unit/config/models/runway/options/test_terraform.py b/tests/unit/config/models/runway/options/test_terraform.py index 0b77f0547..28e59c25d 100644 --- a/tests/unit/config/models/runway/options/test_terraform.py +++ b/tests/unit/config/models/runway/options/test_terraform.py @@ -17,9 +17,12 @@ class TestRunwayTerraformArgsDataModel: def test_init_default(self) -> None: """Test init default.""" obj = RunwayTerraformArgsDataModel() - assert not obj.apply and isinstance(obj.apply, list) - assert not obj.init and isinstance(obj.init, list) - assert not obj.plan and isinstance(obj.plan, list) + assert not obj.apply + assert isinstance(obj.apply, list) + assert not obj.init + assert isinstance(obj.init, list) + assert not obj.plan + assert isinstance(obj.plan, list) def test_init_extra(self) -> None: """Test init extra.""" @@ -82,8 +85,10 @@ def test_convert_args(self) -> None: """Test _convert_args.""" obj = RunwayTerraformModuleOptionsDataModel.parse_obj({"args": ["test"]}) assert obj.args.apply == ["test"] - assert not obj.args.init and isinstance(obj.args.init, list) - assert not obj.args.plan and isinstance(obj.args.plan, list) + assert not obj.args.init + assert isinstance(obj.args.init, list) + assert not obj.args.plan + assert isinstance(obj.args.plan, list) def test_init_default(self) -> None: """Test init default.""" diff --git a/tests/unit/config/models/runway/test_runway.py b/tests/unit/config/models/runway/test_runway.py index a7c113d47..4d9d6c0f9 100644 --- a/tests/unit/config/models/runway/test_runway.py +++ b/tests/unit/config/models/runway/test_runway.py @@ -2,7 +2,7 @@ # pyright: basic from pathlib import Path -from typing import Any, Dict +from typing import Any import pytest import yaml @@ -214,7 +214,7 @@ def test_field_defaults(self) -> None: ) def test_fields_string_lookup_only(self, field: str) -> None: """Test fields that support strings only for 
lookups.""" - data: Dict[str, Any] = {} + data: dict[str, Any] = {} if field not in ["parallel_regions", "regions"]: data["regions"] = ["us-east-1"] data[field] = "something" diff --git a/tests/unit/config/models/test_base.py b/tests/unit/config/models/test_base.py index 8a35a00cb..3cf130261 100644 --- a/tests/unit/config/models/test_base.py +++ b/tests/unit/config/models/test_base.py @@ -1,7 +1,7 @@ """Test runway.config.models.base.""" # pyright: basic -from typing import Any, Dict, Optional +from typing import Any, Optional import pytest from pydantic import Extra, ValidationError @@ -33,7 +33,7 @@ class GoodObject(ConfigProperty): name: str bool_field: bool = True - dict_field: Dict[str, Any] = {} + dict_field: dict[str, Any] = {} optional_str_field: Optional[str] = None class Config(ConfigProperty.Config): @@ -81,9 +81,8 @@ def test_validate_all(self) -> None: def test_validate_assignment(self) -> None: """Test Config.validate_assignment.""" with pytest.raises(ValidationError) as excinfo: - obj = GoodObject(name="test") - obj.name = ("invalid",) # type: ignore + GoodObject(name="test").name = ("invalid",) # type: ignore errors = excinfo.value.errors() assert len(errors) == 1 assert errors[0]["loc"] == ("name",) - assert errors[0]["msg"] == "str type expected" + assert errors[0]["msg"] == "Input should be a valid string" diff --git a/tests/unit/config/models/test_utils.py b/tests/unit/config/models/test_utils.py index ec2ea856c..33d5b0b0f 100644 --- a/tests/unit/config/models/test_utils.py +++ b/tests/unit/config/models/test_utils.py @@ -66,6 +66,6 @@ def test_validate_string_is_lookup(provided: Any) -> None: ) def test_validate_string_is_lookup_raises(provided: str) -> None: """Test validate_string_is_lookup.""" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError) as excinfo: # noqa: PT011 validate_string_is_lookup(provided) assert excinfo.value == RUNWAY_LOOKUP_STRING_ERROR diff --git a/tests/unit/config/test_config.py b/tests/unit/config/test_config.py index d6cdbb354..344b306fa 100644 --- a/tests/unit/config/test_config.py +++ b/tests/unit/config/test_config.py @@ -3,12 +3,11 @@ # pyright: basic from __future__ import annotations -from pathlib import Path from typing import TYPE_CHECKING +from unittest.mock import MagicMock, patch import pytest import yaml -from mock import MagicMock, patch from pydantic import BaseModel from runway.cfngin.exceptions import MissingEnvironment @@ -20,7 +19,8 @@ from runway.exceptions import ConfigNotFound if TYPE_CHECKING: - from pytest import MonkeyPatch + from pathlib import Path + MODULE = "runway.config" @@ -34,7 +34,7 @@ class ExampleModel(BaseModel): class TestBaseConfig: """Test runway.config.BaseConfig.""" - def test_dump(self, monkeypatch: MonkeyPatch) -> None: + def test_dump(self, monkeypatch: pytest.MonkeyPatch) -> None: """Test dump.""" mock_dict = MagicMock(return_value={"name": "test"}) monkeypatch.setattr(ExampleModel, "dict", mock_dict) @@ -121,7 +121,9 @@ def test_parse_file_file_path_missing(self, tmp_path: Path) -> None: CfnginConfig.parse_file(file_path=config_yml) assert excinfo.value.path == config_yml - def test_parse_file_find_config_file(self, monkeypatch: MonkeyPatch, tmp_path: Path) -> None: + def test_parse_file_find_config_file( + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path + ) -> None: """Test parse_file with path.""" file_path = tmp_path / "test.yml" file_path.write_text("name: test\n") @@ -136,30 +138,28 @@ def test_parse_file_find_config_file(self, monkeypatch: MonkeyPatch, 
tmp_path: P ) def test_parse_file_find_config_file_value_error( - self, monkeypatch: MonkeyPatch, tmp_path: Path + self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path ) -> None: """Test parse_file with path raise ValueError.""" mock_find_config_file = MagicMock(return_value=[tmp_path / "01.yml", tmp_path / "02.yml"]) monkeypatch.setattr(CfnginConfig, "find_config_file", mock_find_config_file) - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="more than one"): CfnginConfig.parse_file(path=tmp_path) - assert str(excinfo.value).startswith("more than one") - def test_parse_file_value_error(self): + def test_parse_file_value_error(self) -> None: """Test parse_file raise ValueError.""" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="must provide path or file_path"): CfnginConfig.parse_file() - assert str(excinfo.value) == "must provide path or file_path" - def test_parse_obj(self, monkeypatch: MonkeyPatch) -> None: - """Test parse_obj.""" + def test_model_validate(self, monkeypatch: pytest.MonkeyPatch) -> None: + """Test model_validate.""" monkeypatch.setattr( - MODULE + ".CfnginConfigDefinitionModel.parse_obj", - lambda x: CfnginConfigDefinitionModel(namespace="success"), # type: ignore + MODULE + ".CfnginConfigDefinitionModel.model_validate", + lambda x: CfnginConfigDefinitionModel(namespace="success"), # type: ignore # noqa: ARG005 ) assert CfnginConfig.parse_obj({}).namespace == "success" - def test_parse_raw(self, monkeypatch: MonkeyPatch, tmp_path: Path) -> None: + def test_parse_raw(self, monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: """Test parse_raw.""" mock_resolve_raw_data = MagicMock() mock_parse_obj = MagicMock() @@ -195,7 +195,7 @@ def test_parse_raw(self, monkeypatch: MonkeyPatch, tmp_path: Path) -> None: @patch(MODULE + ".SourceProcessor") def test_process_package_sources( - self, mock_source_processor: MagicMock, monkeypatch: MonkeyPatch, tmp_path: Path + self, mock_source_processor: MagicMock, monkeypatch: pytest.MonkeyPatch, tmp_path: Path ) -> None: """Test process_package_sources.""" mock_resolve_raw_data = MagicMock(return_value="rendered") @@ -261,13 +261,13 @@ def test_resolve_raw_data_ignore_lookup(self) -> None: class TestRunwayConfig: """Test runway.config.RunwayConfig.""" - def test_find_config_file_yaml(self, tmp_path: Path): + def test_find_config_file_yaml(self, tmp_path: Path) -> None: """Test file_config_file runway.yaml.""" runway_yaml = tmp_path / "runway.yaml" runway_yaml.touch() assert RunwayConfig.find_config_file(tmp_path) == runway_yaml - def test_find_config_file_yml(self, tmp_path: Path): + def test_find_config_file_yml(self, tmp_path: Path) -> None: """Test file_config_file runway.yml.""" runway_yml = tmp_path / "runway.yml" runway_yml.touch() @@ -291,9 +291,8 @@ def test_find_config_file_value_error(self, tmp_path: Path) -> None: """Test file_config_file raise ValueError.""" (tmp_path / "runway.yaml").touch() (tmp_path / "runway.yml").touch() - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="more than one"): RunwayConfig.find_config_file(tmp_path) - assert str(excinfo.value).startswith("more than one") def test_parse_obj(self) -> None: """Test parse_obj.""" diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 2eb8c2bce..6c0e32b9f 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -5,17 +5,17 @@ import logging import os from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, Iterator, List, 
Optional, cast +from typing import TYPE_CHECKING, Any, Optional, cast +from unittest.mock import MagicMock, Mock import pytest import yaml -from mock import MagicMock from runway.config import RunwayConfig from runway.core.components import DeployEnvironment from .factories import ( - MockCFNginContext, + MockCfnginContext, MockRunwayConfig, MockRunwayContext, YamlLoader, @@ -24,17 +24,18 @@ from .mock_docker.fake_api_client import make_fake_client if TYPE_CHECKING: + from collections.abc import Iterator + from _pytest.config import Config from _pytest.python import Module from docker import DockerClient - from pytest import FixtureRequest, MonkeyPatch from pytest_mock import MockerFixture -LOG = logging.getLogger(__name__) -TEST_ROOT = Path(os.path.dirname(os.path.realpath(__file__))) +LOGGER = logging.getLogger(__name__) +TEST_ROOT = Path(__file__).parent -def pytest_ignore_collect(path: Any, config: Config) -> bool: +def pytest_ignore_collect(path: Any, config: Config) -> bool: # noqa: ARG001 """Determine if this directory should have its tests collected.""" if config.option.functional: return True @@ -54,16 +55,16 @@ def aws_credentials() -> Iterator[None]: "AWS_SECRET_ACCESS_KEY": "testing", "AWS_DEFAULT_REGION": "us-east-1", } - saved_env: Dict[str, Optional[str]] = {} + saved_env: dict[str, Optional[str]] = {} for key, value in overrides.items(): - LOG.info("Overriding env var: %s=%s", key, value) + LOGGER.info("Overriding env var: %s=%s", key, value) saved_env[key] = os.environ.get(key, None) os.environ[key] = value yield for key, value in saved_env.items(): - LOG.info("Restoring saved env var: %s=%s", key, value) + LOGGER.info("Restoring saved env var: %s=%s", key, value) if value is None: os.environ.pop(key, None) # handle key missing else: @@ -73,9 +74,9 @@ def aws_credentials() -> Iterator[None]: @pytest.fixture(scope="package") -def fixture_dir() -> str: +def fixture_dir() -> Path: """Path to the fixture directory.""" - return os.path.join(os.path.dirname(os.path.realpath(__file__)), "fixtures") + return Path(__file__).parent / "fixtures" @pytest.fixture(scope="module") @@ -88,19 +89,19 @@ def fx_config() -> YamlLoader: ) -@pytest.fixture(scope="function") +@pytest.fixture() def fx_deployments() -> YamlLoaderDeployment: """Return YAML loader for deployment fixtures.""" return YamlLoaderDeployment(TEST_ROOT / "fixtures" / "deployments") -@pytest.fixture(scope="function") +@pytest.fixture() def mock_docker_client() -> DockerClient: """Create a docker client with mock API backend.""" return make_fake_client() -@pytest.fixture(scope="function") +@pytest.fixture() def tempfile_temporary_directory(mocker: MockerFixture, tmp_path: Path) -> MagicMock: """Mock tempfile.TemporaryDirectory.""" return mocker.patch( @@ -110,63 +111,61 @@ def tempfile_temporary_directory(mocker: MockerFixture, tmp_path: Path) -> Magic @pytest.fixture(scope="module") -def yaml_fixtures(request: FixtureRequest, fixture_dir: str) -> Dict[str, Any]: +def yaml_fixtures(request: pytest.FixtureRequest, fixture_dir: Path) -> dict[str, Any]: """Load test fixture yaml files. Uses a list of file paths within the fixture directory loaded from the `YAML_FIXTURES` variable of the module. 
""" - file_paths: List[str] = getattr( + file_paths: list[str] = getattr( cast("Module", request.module), "YAML_FIXTURES", [] # type: ignore ) - result: Dict[str, Any] = {} + result: dict[str, Any] = {} for file_path in file_paths: - with open(os.path.join(fixture_dir, file_path), encoding="utf-8") as _file: - data = _file.read() - result[file_path] = yaml.safe_load(data) + result[file_path] = yaml.safe_load((fixture_dir / file_path).read_bytes()) return result -@pytest.fixture(scope="function") +@pytest.fixture() def deploy_environment(tmp_path: Path) -> DeployEnvironment: """Create a deploy environment that can be used for testing.""" return DeployEnvironment(explicit_name="test", root_dir=tmp_path) -@pytest.fixture(scope="function") -def cfngin_context(runway_context: MockRunwayContext) -> MockCFNginContext: +@pytest.fixture() +def cfngin_context(runway_context: MockRunwayContext) -> MockCfnginContext: """Create a mock CFNgin context object.""" - return MockCFNginContext(deploy_environment=runway_context.env, parameters={}) + return MockCfnginContext(deploy_environment=runway_context.env, parameters={}) -@pytest.fixture -def patch_time(monkeypatch: MonkeyPatch) -> None: - """Patch built-in time object.""" - monkeypatch.setattr("time.sleep", lambda s: None) # type: ignore +@pytest.fixture() +def mock_sleep(mocker: MockerFixture) -> Mock: + """Patch built-in ``time.sleep``.""" + return mocker.patch("time.sleep", return_value=None) -@pytest.fixture +@pytest.fixture() def platform_darwin(mocker: MockerFixture) -> None: """Patch platform.system to always return "Darwin".""" mocker.patch("platform.system", return_value="Darwin") -@pytest.fixture +@pytest.fixture() def platform_linux(mocker: MockerFixture) -> None: """Patch platform.system to always return "Linux".""" mocker.patch("platform.system", return_value="Linux") -@pytest.fixture +@pytest.fixture() def platform_windows(mocker: MockerFixture) -> None: """Patch platform.system to always return "Windows".""" mocker.patch("platform.system", return_value="Windows") -@pytest.fixture(scope="function") +@pytest.fixture() def patch_runway_config( - request: FixtureRequest, monkeypatch: MonkeyPatch, runway_config: MockRunwayConfig + request: pytest.FixtureRequest, monkeypatch: pytest.MonkeyPatch, runway_config: MockRunwayConfig ) -> MockRunwayConfig: """Patch Runway config and return a mock config object.""" patch_path = getattr(cast("Module", request.module), "PATCH_RUNWAY_CONFIG", None) @@ -175,14 +174,14 @@ def patch_runway_config( return runway_config -@pytest.fixture(scope="function") +@pytest.fixture() def runway_config() -> MockRunwayConfig: """Create a mock runway config object.""" return MockRunwayConfig() -@pytest.fixture(scope="function") -def runway_context(request: FixtureRequest, tmp_path: Path) -> MockRunwayContext: +@pytest.fixture() +def runway_context(request: pytest.FixtureRequest, tmp_path: Path) -> MockRunwayContext: """Create a mock Runway context object.""" env_vars = { "AWS_REGION": getattr(cast("Module", request.module), "AWS_REGION", "us-east-1"), diff --git a/tests/unit/context/test_base.py b/tests/unit/context/test_base.py index 26910a6fc..1123e937b 100644 --- a/tests/unit/context/test_base.py +++ b/tests/unit/context/test_base.py @@ -4,10 +4,10 @@ from __future__ import annotations from typing import TYPE_CHECKING, cast +from unittest.mock import MagicMock import boto3 import pytest -from mock import MagicMock from runway.context._base import BaseContext from runway.context.sys_info import SystemInfo @@ -30,7 +30,7 
@@ } -@pytest.fixture(scope="function") +@pytest.fixture() def mock_boto3_session(mocker: MockerFixture) -> MagicMock: """Mock boto3.Session.""" mock_session = MagicMock(autospec=boto3.Session) @@ -38,7 +38,7 @@ def mock_boto3_session(mocker: MockerFixture) -> MagicMock: return mock_session -@pytest.fixture(scope="function") +@pytest.fixture() def mock_sso_botocore_session(mocker: MockerFixture) -> MagicMock: """Mock runway.aws_sso_botocore.session.Session.""" return mocker.patch(f"{MODULE}.Session") diff --git a/tests/unit/context/test_cfngin.py b/tests/unit/context/test_cfngin.py index 6b64284dd..11c16884f 100644 --- a/tests/unit/context/test_cfngin.py +++ b/tests/unit/context/test_cfngin.py @@ -6,12 +6,12 @@ import io import json from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Union, cast +from typing import TYPE_CHECKING, Any, Optional, Union, cast +from unittest.mock import MagicMock import pytest from botocore.response import StreamingBody from botocore.stub import Stubber -from mock import MagicMock from runway.cfngin.exceptions import ( PersistentGraphCannotLock, @@ -41,12 +41,12 @@ } -def gen_tagset(tags: Dict[str, str]) -> TagSetTypeDef: +def gen_tagset(tags: dict[str, str]) -> TagSetTypeDef: """Create TagSet value from a dict.""" return [{"Key": key, "Value": value} for key, value in tags.items()] -def gen_s3_object_content(content: Union[Dict[str, Any], str]) -> StreamingBody: +def gen_s3_object_content(content: Union[dict[str, Any], str]) -> StreamingBody: """Convert a string or dict to S3 object body. Args: @@ -97,7 +97,7 @@ class TestCFNginContext: {"name": "stack2", "template_path": ".", "requires": ["stack1"]}, ], } - persist_graph_raw: Dict[str, Set[str]] = {"stack1": set(), "stack2": {"stack1"}} + persist_graph_raw: dict[str, set[str]] = {"stack1": set(), "stack2": {"stack1"}} persist_graph_config = CfnginConfig.parse_obj(persist_graph_raw_config) @pytest.mark.parametrize( @@ -180,7 +180,8 @@ def test_init(self, tmp_path: Path) -> None: assert obj.config_path == tmp_path assert obj.env == self.env assert obj.force_stacks == ["stack-01"] - assert not obj.hook_data and isinstance(obj.hook_data, dict) + assert not obj.hook_data + assert isinstance(obj.hook_data, dict) assert obj.logger assert obj.parameters == {"key": "val"} assert obj.stack_names == ["stack-02"] @@ -194,10 +195,13 @@ def test_init_defaults(self) -> None: assert obj.config_path == Path.cwd() assert isinstance(obj.env, DeployEnvironment) assert obj.force_stacks == [] - assert not obj.hook_data and isinstance(obj.hook_data, dict) + assert not obj.hook_data + assert isinstance(obj.hook_data, dict) assert obj.logger - assert not obj.parameters and isinstance(obj.parameters, dict) - assert not obj.stack_names and isinstance(obj.stack_names, list) + assert not obj.parameters + assert isinstance(obj.parameters, dict) + assert not obj.stack_names + assert isinstance(obj.stack_names, list) def test_lock_persistent_graph_locked(self, mocker: MockerFixture) -> None: """Test lock_persistent_graph no graph.""" @@ -301,7 +305,7 @@ def test_persistent_graph_no_such_key(self, mocker: MockerFixture) -> None: "put_object", {}, { - "Body": "{}".encode(), + "Body": b"{}", "ServerSideEncryption": "AES256", "ACL": "bucket-owner-full-control", "ContentType": "application/json", @@ -360,7 +364,7 @@ def test_persistent_graph_location_add_json(self) -> None: {"cfngin_bucket": "", "persistent_graph_key": "something"}, ], ) - def test_persistent_graph_location_empty(self, config_ext: 
Dict[str, str]) -> None: + def test_persistent_graph_location_empty(self, config_ext: dict[str, str]) -> None: """Test persistent_graph_location.""" config = CfnginConfig.parse_obj({"namespace": "test", **config_ext}) assert not CfnginContext(config=config).persistent_graph_location @@ -621,7 +625,7 @@ def test_template_indent(self) -> None: ({"namespace": "test", "cfngin_bucket": "something"}, True), ], ) - def test_upload_to_s3(self, config: Dict[str, Any], expected: bool) -> None: + def test_upload_to_s3(self, config: dict[str, Any], expected: bool) -> None: """Test upload_to_s3.""" assert CfnginContext(config=CfnginConfig.parse_obj(config)).upload_to_s3 is expected @@ -686,7 +690,7 @@ def test_unlock_persistent_graph_no_such_key(self, mocker: MockerFixture) -> Non stubber = Stubber(obj.s3_client) stubber.add_response( "get_object", - {"Body": "{}".encode()}, + {"Body": b"{}"}, { "ResponseContentType": "application/json", **obj.persistent_graph_location, @@ -696,9 +700,9 @@ def test_unlock_persistent_graph_no_such_key(self, mocker: MockerFixture) -> Non with stubber: assert obj.unlock_persistent_graph("123") - @pytest.mark.parametrize("graph_dict", cast(List[Dict[str, List[str]]], [{"stack0": []}, {}])) + @pytest.mark.parametrize("graph_dict", cast(list[dict[str, list[str]]], [{"stack0": []}, {}])) def test_unlock_persistent_graph( - self, graph_dict: Dict[str, List[str]], mocker: MockerFixture + self, graph_dict: dict[str, list[str]], mocker: MockerFixture ) -> None: """Test unlock_persistent_graph.""" mocker.patch.object( @@ -714,7 +718,7 @@ def test_unlock_persistent_graph( if not graph_dict: stubber.add_response( "get_object", - {"Body": "{}".encode()}, + {"Body": b"{}"}, { "ResponseContentType": "application/json", **obj.persistent_graph_location, diff --git a/tests/unit/context/test_runway.py b/tests/unit/context/test_runway.py index cbef68e81..4a37815ba 100644 --- a/tests/unit/context/test_runway.py +++ b/tests/unit/context/test_runway.py @@ -4,9 +4,9 @@ from __future__ import annotations from typing import TYPE_CHECKING, Any +from unittest.mock import MagicMock import pytest -from mock import MagicMock from runway.context._runway import RunwayContext from runway.context.sys_info import OsInfo diff --git a/tests/unit/context/test_sys_info.py b/tests/unit/context/test_sys_info.py index 317942863..e6d03143f 100644 --- a/tests/unit/context/test_sys_info.py +++ b/tests/unit/context/test_sys_info.py @@ -14,43 +14,43 @@ MODULE = "runway.context.sys_info" -@pytest.fixture(scope="function") -def clear_OsInfo() -> None: +@pytest.fixture() +def clear_os_info() -> None: """Clear OsInfo singleton.""" OsInfo.clear_singleton() -@pytest.fixture(scope="function") -def clear_SystemInfo() -> None: +@pytest.fixture() +def clear_system_info() -> None: """Clear OsInfo singleton.""" SystemInfo.clear_singleton() -@pytest.mark.usefixtures("clear_OsInfo") +@pytest.mark.usefixtures("clear_os_info") class TestOsInfo: """Test OsInfo.""" - def test_is_darwin_false(self, platform_linux: None) -> None: + def test_is_darwin_false(self, platform_linux: None) -> None: # noqa: ARG002 """Test is_darwin False.""" assert not OsInfo().is_darwin - def test_is_darwin(self, platform_darwin: None) -> None: + def test_is_darwin(self, platform_darwin: None) -> None: # noqa: ARG002 """Test is_darwin.""" assert OsInfo().is_darwin - def test_is_linux_false(self, platform_darwin: None) -> None: + def test_is_linux_false(self, platform_darwin: None) -> None: # noqa: ARG002 """Test is_linux False.""" assert not OsInfo().is_linux 
- def test_is_linux(self, platform_linux: None) -> None: + def test_is_linux(self, platform_linux: None) -> None: # noqa: ARG002 """Test is_linux.""" assert OsInfo().is_linux - def test_is_macos_false(self, platform_linux: None) -> None: + def test_is_macos_false(self, platform_linux: None) -> None: # noqa: ARG002 """Test is_macos False.""" assert not OsInfo().is_macos - def test_is_macos(self, platform_darwin: None) -> None: + def test_is_macos(self, platform_darwin: None) -> None: # noqa: ARG002 """Test is_macos.""" assert OsInfo().is_macos @@ -66,23 +66,23 @@ def test_is_posix(self, mocker: MockerFixture) -> None: mock_os.name = "posix" assert OsInfo().is_posix - def test_is_windows_false(self, platform_linux: None) -> None: + def test_is_windows_false(self, platform_linux: None) -> None: # noqa: ARG002 """Test is_windows False.""" assert not OsInfo().is_windows - def test_is_windows(self, platform_windows: None) -> None: + def test_is_windows(self, platform_windows: None) -> None: # noqa: ARG002 """Test is_windows.""" assert OsInfo().is_windows - def test_name_darwin(self, platform_darwin: None) -> None: + def test_name_darwin(self, platform_darwin: None) -> None: # noqa: ARG002 """Test name darwin.""" assert OsInfo().name == "darwin" - def test_name_linux(self, platform_linux: None) -> None: + def test_name_linux(self, platform_linux: None) -> None: # noqa: ARG002 """Test name linux.""" assert OsInfo().name == "linux" - def test_name_windows(self, platform_windows: None) -> None: + def test_name_windows(self, platform_windows: None) -> None: # noqa: ARG002 """Test name windows.""" assert OsInfo().name == "windows" @@ -91,7 +91,7 @@ def test_singleton(self) -> None: assert id(OsInfo()) == id(OsInfo()) -@pytest.mark.usefixtures("clear_SystemInfo") +@pytest.mark.usefixtures("clear_system_info") class TestSystemInfo: """Test SystemInfo.""" diff --git a/tests/unit/core/components/test_deploy_environment.py b/tests/unit/core/components/test_deploy_environment.py index aa12516c9..8c26e13a0 100644 --- a/tests/unit/core/components/test_deploy_environment.py +++ b/tests/unit/core/components/test_deploy_environment.py @@ -1,20 +1,20 @@ -"""Test runway.core.components.deploy_environment.""" +"""Test runway.core.components._deploy_environment.""" +# ruff: noqa: SLF001 from __future__ import annotations import logging import os from pathlib import Path -from typing import TYPE_CHECKING, Dict, List +from typing import TYPE_CHECKING +from unittest.mock import MagicMock import pytest from git.exc import InvalidGitRepositoryError -from mock import MagicMock -from runway.core.components import DeployEnvironment +from runway.core.components._deploy_environment import DeployEnvironment if TYPE_CHECKING: - from pytest import LogCaptureFixture from pytest_mock import MockerFixture MODULE = "runway.core.components._deploy_environment" @@ -29,7 +29,7 @@ class TestDeployEnvironment: """Test runway.core.components.DeployEnvironment.""" - def test_init(self, cd_tmp_path: Path) -> None: + def test___init__(self, cd_tmp_path: Path) -> None: """Test attributes set by init.""" new_dir = cd_tmp_path / "new_dir" obj = DeployEnvironment( @@ -43,7 +43,7 @@ def test_init(self, cd_tmp_path: Path) -> None: assert obj.root_dir == new_dir assert obj.vars == {"key": "val"} - def test_init_defaults(self, cd_tmp_path: Path) -> None: + def test___init___defaults(self, cd_tmp_path: Path) -> None: """Test attributes set by init default values.""" obj = DeployEnvironment() @@ -52,6 +52,11 @@ def test_init_defaults(self, 
cd_tmp_path: Path) -> None: assert obj.root_dir == cd_tmp_path assert obj.vars == os.environ + def test___init___empty_environ(self) -> None: + """Test attributes set by init.""" + obj = DeployEnvironment(environ={}) + assert obj.vars == {} + def test_boto3_credentials(self) -> None: """Test boto3_credentials.""" obj = DeployEnvironment(environ=TEST_CREDENTIALS) @@ -97,7 +102,7 @@ def test_branch_name(self, mocker: MockerFixture) -> None: obj = DeployEnvironment() assert obj.branch_name == branch_name - mock_git.Repo.assert_called_once_with(os.getcwd(), search_parent_directories=True) + mock_git.Repo.assert_called_once_with(str(Path.cwd()), search_parent_directories=True) def test_branch_name_invalid_repo(self, mocker: MockerFixture) -> None: """Test branch_name handle InvalidGitRepositoryError.""" @@ -106,9 +111,11 @@ def test_branch_name_invalid_repo(self, mocker: MockerFixture) -> None: obj = DeployEnvironment() assert obj.branch_name is None - mock_git.Repo.assert_called_once_with(os.getcwd(), search_parent_directories=True) + mock_git.Repo.assert_called_once_with(str(Path.cwd()), search_parent_directories=True) - def test_branch_name_no_git(self, mocker: MockerFixture, caplog: LogCaptureFixture) -> None: + def test_branch_name_no_git( + self, mocker: MockerFixture, caplog: pytest.LogCaptureFixture + ) -> None: """Test branch_name git ImportError.""" caplog.set_level(logging.DEBUG, logger="runway.core.components") mocker.patch(f"{MODULE}.git", object) @@ -120,15 +127,16 @@ def test_branch_name_no_git(self, mocker: MockerFixture, caplog: LogCaptureFixtu "to read the branch name" ) in caplog.messages - def test_branch_name_type_error(self, mocker: MockerFixture, caplog: LogCaptureFixture) -> None: + def test_branch_name_type_error( + self, mocker: MockerFixture, caplog: pytest.LogCaptureFixture + ) -> None: """Test branch_name handle TypeError.""" caplog.set_level(logging.WARNING, logger="runway") mock_git = mocker.patch(f"{MODULE}.git") mock_git.Repo.side_effect = TypeError with pytest.raises(SystemExit) as excinfo: - obj = DeployEnvironment() - assert not obj.branch_name + assert not DeployEnvironment().branch_name assert excinfo.value.code == 1 assert "Unable to retrieve the current git branch name!" 
in caplog.messages @@ -245,7 +253,7 @@ def test_name(self) -> None: ], ) def test_name_from_branch( - self, branch: str, environ: Dict[str, str], expected: str, mocker: MockerFixture + self, branch: str, environ: dict[str, str], expected: str, mocker: MockerFixture ) -> None: """Test name from branch.""" mock_prompt = MagicMock(return_value="user_value") @@ -307,7 +315,7 @@ def test_copy(self, mocker: MockerFixture, tmp_path: Path) -> None: ( "explicit", [ - 'deploy environment "test" is explicitly defined ' "in the environment", + 'deploy environment "test" is explicitly defined in the environment', "if not correct, update the value or unset it to " "fall back to the name of the current git branch " "or parent directory", @@ -316,7 +324,7 @@ def test_copy(self, mocker: MockerFixture, tmp_path: Path) -> None: ( "branch", [ - 'deploy environment "test" was determined from the ' "current git branch", + 'deploy environment "test" was determined from the current git branch', "if not correct, update the branch name or set an " "override via the DEPLOY_ENVIRONMENT environment " "variable", @@ -325,7 +333,7 @@ def test_copy(self, mocker: MockerFixture, tmp_path: Path) -> None: ( "directory", [ - 'deploy environment "test" was determined from ' "the current directory", + 'deploy environment "test" was determined from the current directory', "if not correct, update the directory name or " "set an override via the DEPLOY_ENVIRONMENT " "environment variable", @@ -336,8 +344,8 @@ def test_copy(self, mocker: MockerFixture, tmp_path: Path) -> None: def test_log_name( self, derived_from: str, - expected: List[str], - caplog: LogCaptureFixture, + expected: list[str], + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, ) -> None: """Test log_name.""" diff --git a/tests/unit/core/components/test_deployment.py b/tests/unit/core/components/test_deployment.py index eee9926d6..d071cdfed 100644 --- a/tests/unit/core/components/test_deployment.py +++ b/tests/unit/core/components/test_deployment.py @@ -1,24 +1,24 @@ -"""Test runway.core.components.deployment.""" +"""Test runway.core.components._deployment.""" +# ruff: noqa: SLF001 from __future__ import annotations import logging -from typing import TYPE_CHECKING, Any, Dict, List, cast +from typing import TYPE_CHECKING, Any, cast +from unittest.mock import ANY, MagicMock, Mock, PropertyMock, call import pytest -from mock import ANY, MagicMock, Mock, PropertyMock, call from runway.config.components.runway import ( RunwayDeploymentDefinition, RunwayVariablesDefinition, ) from runway.config.models.runway import RunwayFutureDefinitionModel -from runway.core.components import Deployment +from runway.core.components._deployment import Deployment from runway.exceptions import UnresolvedVariable from runway.variables import Variable if TYPE_CHECKING: - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from runway.core.type_defs import RunwayActionTypeDef @@ -100,7 +100,7 @@ def test___init___args( def test_assume_role_config( self, config: str, - expected: Dict[str, Any], + expected: dict[str, Any], fx_deployments: YamlLoaderDeployment, runway_context: MockRunwayContext, ) -> None: @@ -129,13 +129,12 @@ def test_env_vars_config_raise_unresolved_variable( ) with pytest.raises(UnresolvedVariable): - obj = Deployment( + assert not Deployment( context=runway_context, definition=RunwayDeploymentDefinition.parse_obj( - cast(Dict[str, Any], fx_deployments.get("min_required")) + cast(dict[str, Any], fx_deployments.get("min_required")) ), - ) - 
assert not obj.env_vars_config + ).env_vars_config def test_env_vars_config_unresolved( self, @@ -162,7 +161,7 @@ def test_env_vars_config_unresolved( ) variable = Mock(value=expected) - raw_deployment: Dict[str, Any] = cast(Dict[str, Any], fx_deployments.get("min_required")) + raw_deployment: dict[str, Any] = cast(dict[str, Any], fx_deployments.get("min_required")) deployment = RunwayDeploymentDefinition.parse_obj(raw_deployment) obj = Deployment(context=runway_context, definition=deployment) obj.definition._vars.update({"env_vars": variable}) @@ -183,7 +182,7 @@ def test_env_vars_config_unresolved( def test_regions( self, config: str, - expected: List[str], + expected: list[str], fx_deployments: YamlLoaderDeployment, runway_context: MockRunwayContext, ) -> None: @@ -228,7 +227,7 @@ def test_deploy( def test_deploy_async( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, fx_deployments: YamlLoaderDeployment, mocker: MockerFixture, runway_context: MockRunwayContext, @@ -248,7 +247,7 @@ def test_deploy_async( ) assert not obj.deploy() assert ( - "unnamed_deployment:processing regions in parallel... (output will be interwoven)" + "unnamed_deployment: processing regions in parallel... (output will be interwoven)" in caplog.messages ) mock_mp_context.assert_called_once_with("fork") @@ -263,7 +262,7 @@ def test_deploy_async( def test_deploy_sync( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, fx_deployments: YamlLoaderDeployment, mocker: MockerFixture, runway_context: MockRunwayContext, @@ -279,7 +278,7 @@ def test_deploy_sync( definition=fx_deployments.load("simple_parallel_regions"), ) assert not obj.deploy() - assert "unnamed_deployment:processing regions sequentially..." in caplog.messages + assert "unnamed_deployment: processing regions sequentially..." 
in caplog.messages mock_run.assert_has_calls([call("deploy", "us-east-1"), call("deploy", "us-west-2")]) @pytest.mark.parametrize("async_used", [(True), (False)]) @@ -313,7 +312,7 @@ def test_destroy( def test_init( self, async_used: bool, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, fx_deployments: YamlLoaderDeployment, mocker: MockerFixture, runway_context: MockRunwayContext, @@ -342,7 +341,7 @@ def test_init( def test_plan( self, async_used: bool, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, fx_deployments: YamlLoaderDeployment, mocker: MockerFixture, runway_context: MockRunwayContext, @@ -362,7 +361,7 @@ def test_plan( if async_used: assert ( - "unnamed_deployment:processing of regions will be done in " + "unnamed_deployment: processing of regions will be done in " "parallel during deploy/destroy" in caplog.messages ) mock_async.assert_not_called() @@ -407,11 +406,9 @@ def test_run_async( ) -> None: """Test run async.""" mocker.patch(f"{MODULE}.aws") - # ensure that mock.MagicMock is used for backported features mock_module = mocker.patch(f"{MODULE}.Module", MagicMock()) definition = fx_deployments.load("simple_parallel_regions") runway_context._use_concurrent = True - # ensure that mock.MagicMock is used for backported features mock_resolve = mocker.patch.object(definition, "resolve", MagicMock()) mocker.patch.object(Deployment, "validate_account_credentials") obj = Deployment(context=runway_context, definition=definition) @@ -420,15 +417,15 @@ def test_run_async( new_ctx = mock_resolve.call_args.args[0] assert new_ctx != runway_context - assert new_ctx.command == "destroy" and runway_context.command != "destroy" - assert ( - new_ctx.env.aws_region == "us-west-2" and runway_context.env.aws_region != "us-west-2" - ) + assert new_ctx.command == "destroy" + assert runway_context.command != "destroy" + assert new_ctx.env.aws_region == "us-west-2" + assert runway_context.env.aws_region != "us-west-2" assert mock_module.run_list.call_args.kwargs["context"] == new_ctx def test_validate_account_credentials( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, fx_deployments: YamlLoaderDeployment, runway_context: MockRunwayContext, diff --git a/tests/unit/core/components/test_module.py b/tests/unit/core/components/test_module.py index 387e47170..4e71370ac 100644 --- a/tests/unit/core/components/test_module.py +++ b/tests/unit/core/components/test_module.py @@ -3,11 +3,11 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING, Any, List, Optional, cast +from typing import TYPE_CHECKING, Any, Optional, cast +from unittest.mock import MagicMock, call import pytest import yaml -from mock import MagicMock, call from runway.core.components import Deployment, Module from runway.core.components._module import validate_environment @@ -15,7 +15,6 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from ...factories import MockRunwayContext, YamlLoaderDeployment @@ -23,7 +22,7 @@ MODULE = "runway.core.components._module" -@pytest.fixture(scope="function") +@pytest.fixture() def empty_opts_from_file(mocker: MockerFixture) -> None: """Empty Module.opts_from_file.""" mocker.patch.object(Module, "opts_from_file", {}) @@ -76,7 +75,7 @@ def test_child_modules( def test_environment_matches_defined( self, - cd_tmp_path: Path, + cd_tmp_path: Path, # noqa: ARG002 fx_deployments: YamlLoaderDeployment, mocker: MockerFixture, 
runway_context: MockRunwayContext, @@ -98,7 +97,7 @@ def test_environment_matches_defined( def test_environments_deployment( self, cd_tmp_path: Path, - empty_opts_from_file: None, + empty_opts_from_file: None, # noqa: ARG002 fx_deployments: YamlLoaderDeployment, runway_context: MockRunwayContext, ) -> None: @@ -178,7 +177,7 @@ def test_path( def test_payload_with_deployment( self, cd_tmp_path: Path, - empty_opts_from_file: None, + empty_opts_from_file: None, # noqa: ARG002 fx_deployments: YamlLoaderDeployment, runway_context: MockRunwayContext, ) -> None: @@ -328,7 +327,7 @@ def test_deploy( def test_deploy_async( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, fx_deployments: YamlLoaderDeployment, mocker: MockerFixture, runway_context: MockRunwayContext, @@ -366,7 +365,7 @@ def test_deploy_async( def test_deploy_sync( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, fx_deployments: YamlLoaderDeployment, mocker: MockerFixture, runway_context: MockRunwayContext, @@ -476,7 +475,7 @@ def test_init_no_children( def test_plan( self, async_used: bool, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, fx_deployments: YamlLoaderDeployment, mocker: MockerFixture, runway_context: MockRunwayContext, @@ -504,7 +503,7 @@ def test_plan( def test_plan_no_children( self, async_used: bool, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, fx_deployments: YamlLoaderDeployment, mocker: MockerFixture, runway_context: MockRunwayContext, @@ -526,7 +525,7 @@ def test_plan_no_children( def test_run( self, - empty_opts_from_file: None, + empty_opts_from_file: None, # noqa: ARG002 fx_deployments: YamlLoaderDeployment, mocker: MockerFixture, runway_context: MockRunwayContext, @@ -632,9 +631,9 @@ def test_run_list( ], ) def test_validate_environment( - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, env_def: Any, - expected_logs: List[str], + expected_logs: list[str], expected: Optional[bool], mocker: MockerFixture, runway_context: MockRunwayContext, diff --git a/tests/unit/core/components/test_module_path.py b/tests/unit/core/components/test_module_path.py index 7100e8982..c044fd5b0 100644 --- a/tests/unit/core/components/test_module_path.py +++ b/tests/unit/core/components/test_module_path.py @@ -5,10 +5,10 @@ from copy import deepcopy from pathlib import Path -from typing import TYPE_CHECKING, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Optional, Union +from unittest.mock import MagicMock import pytest -from mock import MagicMock from typing_extensions import TypedDict from runway.config.components.runway import RunwayModuleDefinition @@ -23,20 +23,19 @@ MODULE = "runway.core.components._module_path" -TypeDefTestDefinitionExpected = TypedDict( - "TypeDefTestDefinitionExpected", - arguments=Dict[str, str], - location=str, - source=str, - uri=str, -) -TypeDefTestDefinition = TypedDict( - "TypeDefTestDefinition", - definition=Optional[Union[Path, str]], - expected=TypeDefTestDefinitionExpected, -) +class TypeDefTestDefinitionExpected(TypedDict): # noqa: D101 + arguments: dict[str, str] + location: str + source: str + uri: str -TESTS: List[TypeDefTestDefinition] = [ + +class TypeDefTestDefinition(TypedDict): # noqa: D101 + definition: Optional[Union[Path, str]] + expected: TypeDefTestDefinitionExpected + + +TESTS: list[TypeDefTestDefinition] = [ { "definition": "git::git://github.com/onicagroup/foo/foo-bar.git", "expected": { diff --git a/tests/unit/core/components/test_module_type.py 
b/tests/unit/core/components/test_module_type.py index 098f8a477..11cdd27e4 100644 --- a/tests/unit/core/components/test_module_type.py +++ b/tests/unit/core/components/test_module_type.py @@ -4,7 +4,7 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING, List, Type +from typing import TYPE_CHECKING import pytest @@ -19,8 +19,6 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture - from runway.config.models.runway import RunwayModuleTypeTypeDef from runway.module.base import RunwayModule @@ -43,7 +41,7 @@ class TestRunwayModuleType: ], ) def test_autodetection( - self, files: List[str], expected: Type[RunwayModule], cd_tmp_path: Path + self, files: list[str], expected: type[RunwayModule], cd_tmp_path: Path ) -> None: """Test from autodetection.""" for file_path in files: @@ -57,7 +55,7 @@ def test_autodetection( assert not result.type_str assert result.module_class.__name__ == expected.__name__ - def test_autodetection_fail(self, caplog: LogCaptureFixture, cd_tmp_path: Path) -> None: + def test_autodetection_fail(self, caplog: pytest.LogCaptureFixture, cd_tmp_path: Path) -> None: """Test autodetection fail.""" caplog.set_level(logging.ERROR, logger="runway") with pytest.raises(SystemExit) as excinfo: @@ -88,7 +86,7 @@ def test_from_class_path(self, cd_tmp_path: Path) -> None: ], ) def test_from_extension( - self, ext: str, expected: Type[RunwayModule], cd_tmp_path: Path + self, ext: str, expected: type[RunwayModule], cd_tmp_path: Path ) -> None: """Test from path extension.""" filename = "filename." + ext @@ -109,7 +107,7 @@ def test_from_extension( def test_from_type_str( self, type_str: RunwayModuleTypeTypeDef, - expected: Type[RunwayModule], + expected: type[RunwayModule], cd_tmp_path: Path, ) -> None: """Test from type_str.""" diff --git a/tests/unit/core/providers/aws/s3/_helpers/conftest.py b/tests/unit/core/providers/aws/s3/_helpers/conftest.py index fa104748e..d9daae69e 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/conftest.py +++ b/tests/unit/core/providers/aws/s3/_helpers/conftest.py @@ -2,18 +2,23 @@ from __future__ import annotations -from pathlib import Path -from typing import List +from typing import TYPE_CHECKING import pytest from typing_extensions import TypedDict -LocalFiles = TypedDict( - "LocalFiles", files=List[Path], local_dir=Path, local_file=Path, tmp_path=Path -) +if TYPE_CHECKING: + from pathlib import Path -@pytest.fixture(scope="function") +class LocalFiles(TypedDict): + files: list[Path] + local_dir: Path + local_file: Path + tmp_path: Path + + +@pytest.fixture() def loc_files(tmp_path: Path) -> LocalFiles: """Fixture for creating local files.""" file0 = tmp_path / "some_directory" / "text0.txt" diff --git a/tests/unit/core/providers/aws/s3/_helpers/factories.py b/tests/unit/core/providers/aws/s3/_helpers/factories.py index 50ceb715b..401aa748a 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/factories.py +++ b/tests/unit/core/providers/aws/s3/_helpers/factories.py @@ -2,13 +2,13 @@ from __future__ import annotations -from typing import Any, Dict, Optional +from typing import Any, Optional class FakeTransferFutureCallArgs: """Fake TransferFutureCallArgs.""" - def __init__(self, *, extra_args: Optional[Dict[str, Any]] = None, **kwargs: Any): + def __init__(self, *, extra_args: Optional[dict[str, Any]] = None, **kwargs: Any) -> None: """Instantiate class.""" self.extra_args = extra_args or {} for kwarg, val in kwargs.items(): @@ -23,7 +23,7 @@ def __init__( size: Optional[int] = 
None, call_args: Optional[FakeTransferFutureCallArgs] = None, transfer_id: Optional[str] = None, - ): + ) -> None: """Instantiate class.""" self.size = size self.call_args = call_args or FakeTransferFutureCallArgs() @@ -36,9 +36,9 @@ class FakeTransferFuture: def __init__( self, result: Optional[str] = None, - exception: Exception = None, + exception: Optional[Exception] = None, meta: FakeTransferFutureMeta = None, - ): + ) -> None: """Instantiate class.""" self._result = result self._exception = exception diff --git a/tests/unit/core/providers/aws/s3/_helpers/sync_strategy/test_base.py b/tests/unit/core/providers/aws/s3/_helpers/sync_strategy/test_base.py index 7f4d8a25a..b889e5829 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/sync_strategy/test_base.py +++ b/tests/unit/core/providers/aws/s3/_helpers/sync_strategy/test_base.py @@ -3,10 +3,10 @@ from __future__ import annotations import datetime -from typing import TYPE_CHECKING, List, Optional, cast +from typing import TYPE_CHECKING, Any, cast +from unittest.mock import Mock import pytest -from mock import Mock from runway.core.providers.aws.s3._helpers.file_generator import FileStats from runway.core.providers.aws.s3._helpers.sync_strategy.base import ( @@ -39,19 +39,18 @@ def test_compare_size(self, dest_size: int, expected: bool, src_size: int) -> No @pytest.mark.parametrize("src, dest", [(None, None), (Mock(), None), (None, Mock())]) def test_compare_size_raise_value_error( - self, dest: Optional[FileStats], src: Optional[FileStats] + self, dest: FileStats | None, src: FileStats | None ) -> None: """Test compare_time.""" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="src_file and dest_file must not be None"): BaseSync().compare_size(src, dest) - assert str(excinfo.value) == "src_file and dest_file must not be None" def test_compare_time(self) -> None: """Test compare_time.""" - obj = BaseSync() + obj: BaseSync[Any] = BaseSync() now = datetime.datetime.now() future = now + datetime.timedelta(0, 15) - kwargs = {"src": "", "operation_name": "invalid"} + kwargs: dict[str, Any] = {"src": "", "operation_name": "invalid"} assert ( obj.compare_time( FileStats(last_update=now, **kwargs), @@ -77,10 +76,10 @@ def test_compare_time(self) -> None: @pytest.mark.parametrize("operation_name", ["copy", "upload"]) def test_compare_time_copy_or_upload(self, operation_name: str) -> None: """Test compare_time.""" - obj = BaseSync() + obj: BaseSync[Any] = BaseSync() now = datetime.datetime.now() future = now + datetime.timedelta(0, 15) - kwargs = {"src": "", "operation_name": operation_name} + kwargs: dict[str, Any] = {"src": "", "operation_name": operation_name} assert ( obj.compare_time( FileStats(last_update=now, **kwargs), @@ -105,10 +104,10 @@ def test_compare_time_copy_or_upload(self, operation_name: str) -> None: def test_compare_time_download(self) -> None: """Test compare_time.""" - obj = BaseSync() + obj: BaseSync[Any] = BaseSync() now = datetime.datetime.now() future = now + datetime.timedelta(0, 15) - kwargs = {"src": "", "operation_name": "download"} + kwargs: dict[str, Any] = {"src": "", "operation_name": "download"} assert ( obj.compare_time( FileStats(last_update=now, **kwargs), @@ -133,12 +132,11 @@ def test_compare_time_download(self) -> None: @pytest.mark.parametrize("src, dest", [(None, None), (Mock(), None), (None, Mock())]) def test_compare_time_raise_value_error( - self, dest: Optional[FileStats], src: Optional[FileStats] + self, dest: FileStats | None, src: FileStats | None ) -> None: 
"""Test compare_time.""" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="src_file and dest_file must not be None"): BaseSync().compare_time(src, dest) - assert str(excinfo.value) == "src_file and dest_file must not be None" def test_determine_should_sync(self) -> None: """Test determine_should_sync.""" @@ -147,16 +145,16 @@ def test_determine_should_sync(self) -> None: def test_init(self) -> None: """Test __init__.""" - valid_sync_types: List[ValidSyncType] = [ + valid_sync_types: list[ValidSyncType] = [ "file_at_src_and_dest", "file_not_at_dest", "file_not_at_src", ] for sync_type in valid_sync_types: - strategy = BaseSync(sync_type) + strategy: BaseSync[Any] = BaseSync(sync_type) assert strategy.sync_type == sync_type - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Unknown sync_type"): BaseSync("invalid_sync_type") # type: ignore def test_name(self) -> None: @@ -166,7 +164,7 @@ def test_name(self) -> None: def test_register_strategy(self) -> None: """Test register_strategy.""" session = Mock() - obj = BaseSync() + obj: BaseSync[Any] = BaseSync() obj.register_strategy(session) register_args = cast(Mock, session.register).call_args_list assert register_args[0][0][0] == "choosing-s3-sync-strategy" @@ -176,7 +174,7 @@ def test_use_sync_strategy(self, mocker: MockerFixture) -> None: """Test use_sync_strategy.""" assert BaseSync().use_sync_strategy({"invalid_sync_strategy": True}) is None # type: ignore mocker.patch.object(BaseSync, "name", "something") - obj = BaseSync() + obj: BaseSync[Any] = BaseSync() assert obj.use_sync_strategy({"something": True}) == obj # type: ignore diff --git a/tests/unit/core/providers/aws/s3/_helpers/sync_strategy/test_exact_timestamps.py b/tests/unit/core/providers/aws/s3/_helpers/sync_strategy/test_exact_timestamps.py index 6810f4e77..a7d3590ff 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/sync_strategy/test_exact_timestamps.py +++ b/tests/unit/core/providers/aws/s3/_helpers/sync_strategy/test_exact_timestamps.py @@ -3,10 +3,9 @@ from __future__ import annotations import datetime -from typing import Optional +from unittest.mock import Mock import pytest -from mock import Mock from runway.core.providers.aws.s3._helpers.file_generator import FileStats from runway.core.providers.aws.s3._helpers.sync_strategy.exact_timestamps import ( @@ -45,12 +44,11 @@ def test_compare_time_dest_older_not_download(self) -> None: @pytest.mark.parametrize("src, dest", [(None, None), (Mock(), None), (None, Mock())]) def test_compare_time_raise_value_error( - self, dest: Optional[FileStats], src: Optional[FileStats] + self, dest: FileStats | None, src: FileStats | None ) -> None: """Test compare_time.""" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="src_file and dest_file must not be None"): ExactTimestampsSync().compare_time(src, dest) - assert str(excinfo.value) == "src_file and dest_file must not be None" def test_compare_time_same(self) -> None: """Test compare_time.""" diff --git a/tests/unit/core/providers/aws/s3/_helpers/sync_strategy/test_register.py b/tests/unit/core/providers/aws/s3/_helpers/sync_strategy/test_register.py index c761a4660..429de4b7d 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/sync_strategy/test_register.py +++ b/tests/unit/core/providers/aws/s3/_helpers/sync_strategy/test_register.py @@ -3,8 +3,7 @@ from __future__ import annotations from typing import TYPE_CHECKING - -from mock import Mock, call +from unittest.mock import Mock, call from 
runway.core.providers.aws.s3._helpers.sync_strategy import ( DeleteSync, diff --git a/tests/unit/core/providers/aws/s3/_helpers/test_action_architecture.py b/tests/unit/core/providers/aws/s3/_helpers/test_action_architecture.py index 03a23c90c..5d5001276 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/test_action_architecture.py +++ b/tests/unit/core/providers/aws/s3/_helpers/test_action_architecture.py @@ -4,9 +4,9 @@ import os from typing import TYPE_CHECKING +from unittest.mock import Mock, call import pytest -from mock import Mock, call from runway.core.providers.aws.s3._helpers.action_architecture import ActionArchitecture from runway.core.providers.aws.s3._helpers.parameters import ParametersDataModel diff --git a/tests/unit/core/providers/aws/s3/_helpers/test_comparator.py b/tests/unit/core/providers/aws/s3/_helpers/test_comparator.py index a12f172ae..fed60411c 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/test_comparator.py +++ b/tests/unit/core/providers/aws/s3/_helpers/test_comparator.py @@ -3,10 +3,9 @@ from __future__ import annotations import datetime -from typing import List, Optional +from unittest.mock import Mock import pytest -from mock import Mock from runway.core.providers.aws.s3._helpers.comparator import Comparator from runway.core.providers.aws.s3._helpers.file_generator import FileStats @@ -38,8 +37,7 @@ def setup_method(self) -> None: def test_call_compare_key_equal_should_not_sync(self) -> None: """Test call compare key equal should not sync.""" self.sync_strategy.determine_should_sync.return_value = False - ref_list: List[FileStats] = [] - result_list: List[FileStats] = [] + ref_list: list[FileStats] = [] src_files = [ FileStats( src="", @@ -64,30 +62,22 @@ def test_call_compare_key_equal_should_not_sync(self) -> None: operation_name="", ) ] - files = self.comparator.call(iter(src_files), iter(dest_files)) - for filename in files: - result_list.append(filename) - assert result_list == ref_list + assert list(self.comparator.call(iter(src_files), iter(dest_files))) == ref_list # Try when the sync strategy says to sync the file. self.sync_strategy.determine_should_sync.return_value = True ref_list = [] - result_list = [] - files = self.comparator.call(iter(src_files), iter(dest_files)) ref_list.append(src_files[0]) - for filename in files: - result_list.append(filename) - assert result_list == ref_list + assert list(self.comparator.call(iter(src_files), iter(dest_files))) == ref_list - def test_call_compare_key_greater(self): + def test_call_compare_key_greater(self) -> None: """Test call compare key greater.""" self.not_at_dest_sync_strategy.determine_should_sync.return_value = False self.not_at_src_sync_strategy.determine_should_sync.return_value = True - src_files: List[FileStats] = [] - dest_files: List[FileStats] = [] - ref_list: List[FileStats] = [] - result_list: List[FileStats] = [] + src_files: list[FileStats] = [] + dest_files: list[FileStats] = [] + ref_list: list[FileStats] = [] src_file = FileStats( src="", dest="", @@ -111,32 +101,24 @@ def test_call_compare_key_greater(self): src_files.append(src_file) dest_files.append(dest_file) ref_list.append(dest_file) - files = self.comparator.call(iter(src_files), iter(dest_files)) - for filename in files: - result_list.append(filename) - assert result_list == ref_list + assert list(self.comparator.call(iter(src_files), iter(dest_files))) == ref_list # Now try when the sync strategy says not to sync the file. 
self.not_at_src_sync_strategy.determine_should_sync.return_value = False - result_list = [] ref_list = [] - files = self.comparator.call(iter(src_files), iter(dest_files)) - for filename in files: - result_list.append(filename) - assert result_list == ref_list + assert list(self.comparator.call(iter(src_files), iter(dest_files))) == ref_list def test_call_compare_key_less(self) -> None: """Test call compare key less.""" self.not_at_src_sync_strategy.determine_should_sync.return_value = False self.not_at_dest_sync_strategy.determine_should_sync.return_value = True - ref_list: List[FileStats] = [] - result_list: List[FileStats] = [] - src_files: List[FileStats] = [] - dest_files: List[FileStats] = [] + ref_list: list[FileStats] = [] + src_files: list[FileStats] = [] + dest_files: list[FileStats] = [] src_file = FileStats( src="", dest="", - compare_key="bomparator_test.py", + compare_key="bomparator_test.py", # cspell: disable-line size=10, last_update=NOW, src_type="local", @@ -156,27 +138,19 @@ def test_call_compare_key_less(self) -> None: src_files.append(src_file) dest_files.append(dest_file) ref_list.append(src_file) - files = self.comparator.call(iter(src_files), iter(dest_files)) - for filename in files: - result_list.append(filename) - assert result_list == ref_list + assert list(self.comparator.call(iter(src_files), iter(dest_files))) == ref_list # Now try when the sync strategy says not to sync the file. self.not_at_dest_sync_strategy.determine_should_sync.return_value = False - result_list = [] ref_list = [] - files = self.comparator.call(iter(src_files), iter(dest_files)) - for filename in files: - result_list.append(filename) - assert result_list == ref_list + assert list(self.comparator.call(iter(src_files), iter(dest_files))) == ref_list def test_call_empty_dest(self) -> None: """Test call empty dest.""" self.not_at_dest_sync_strategy.determine_should_sync.return_value = True - src_files: List[FileStats] = [] - dest_files: List[FileStats] = [] - ref_list: List[FileStats] = [] - result_list: List[FileStats] = [] + src_files: list[FileStats] = [] + dest_files: list[FileStats] = [] + ref_list: list[FileStats] = [] src_file = FileStats( src="", dest="", @@ -189,27 +163,19 @@ def test_call_empty_dest(self) -> None: ) src_files.append(src_file) ref_list.append(src_file) - files = self.comparator.call(iter(src_files), iter(dest_files)) - for filename in files: - result_list.append(filename) - assert result_list == ref_list + assert list(self.comparator.call(iter(src_files), iter(dest_files))) == ref_list # Now try when the sync strategy says not to sync the file. 
self.not_at_dest_sync_strategy.determine_should_sync.return_value = False - result_list = [] ref_list = [] - files = self.comparator.call(iter(src_files), iter(dest_files)) - for filename in files: - result_list.append(filename) - assert result_list == ref_list + assert list(self.comparator.call(iter(src_files), iter(dest_files))) == ref_list def test_call_empty_src(self) -> None: """Test call empty src.""" self.not_at_src_sync_strategy.determine_should_sync.return_value = True - src_files: List[FileStats] = [] - dest_files: List[FileStats] = [] - ref_list: List[FileStats] = [] - result_list: List[FileStats] = [] + src_files: list[FileStats] = [] + dest_files: list[FileStats] = [] + ref_list: list[FileStats] = [] dest_file = FileStats( src="", dest="", @@ -222,30 +188,19 @@ def test_call_empty_src(self) -> None: ) dest_files.append(dest_file) ref_list.append(dest_file) - files = self.comparator.call(iter(src_files), iter(dest_files)) - for filename in files: - result_list.append(filename) - assert result_list == ref_list + assert list(self.comparator.call(iter(src_files), iter(dest_files))) == ref_list # Now try when the sync strategy says not to sync the file. self.not_at_src_sync_strategy.determine_should_sync.return_value = False - result_list = [] ref_list = [] - files = self.comparator.call(iter(src_files), iter(dest_files)) - for filename in files: - result_list.append(filename) - assert result_list == ref_list + assert list(self.comparator.call(iter(src_files), iter(dest_files))) == ref_list def test_call_empty_src_dest(self) -> None: """Test call.""" - src_files: List[FileStats] = [] - dest_files: List[FileStats] = [] - ref_list: List[FileStats] = [] - result_list: List[FileStats] = [] - files = self.comparator.call(iter(src_files), iter(dest_files)) - for filename in files: - result_list.append(filename) - assert result_list == ref_list + src_files: list[FileStats] = [] + dest_files: list[FileStats] = [] + ref_list: list[FileStats] = [] + assert list(self.comparator.call(iter(src_files), iter(dest_files))) == ref_list @pytest.mark.parametrize( "src_file, dest_file, expected", @@ -260,9 +215,9 @@ def test_call_empty_src_dest(self) -> None: ) def test_compare_comp_key( self, - dest_file: Optional[FileStats], + dest_file: FileStats | None, expected: str, - src_file: Optional[FileStats], + src_file: FileStats | None, ) -> None: """Test compare_comp_key.""" assert Comparator.compare_comp_key(src_file, dest_file) == expected diff --git a/tests/unit/core/providers/aws/s3/_helpers/test_file_generator.py b/tests/unit/core/providers/aws/s3/_helpers/test_file_generator.py index 6e4522160..0fdd79559 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/test_file_generator.py +++ b/tests/unit/core/providers/aws/s3/_helpers/test_file_generator.py @@ -6,13 +6,12 @@ import os import platform import stat -from pathlib import Path from typing import TYPE_CHECKING +from unittest.mock import Mock import pytest from botocore.exceptions import ClientError from dateutil.tz import tzlocal -from mock import Mock from runway.core.providers.aws.s3._helpers.file_generator import ( FileGenerator, @@ -24,6 +23,8 @@ from runway.core.providers.aws.s3._helpers.utils import EPOCH_TIME if TYPE_CHECKING: + from pathlib import Path + from pytest_mock import MockerFixture from runway.core.providers.aws.s3._helpers.file_generator import ( @@ -342,7 +343,7 @@ def test_safely_get_file_stats_no_last_update( ) mock_create_warning.assert_called_once_with( path=tmp_path, - error_message="File has an invalid timestamp. 
Passing epoch " "time as timestamp.", + error_message="File has an invalid timestamp. Passing epoch time as timestamp.", skip_file=False, ) assert obj.result_queue.get() == "warning" diff --git a/tests/unit/core/providers/aws/s3/_helpers/test_file_info.py b/tests/unit/core/providers/aws/s3/_helpers/test_file_info.py index 4b4297e2f..d684b12a6 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/test_file_info.py +++ b/tests/unit/core/providers/aws/s3/_helpers/test_file_info.py @@ -6,7 +6,6 @@ from typing import TYPE_CHECKING import pytest -from typing_extensions import Literal from runway.core.providers.aws.s3._helpers.file_info import FileInfo from runway.core.providers.aws.s3._helpers.utils import EPOCH_TIME @@ -15,6 +14,7 @@ from pathlib import Path from mypy_boto3_s3.type_defs import ObjectTypeDef + from typing_extensions import Literal NOW = datetime.datetime.now() diff --git a/tests/unit/core/providers/aws/s3/_helpers/test_file_info_builder.py b/tests/unit/core/providers/aws/s3/_helpers/test_file_info_builder.py index 3b61056f7..7c45e02ed 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/test_file_info_builder.py +++ b/tests/unit/core/providers/aws/s3/_helpers/test_file_info_builder.py @@ -2,7 +2,7 @@ from __future__ import annotations -from mock import Mock +from unittest.mock import Mock from runway.core.providers.aws.s3._helpers.file_generator import FileStats from runway.core.providers.aws.s3._helpers.file_info import FileInfo diff --git a/tests/unit/core/providers/aws/s3/_helpers/test_format_path.py b/tests/unit/core/providers/aws/s3/_helpers/test_format_path.py index 3488268b6..698ab0487 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/test_format_path.py +++ b/tests/unit/core/providers/aws/s3/_helpers/test_format_path.py @@ -3,10 +3,10 @@ from __future__ import annotations import os -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING +from unittest.mock import call import pytest -from mock import call from runway.core.providers.aws.s3._helpers.format_path import FormatPath @@ -82,7 +82,7 @@ def test_format_local_path(self, tmp_path: Path) -> None: ("s3://bucket/key.txt", False, ("s3://bucket/key.txt", False)), ], ) - def test_format_s3_path(self, dir_op: bool, expected: Tuple[str, bool], path: str) -> None: + def test_format_s3_path(self, dir_op: bool, expected: tuple[str, bool], path: str) -> None: """Test format_s3_path.""" assert FormatPath.format_s3_path(path, dir_op) == expected @@ -97,6 +97,6 @@ def test_format_s3_path(self, dir_op: bool, expected: Tuple[str, bool], path: st ("s3://test", ("s3", "test")), ], ) - def test_identify_path_type(self, expected: Tuple[SupportedPathType, str], path: str) -> None: + def test_identify_path_type(self, expected: tuple[SupportedPathType, str], path: str) -> None: """Test identify_path_type.""" assert FormatPath.identify_path_type(path) == expected diff --git a/tests/unit/core/providers/aws/s3/_helpers/test_parameters.py b/tests/unit/core/providers/aws/s3/_helpers/test_parameters.py index 856549241..535a6c7b0 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/test_parameters.py +++ b/tests/unit/core/providers/aws/s3/_helpers/test_parameters.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Dict, List +from typing import TYPE_CHECKING, Any import pytest from pydantic import ValidationError @@ -67,9 +67,8 @@ def test_same_path_mv_locallocal(self) -> None: def test_same_path_mv_s3s3(self) -> None: """Test _same_path.""" self.data_s3s3.dest = 
self.data_s3s3.src - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="Cannot mv a file onto itself"): Parameters("mv", self.data_s3s3) - assert "Cannot mv a file onto itself" in str(excinfo.value) def test_same_path_mv_s3s3_not_same(self) -> None: """Test _same_path.""" @@ -162,7 +161,7 @@ def test_normalize_s3_trailing_slash(self, provided: str, expected: str) -> None "kwargs, error_locs", [({"dest": "test-dest"}, ["src"]), ({"src": "test-src"}, ["dest"])], ) - def test_required_fields(self, error_locs: List[str], kwargs: Dict[str, Any]) -> None: + def test_required_fields(self, error_locs: list[str], kwargs: dict[str, Any]) -> None: """Test required fields.""" with pytest.raises(ValidationError) as excinfo: ParametersDataModel(**kwargs) diff --git a/tests/unit/core/providers/aws/s3/_helpers/test_results.py b/tests/unit/core/providers/aws/s3/_helpers/test_results.py index 033a94028..2ff84fbf8 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/test_results.py +++ b/tests/unit/core/providers/aws/s3/_helpers/test_results.py @@ -6,10 +6,10 @@ from concurrent.futures import CancelledError from io import StringIO from queue import Queue -from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional +from typing import TYPE_CHECKING, Any, ClassVar, Optional +from unittest.mock import Mock import pytest -from mock import Mock from s3transfer.exceptions import FatalError from runway._logging import LogLevels @@ -53,7 +53,6 @@ ) if TYPE_CHECKING: - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from s3transfer.futures import TransferFuture @@ -99,7 +98,7 @@ class BaseResultSubscriberTest: future: TransferFuture key: ClassVar[str] = "test.txt" ref_exception: ClassVar[Exception] = Exception() - result_queue: "Queue[Any]" + result_queue: Queue[Any] size: ClassVar[int] = 20 * (1024 * 1024) # 20 MB src: str transfer_type: str @@ -293,7 +292,7 @@ class TestCommandResultRecorder: command_result_recorder: CommandResultRecorder dest: ClassVar[str] = "s3://mybucket/test-key" result_processor: ResultProcessor - result_queue: "Queue[Any]" + result_queue: Queue[Any] result_recorder: ResultRecorder src: ClassVar[str] = "file" total_transfer_size: ClassVar[int] = 20 * (1024 * 1024) # 20 MB @@ -375,7 +374,7 @@ def test_notify_total_submissions(self) -> None: class TestCopyResultSubscriber(TestUploadResultSubscriber): """Test CopyResultSubscriber.""" - copy_source: Dict[str, str] + copy_source: dict[str, str] source_bucket: str source_key: str @@ -469,7 +468,7 @@ def test_does_not_print_progress_result(self) -> None: self.result_printer(progress_result) assert self.out_file.getvalue() == "" - def test_does_print_success_result(self, caplog: LogCaptureFixture) -> None: + def test_does_print_success_result(self, caplog: pytest.LogCaptureFixture) -> None: """Test does print success result.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") transfer_type = "upload" @@ -481,7 +480,7 @@ def test_does_print_success_result(self, caplog: LogCaptureFixture) -> None: assert self.out_file.getvalue() == "" def test_final_total_does_not_try_to_clear_empty_progress( - self, caplog: LogCaptureFixture + self, caplog: pytest.LogCaptureFixture ) -> None: """Test final total does not try to clear empty progress.""" caplog.set_level(LogLevels.INFO, "runway.core.providers.aws.s3") @@ -502,7 +501,7 @@ def test_final_total_does_not_try_to_clear_empty_progress( assert caplog.messages == ["upload: file to s3://mybucket/test-key"] assert 
self.out_file.getvalue() == "" - def test_print_failure_result(self, caplog: LogCaptureFixture) -> None: + def test_print_failure_result(self, caplog: pytest.LogCaptureFixture) -> None: """Test print failure result.""" caplog.set_level(LogLevels.ERROR, "runway.core.providers.aws.s3") transfer_type = "upload" @@ -518,7 +517,7 @@ def test_print_failure_result(self, caplog: LogCaptureFixture) -> None: assert caplog.messages == ["upload failed: file to s3://mybucket/test-key my exception"] assert self.error_file.getvalue() == "" - def test_print_warning_result(self, caplog: LogCaptureFixture) -> None: + def test_print_warning_result(self, caplog: pytest.LogCaptureFixture) -> None: """Test print warning.""" caplog.set_level(LogLevels.WARNING, "runway.core.providers.aws.s3") self.result_printer(PrintTask("warning: my warning")) @@ -544,7 +543,7 @@ def test_does_not_print_progress_result(self) -> None: self.result_printer(progress_result) assert self.out_file.getvalue() == "" - def test_does_not_print_success_result(self, caplog: LogCaptureFixture) -> None: + def test_does_not_print_success_result(self, caplog: pytest.LogCaptureFixture) -> None: """Test does not print success result.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") transfer_type = "upload" @@ -555,7 +554,7 @@ def test_does_not_print_success_result(self, caplog: LogCaptureFixture) -> None: assert not caplog.messages assert not self.out_file.getvalue() - def test_does_print_failure_result(self, caplog: LogCaptureFixture) -> None: + def test_does_print_failure_result(self, caplog: pytest.LogCaptureFixture) -> None: """Test print failure result.""" caplog.set_level(LogLevels.ERROR, "runway.core.providers.aws.s3") transfer_type = "upload" @@ -571,7 +570,7 @@ def test_does_print_failure_result(self, caplog: LogCaptureFixture) -> None: assert caplog.messages == ["upload failed: file to s3://mybucket/test-key my exception"] assert not self.error_file.getvalue() - def test_does_print_warning_result(self, caplog: LogCaptureFixture) -> None: + def test_does_print_warning_result(self, caplog: pytest.LogCaptureFixture) -> None: """Test print warning.""" caplog.set_level(LogLevels.WARNING, "runway.core.providers.aws.s3") self.result_printer(PrintTask("warning: my warning")) @@ -579,7 +578,7 @@ def test_does_print_warning_result(self, caplog: LogCaptureFixture) -> None: assert not self.error_file.getvalue() def test_final_total_does_not_try_to_clear_empty_progress( - self, caplog: LogCaptureFixture + self, caplog: pytest.LogCaptureFixture ) -> None: """Test final total does not try to clear empty progress.""" caplog.set_level(LogLevels.INFO, "runway.core.providers.aws.s3") @@ -604,20 +603,20 @@ def test_final_total_does_not_try_to_clear_empty_progress( class TestResultPrinter(BaseResultPrinterTest): """Test ResultPrinter.""" - def test_ctrl_c_error(self, caplog: LogCaptureFixture) -> None: + def test_ctrl_c_error(self, caplog: pytest.LogCaptureFixture) -> None: """Test Ctrl+C error.""" caplog.set_level(LogLevels.WARNING, "runway.core.providers.aws.s3") self.result_printer(CtrlCResult(Exception())) assert caplog.messages == ["cancelled: ctrl-c received"] - def test_dry_run(self, caplog: LogCaptureFixture) -> None: + def test_dry_run(self, caplog: pytest.LogCaptureFixture) -> None: """Test dry run.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") result = DryRunResult(transfer_type="upload", src="s3://mybucket/key", dest="./local/file") self.result_printer(result) assert caplog.messages == [f"(dryrun) 
upload: {result.src} to {result.dest}"] - def test_dry_run_unicode(self, caplog: LogCaptureFixture) -> None: + def test_dry_run_unicode(self, caplog: pytest.LogCaptureFixture) -> None: """Test dry run.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") result = DryRunResult( @@ -626,19 +625,19 @@ def test_dry_run_unicode(self, caplog: LogCaptureFixture) -> None: self.result_printer(result) assert caplog.messages == [f"(dryrun) upload: {result.src} to {result.dest}"] - def test_error(self, caplog: LogCaptureFixture) -> None: + def test_error(self, caplog: pytest.LogCaptureFixture) -> None: """Test error.""" caplog.set_level(LogLevels.ERROR, "runway.core.providers.aws.s3") self.result_printer(ErrorResult(Exception("my exception"))) assert caplog.messages == ["fatal error: my exception"] - def test_error_unicode(self, caplog: LogCaptureFixture) -> None: + def test_error_unicode(self, caplog: pytest.LogCaptureFixture) -> None: """Test error.""" caplog.set_level(LogLevels.ERROR, "runway.core.providers.aws.s3") self.result_printer(ErrorResult(Exception("unicode exists \u2713"))) assert caplog.messages == ["fatal error: unicode exists \u2713"] - def test_error_while_progress(self, caplog: LogCaptureFixture) -> None: + def test_error_while_progress(self, caplog: pytest.LogCaptureFixture) -> None: """Test error.""" caplog.set_level(LogLevels.ERROR, "runway.core.providers.aws.s3") mb = 1024**2 @@ -651,7 +650,7 @@ def test_error_while_progress(self, caplog: LogCaptureFixture) -> None: assert caplog.messages == ["fatal error: my exception"] assert not self.out_file.getvalue() - def test_failure(self, caplog: LogCaptureFixture) -> None: + def test_failure(self, caplog: pytest.LogCaptureFixture) -> None: """Test failure.""" caplog.set_level(LogLevels.ERROR, "runway.core.providers.aws.s3") transfer_type = "upload" @@ -670,7 +669,7 @@ def test_failure(self, caplog: LogCaptureFixture) -> None: assert caplog.messages == [f"upload failed: file to {dest} my exception"] def test_failure_but_no_expected_files_transferred_provided( - self, caplog: LogCaptureFixture + self, caplog: pytest.LogCaptureFixture ) -> None: """Test failure.""" caplog.set_level(LogLevels.ERROR, "runway.core.providers.aws.s3") @@ -696,11 +695,11 @@ def test_failure_but_no_expected_files_transferred_provided( ) self.result_printer(failure_result) assert self.out_file.getvalue() == ( - "Completed 1.0 MiB/~1.0 MiB (0 Bytes/s) with ~0 file(s) " "remaining (calculating...)\r" + "Completed 1.0 MiB/~1.0 MiB (0 Bytes/s) with ~0 file(s) remaining (calculating...)\r" ) assert caplog.messages == [f"upload failed: file to {dest} my exception"] - def test_failure_for_delete(self, caplog: LogCaptureFixture) -> None: + def test_failure_for_delete(self, caplog: pytest.LogCaptureFixture) -> None: """Test failure.""" caplog.set_level(LogLevels.ERROR, "runway.core.providers.aws.s3") transfer_type = "delete" @@ -719,7 +718,7 @@ def test_failure_for_delete(self, caplog: LogCaptureFixture) -> None: assert caplog.messages == [f"delete failed: {src} my exception"] def test_failure_for_delete_but_no_expected_files_transferred_provided( - self, caplog: LogCaptureFixture + self, caplog: pytest.LogCaptureFixture ) -> None: """Test failure.""" shared_file = self.out_file @@ -744,7 +743,9 @@ def test_failure_for_delete_but_no_expected_files_transferred_provided( ) assert caplog.messages == [f"delete failed: {src} my exception"] - def test_failure_for_delete_with_files_remaining(self, caplog: LogCaptureFixture) -> None: + def 
test_failure_for_delete_with_files_remaining( + self, caplog: pytest.LogCaptureFixture + ) -> None: """Test failure.""" caplog.set_level(LogLevels.ERROR, "runway.core.providers.aws.s3") shared_file = self.out_file @@ -770,7 +771,7 @@ def test_failure_for_delete_with_files_remaining(self, caplog: LogCaptureFixture ) assert caplog.messages == [f"delete failed: {src} my exception"] - def test_failure_unicode(self, caplog: LogCaptureFixture) -> None: + def test_failure_unicode(self, caplog: pytest.LogCaptureFixture) -> None: """Test failure.""" caplog.set_level(LogLevels.ERROR, "runway.core.providers.aws.s3") transfer_type = "upload" @@ -788,7 +789,7 @@ def test_failure_unicode(self, caplog: LogCaptureFixture) -> None: self.result_printer(failure_result) assert caplog.messages == [f"upload failed: {src} to {dest} my exception"] - def test_failure_with_files_remaining(self, caplog: LogCaptureFixture) -> None: + def test_failure_with_files_remaining(self, caplog: pytest.LogCaptureFixture) -> None: """Test failure.""" caplog.set_level(LogLevels.ERROR, "runway.core.providers.aws.s3") shared_file = self.out_file @@ -813,11 +814,11 @@ def test_failure_with_files_remaining(self, caplog: LogCaptureFixture) -> None: ) self.result_printer(failure_result) assert self.out_file.getvalue() == ( - "Completed 1.0 MiB/~4.0 MiB (0 Bytes/s) with ~3 file(s) " "remaining (calculating...)\r" + "Completed 1.0 MiB/~4.0 MiB (0 Bytes/s) with ~3 file(s) remaining (calculating...)\r" ) assert caplog.messages == [f"upload failed: file to {dest} my exception"] - def test_failure_with_progress(self, caplog: LogCaptureFixture) -> None: + def test_failure_with_progress(self, caplog: pytest.LogCaptureFixture) -> None: """Test failure.""" caplog.set_level(LogLevels.ERROR, "runway.core.providers.aws.s3") shared_file = self.out_file @@ -853,7 +854,7 @@ def test_failure_with_progress(self, caplog: LogCaptureFixture) -> None: assert caplog.messages == [f"upload failed: file to {dest} my exception"] def test_final_total_does_not_print_out_newline_for_no_transfers( - self, caplog: LogCaptureFixture + self, caplog: pytest.LogCaptureFixture ) -> None: """Test final total.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") @@ -862,7 +863,7 @@ def test_final_total_does_not_print_out_newline_for_no_transfers( assert not self.out_file.getvalue() def test_final_total_notification_with_no_more_expected_progress( - self, caplog: LogCaptureFixture + self, caplog: pytest.LogCaptureFixture ) -> None: """Test final total.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") @@ -877,7 +878,7 @@ def test_final_total_notification_with_no_more_expected_progress( success_result = SuccessResult(transfer_type=transfer_type, src=src, dest=dest) self.result_printer(success_result) assert self.out_file.getvalue() == ( - "Completed 1.0 MiB/~1.0 MiB (0 Bytes/s) with ~0 file(s) " "remaining (calculating...)\r" + "Completed 1.0 MiB/~1.0 MiB (0 Bytes/s) with ~0 file(s) remaining (calculating...)\r" ) assert caplog.messages == [f"upload: file to {dest}"] @@ -998,7 +999,7 @@ def test_init_no_out_file(self, mocker: MockerFixture) -> None: result = ResultPrinter(self.result_recorder, error_file=self.error_file) assert result._out_file == mock_stdout - def test_success(self, caplog: LogCaptureFixture) -> None: + def test_success(self, caplog: pytest.LogCaptureFixture) -> None: """Test success.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") transfer_type = "upload" @@ -1012,7 +1013,7 @@ def test_success(self, 
caplog: LogCaptureFixture) -> None: assert caplog.messages == [f"upload: file to {dest}"] def test_success_but_no_expected_files_transferred_provided( - self, caplog: LogCaptureFixture + self, caplog: pytest.LogCaptureFixture ) -> None: """Test success but no expected files transferred provided.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") @@ -1028,11 +1029,11 @@ def test_success_but_no_expected_files_transferred_provided( success_result = SuccessResult(transfer_type=transfer_type, src=src, dest=dest) self.result_printer(success_result) assert self.out_file.getvalue() == ( - "Completed 1.0 MiB/~1.0 MiB (0 Bytes/s) with ~0 file(s) " "remaining (calculating...)\r" + "Completed 1.0 MiB/~1.0 MiB (0 Bytes/s) with ~0 file(s) remaining (calculating...)\r" ) assert caplog.messages == [f"upload: file to {dest}"] - def test_success_delete(self, caplog: LogCaptureFixture) -> None: + def test_success_delete(self, caplog: pytest.LogCaptureFixture) -> None: """Test success for delete.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") transfer_type = "delete" @@ -1045,7 +1046,7 @@ def test_success_delete(self, caplog: LogCaptureFixture) -> None: assert caplog.messages == [f"delete: {src}"] def test_success_delete_but_no_expected_files_transferred_provided( - self, caplog: LogCaptureFixture + self, caplog: pytest.LogCaptureFixture ) -> None: """Test success delete but no expected files transferred provided.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") @@ -1061,7 +1062,7 @@ def test_success_delete_but_no_expected_files_transferred_provided( ) assert caplog.messages == [f"delete: {src}"] - def test_success_delete_with_files_remaining(self, caplog: LogCaptureFixture) -> None: + def test_success_delete_with_files_remaining(self, caplog: pytest.LogCaptureFixture) -> None: """Test success delete with files remaining.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") transfer_type = "delete" @@ -1076,7 +1077,7 @@ def test_success_delete_with_files_remaining(self, caplog: LogCaptureFixture) -> ) assert caplog.messages == [f"delete: {src}"] - def test_success_unicode_src(self, caplog: LogCaptureFixture) -> None: + def test_success_unicode_src(self, caplog: pytest.LogCaptureFixture) -> None: """Test success.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") self.result_recorder.final_expected_files_transferred = 1 @@ -1086,7 +1087,7 @@ def test_success_unicode_src(self, caplog: LogCaptureFixture) -> None: self.result_printer(result) assert caplog.messages == [f"delete: {result.src}"] - def test_success_unicode_src_and_dest(self, caplog: LogCaptureFixture) -> None: + def test_success_unicode_src_and_dest(self, caplog: pytest.LogCaptureFixture) -> None: """Test success.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") self.result_recorder.final_expected_files_transferred = 1 @@ -1098,7 +1099,7 @@ def test_success_unicode_src_and_dest(self, caplog: LogCaptureFixture) -> None: self.result_printer(result) assert caplog.messages == [f"upload: {result.src} to {result.dest}"] - def test_success_with_files_remaining(self, caplog: LogCaptureFixture) -> None: + def test_success_with_files_remaining(self, caplog: pytest.LogCaptureFixture) -> None: """Test success with files remaining.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") transfer_type = "upload" @@ -1112,11 +1113,11 @@ def test_success_with_files_remaining(self, caplog: LogCaptureFixture) -> None: success_result = 
SuccessResult(transfer_type=transfer_type, src=src, dest=dest) self.result_printer(success_result) assert self.out_file.getvalue() == ( - "Completed 1.0 MiB/~4.0 MiB (0 Bytes/s) with ~3 file(s) " "remaining (calculating...)\r" + "Completed 1.0 MiB/~4.0 MiB (0 Bytes/s) with ~3 file(s) remaining (calculating...)\r" ) assert caplog.messages == [f"upload: file to {dest}"] - def test_success_with_progress(self, caplog: LogCaptureFixture) -> None: + def test_success_with_progress(self, caplog: pytest.LogCaptureFixture) -> None: """Test success with progress.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") mb = 1024**2 @@ -1145,7 +1146,7 @@ def test_unknown_result_object(self) -> None: assert self.out_file.getvalue() == "" assert self.error_file.getvalue() == "" - def test_warning(self, caplog: LogCaptureFixture) -> None: + def test_warning(self, caplog: pytest.LogCaptureFixture) -> None: """Test warning.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") self.result_recorder.final_expected_files_transferred = 1 @@ -1154,7 +1155,7 @@ def test_warning(self, caplog: LogCaptureFixture) -> None: self.result_printer(PrintTask("warning: my warning")) assert caplog.messages == ["warning: my warning"] - def test_warning_unicode(self, caplog: LogCaptureFixture) -> None: + def test_warning_unicode(self, caplog: pytest.LogCaptureFixture) -> None: """Test warning.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") self.result_recorder.final_expected_files_transferred = 1 @@ -1163,7 +1164,7 @@ def test_warning_unicode(self, caplog: LogCaptureFixture) -> None: self.result_printer(PrintTask("warning: unicode exists \u2713")) assert caplog.messages == ["warning: unicode exists \u2713"] - def test_warning_with_progress(self, caplog: LogCaptureFixture) -> None: + def test_warning_with_progress(self, caplog: pytest.LogCaptureFixture) -> None: """Test warning.""" caplog.set_level(LogLevels.NOTICE, "runway.core.providers.aws.s3") shared_file = self.out_file @@ -1192,7 +1193,7 @@ class TestResultProcessor: """Test ResultProcessor.""" result_processor: ResultProcessor - result_queue: "Queue[Any]" + result_queue: Queue[Any] def setup_method(self) -> None: """Run before each test method if run to return the class instance attrs to default.""" diff --git a/tests/unit/core/providers/aws/s3/_helpers/test_s3handler.py b/tests/unit/core/providers/aws/s3/_helpers/test_s3handler.py index 7d35f3e2d..0add05f28 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/test_s3handler.py +++ b/tests/unit/core/providers/aws/s3/_helpers/test_s3handler.py @@ -4,10 +4,10 @@ from pathlib import Path from queue import Queue -from typing import TYPE_CHECKING, Any, ClassVar, Dict, NamedTuple, Optional, cast +from typing import TYPE_CHECKING, Any, ClassVar, NamedTuple, Optional, cast +from unittest.mock import MagicMock, Mock import pytest -from mock import MagicMock, Mock from s3transfer.manager import TransferManager from runway.core.providers.aws.s3._helpers.file_info import FileInfo @@ -69,11 +69,11 @@ class MockSubmitters(NamedTuple): """Named tuple return value of mock_submitters.""" - classes: Dict[str, Mock] - instances: Dict[str, Mock] + classes: dict[str, Mock] + instances: dict[str, Mock] -@pytest.fixture(scope="function") +@pytest.fixture() def mock_submitters(mocker: MockerFixture) -> MockSubmitters: """Mock handler submitters.""" classes = { @@ -85,7 +85,7 @@ def mock_submitters(mocker: MockerFixture) -> MockSubmitters: "upload": 
mocker.patch(f"{MODULE}.UploadRequestSubmitter", Mock()), "upload_stream": mocker.patch(f"{MODULE}.UploadStreamRequestSubmitter", Mock()), } - instances: Dict[str, Mock] = {} + instances: dict[str, Mock] = {} for name, mock_class in classes.items(): inst = Mock(can_submit=Mock(return_value=False), submit=Mock(return_value=True)) mock_class.return_value = inst @@ -100,7 +100,7 @@ class BaseTransferRequestSubmitterTest: config_params: ParametersDataModel filename: ClassVar[str] = "test-file.txt" key: ClassVar[str] = "test-key.txt" - result_queue: "Queue[Any]" + result_queue: Queue[Any] transfer_manager: Mock def setup_method(self) -> None: @@ -196,7 +196,7 @@ def test_submit(self) -> None: self.config_params["guess_mime_type"] = True future = self.transfer_request_submitter.submit(fileinfo) assert self.transfer_manager.copy.return_value is future - call_kwargs = cast(Dict[str, Any], self.transfer_manager.copy.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.copy.call_args[1]) assert call_kwargs["copy_source"] == { "Bucket": self.source_bucket, "Key": self.source_key, @@ -224,7 +224,7 @@ def test_submit_content_type_specified(self) -> None: self.config_params["content_type"] = "text/plain" self.transfer_request_submitter.submit(fileinfo) - copy_call_kwargs = cast(Dict[str, Any], self.transfer_manager.copy.call_args[1]) + copy_call_kwargs = cast(dict[str, Any], self.transfer_manager.copy.call_args[1]) assert copy_call_kwargs["extra_args"] == {"ContentType": "text/plain"} ref_subscribers = [ProvideSizeSubscriber, CopyResultSubscriber] actual_subscribers = copy_call_kwargs["subscribers"] @@ -263,7 +263,7 @@ def test_submit_extra_args(self) -> None: self.config_params["storage_class"] = "STANDARD_IA" self.transfer_request_submitter.submit(fileinfo) - call_kwargs = cast(Dict[str, Any], self.transfer_manager.copy.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.copy.call_args[1]) assert call_kwargs["extra_args"] == {"StorageClass": "STANDARD_IA"} def test_submit_move_adds_delete_source_subscriber(self) -> None: @@ -281,7 +281,7 @@ def test_submit_move_adds_delete_source_subscriber(self) -> None: DeleteSourceObjectSubscriber, CopyResultSubscriber, ] - call_kwargs = cast(Dict[str, Any], self.transfer_manager.copy.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.copy.call_args[1]) actual_subscribers = call_kwargs["subscribers"] assert len(ref_subscribers) == len(actual_subscribers) for i, actual_subscriber in enumerate(actual_subscribers): @@ -295,7 +295,7 @@ def test_submit_no_guess_content_mime_type(self) -> None: ) self.config_params["guess_mime_type"] = False self.transfer_request_submitter.submit(fileinfo) - call_kwargs = cast(Dict[str, Any], self.transfer_manager.copy.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.copy.call_args[1]) ref_subscribers = [ProvideSizeSubscriber, CopyResultSubscriber] actual_subscribers = call_kwargs["subscribers"] assert len(ref_subscribers) == len(actual_subscribers) @@ -387,7 +387,7 @@ def test_submit(self) -> None: future = self.transfer_request_submitter.submit(fileinfo) assert self.transfer_manager.delete.return_value is future - call_kwargs = cast(Dict[str, Any], self.transfer_manager.delete.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.delete.call_args[1]) assert call_kwargs["bucket"] == self.bucket assert call_kwargs["key"] == self.key assert call_kwargs["extra_args"] == {} @@ -437,10 +437,10 @@ def assert_no_downloads_happened(self) -> 
None: assert len(self.transfer_manager.download.call_args_list) == 0 # type: ignore def create_file_info( - self, key: str, response_data: Optional[Dict[str, Any]] = None + self, key: str, response_data: Optional[dict[str, Any]] = None ) -> FileInfo: """Create FileInfo.""" - kwargs: Dict[str, Any] = { + kwargs: dict[str, Any] = { "src": self.bucket + "/" + key, "src_type": "s3", "dest": self.filename, @@ -468,7 +468,7 @@ def test_submit(self) -> None: fileinfo = self.create_file_info(self.key) future = self.transfer_request_submitter.submit(fileinfo) assert self.transfer_manager.download.return_value is future - call_kwargs = cast(Dict[str, Any], self.transfer_manager.download.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.download.call_args[1]) assert call_kwargs["fileobj"] == self.filename assert call_kwargs["bucket"] == self.bucket assert call_kwargs["key"] == self.key @@ -512,7 +512,7 @@ def test_submit_extra_args(self) -> None: self.config_params["sse_c"] = "AES256" self.config_params["sse_c_key"] = "test-key" self.transfer_request_submitter.submit(fileinfo) - call_kwargs = cast(Dict[str, Any], self.transfer_manager.download.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.download.call_args[1]) assert call_kwargs["extra_args"] == { "SSECustomerAlgorithm": "AES256", "SSECustomerKey": "test-key", @@ -531,7 +531,7 @@ def test_submit_move_adds_delete_source_subscriber(self) -> None: DeleteSourceObjectSubscriber, DownloadResultSubscriber, ] - call_kwargs = cast(Dict[str, Any], self.transfer_manager.download.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.download.call_args[1]) actual_subscribers = call_kwargs["subscribers"] assert len(ref_subscribers) == len(actual_subscribers) for i, actual_subscriber in enumerate(actual_subscribers): @@ -639,7 +639,7 @@ def test_submit(self) -> None: future = self.transfer_request_submitter.submit(fileinfo) assert self.transfer_manager.download.return_value is future - call_kwargs = cast(Dict[str, Any], self.transfer_manager.download.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.download.call_args[1]) assert isinstance(call_kwargs["fileobj"], StdoutBytesWriter) assert call_kwargs["bucket"] == self.bucket assert call_kwargs["key"] == self.key @@ -759,7 +759,7 @@ class TestS3TransferHandler: config_params: ClassVar[ParametersDataModel] = ParametersDataModel(dest="", src="") result_command_recorder: CommandResultRecorder - result_queue: "Queue[Any]" + result_queue: Queue[Any] transfer_manager: TransferManager def setup_method(self) -> None: @@ -791,7 +791,7 @@ class TestS3TransferHandlerFactory: config_params: ParametersDataModel client: S3Client - result_queue: "Queue[Any]" + result_queue: Queue[Any] runtime_config: TransferConfigDict def setup_method(self) -> None: @@ -813,7 +813,7 @@ def test_call_is_stream(self, mocker: MockerFixture) -> None: assert S3TransferHandlerFactory(self.config_params, self.runtime_config)( self.client, self.result_queue ) - call_kwargs = cast(Dict[str, Any], mock_processor.call_args[1]) + call_kwargs = cast(dict[str, Any], mock_processor.call_args[1]) assert len(call_kwargs["result_handlers"]) == 2 assert isinstance(call_kwargs["result_handlers"][0], ResultRecorder) assert isinstance(call_kwargs["result_handlers"][1], OnlyShowErrorsResultPrinter) @@ -825,7 +825,7 @@ def test_call_no_progress(self, mocker: MockerFixture) -> None: assert S3TransferHandlerFactory(self.config_params, self.runtime_config)( self.client, 
self.result_queue ) - call_kwargs = cast(Dict[str, Any], mock_processor.call_args[1]) + call_kwargs = cast(dict[str, Any], mock_processor.call_args[1]) assert len(call_kwargs["result_handlers"]) == 2 assert isinstance(call_kwargs["result_handlers"][0], ResultRecorder) assert isinstance(call_kwargs["result_handlers"][1], NoProgressResultPrinter) @@ -837,7 +837,7 @@ def test_call_only_show_errors(self, mocker: MockerFixture) -> None: assert S3TransferHandlerFactory(self.config_params, self.runtime_config)( self.client, self.result_queue ) - call_kwargs = cast(Dict[str, Any], mock_processor.call_args[1]) + call_kwargs = cast(dict[str, Any], mock_processor.call_args[1]) assert len(call_kwargs["result_handlers"]) == 2 assert isinstance(call_kwargs["result_handlers"][0], ResultRecorder) assert isinstance(call_kwargs["result_handlers"][1], OnlyShowErrorsResultPrinter) @@ -849,7 +849,7 @@ def test_call_quiet(self, mocker: MockerFixture) -> None: assert S3TransferHandlerFactory(self.config_params, self.runtime_config)( self.client, self.result_queue ) - call_kwargs = cast(Dict[str, Any], mock_processor.call_args[1]) + call_kwargs = cast(dict[str, Any], mock_processor.call_args[1]) assert len(call_kwargs["result_handlers"]) == 1 assert isinstance(call_kwargs["result_handlers"][0], ResultRecorder) @@ -884,7 +884,7 @@ def test_submit(self) -> None: future = self.transfer_request_submitter.submit(fileinfo) assert self.transfer_manager.upload.return_value is future - call_kwargs = cast(Dict[str, Any], self.transfer_manager.upload.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.upload.call_args[1]) assert call_kwargs["fileobj"] == self.filename assert call_kwargs["bucket"] == self.bucket assert call_kwargs["key"] == self.key @@ -906,7 +906,7 @@ def test_submit_content_type_specified(self) -> None: self.config_params["content_type"] = "text/plain" self.transfer_request_submitter.submit(fileinfo) - call_kwargs = cast(Dict[str, Any], self.transfer_manager.upload.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.upload.call_args[1]) assert call_kwargs["extra_args"] == {"ContentType": "text/plain"} ref_subscribers = [ProvideSizeSubscriber, UploadResultSubscriber] actual_subscribers = call_kwargs["subscribers"] @@ -961,7 +961,7 @@ def test_submit_extra_args(self) -> None: self.config_params["storage_class"] = "STANDARD_IA" self.transfer_request_submitter.submit(fileinfo) - call_kwargs = cast(Dict[str, Any], self.transfer_manager.upload.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.upload.call_args[1]) assert call_kwargs["extra_args"] == {"StorageClass": "STANDARD_IA"} def test_submit_move_adds_delete_source_subscriber(self) -> None: @@ -976,7 +976,7 @@ def test_submit_move_adds_delete_source_subscriber(self) -> None: DeleteSourceFileSubscriber, UploadResultSubscriber, ] - call_kwargs = cast(Dict[str, Any], self.transfer_manager.upload.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.upload.call_args[1]) actual_subscribers = call_kwargs["subscribers"] assert len(ref_subscribers) == len(actual_subscribers) for i, actual_subscriber in enumerate(actual_subscribers): @@ -988,7 +988,7 @@ def test_submit_no_guess_content_mime_type(self) -> None: self.config_params["guess_mime_type"] = False self.transfer_request_submitter.submit(fileinfo) - call_kwargs = cast(Dict[str, Any], self.transfer_manager.upload.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.upload.call_args[1]) ref_subscribers = 
[ProvideSizeSubscriber, UploadResultSubscriber] actual_subscribers = call_kwargs["subscribers"] assert len(ref_subscribers) == len(actual_subscribers) @@ -1042,7 +1042,7 @@ def test_submit(self) -> None: future = self.transfer_request_submitter.submit(fileinfo) assert self.transfer_manager.upload.return_value is future - call_kwargs = cast(Dict[str, Any], self.transfer_manager.upload.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.upload.call_args[1]) assert isinstance(call_kwargs["fileobj"], NonSeekableStream) assert call_kwargs["bucket"] == self.bucket assert call_kwargs["key"] == self.key @@ -1078,7 +1078,7 @@ def test_submit_expected_size_provided(self) -> None: self.config_params["expected_size"] = provided_size fileinfo = FileInfo(src=self.filename, dest=self.bucket + "/" + self.key) self.transfer_request_submitter.submit(fileinfo) - call_kwargs = cast(Dict[str, Any], self.transfer_manager.upload.call_args[1]) + call_kwargs = cast(dict[str, Any], self.transfer_manager.upload.call_args[1]) ref_subscribers = [ProvideSizeSubscriber, UploadStreamResultSubscriber] actual_subscribers = call_kwargs["subscribers"] diff --git a/tests/unit/core/providers/aws/s3/_helpers/test_transfer_config.py b/tests/unit/core/providers/aws/s3/_helpers/test_transfer_config.py index 8645ad75e..9b8c7dd50 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/test_transfer_config.py +++ b/tests/unit/core/providers/aws/s3/_helpers/test_transfer_config.py @@ -2,8 +2,6 @@ from __future__ import annotations -from typing import Dict - import pytest from s3transfer.manager import TransferConfig @@ -64,7 +62,7 @@ def test_build_config_partial_override(self) -> None: {"max_queue_size": "not an int"}, ], ) - def test_build_config_validates_integer_types(self, kwargs: Dict[str, str]) -> None: + def test_build_config_validates_integer_types(self, kwargs: dict[str, str]) -> None: """Test build_config.""" with pytest.raises(InvalidConfigError): RuntimeConfig.build_config(**kwargs) @@ -79,7 +77,7 @@ def test_build_config_validates_integer_types(self, kwargs: Dict[str, str]) -> N {"multipart_threshold": -15}, ], ) - def test_build_config_validates_positive_integers(self, kwargs: Dict[str, str]) -> None: + def test_build_config_validates_positive_integers(self, kwargs: dict[str, str]) -> None: """Test build_config.""" with pytest.raises(InvalidConfigError): RuntimeConfig.build_config(**kwargs) diff --git a/tests/unit/core/providers/aws/s3/_helpers/test_utils.py b/tests/unit/core/providers/aws/s3/_helpers/test_utils.py index fd5c81c7f..c60592f79 100644 --- a/tests/unit/core/providers/aws/s3/_helpers/test_utils.py +++ b/tests/unit/core/providers/aws/s3/_helpers/test_utils.py @@ -12,7 +12,8 @@ from io import BytesIO from pathlib import Path from queue import Queue -from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional +from typing import TYPE_CHECKING, Any, ClassVar, Optional +from unittest.mock import Mock, PropertyMock, sentinel import boto3 import pytest @@ -20,7 +21,6 @@ from botocore.hooks import HierarchicalEmitter from botocore.stub import Stubber from dateutil.tz import tzlocal -from mock import Mock, PropertyMock, sentinel from s3transfer.compat import seekable from s3transfer.futures import TransferFuture @@ -89,9 +89,9 @@ class TestBucketLister: date_parser: ClassVar[Mock] = Mock(return_value=sentinel.now) emitter: ClassVar[HierarchicalEmitter] = HierarchicalEmitter() client: ClassVar[Mock] = Mock(meta=Mock(events=emitter)) - responses: List[Any] = [] + responses: list[Any] = [] 
- def fake_paginate(self, *_args: Any, **_kwargs: Any) -> List[Any]: + def fake_paginate(self, *_args: Any, **_kwargs: Any) -> list[Any]: """Fake paginate.""" for response in self.responses: self.emitter.emit("after-call.s3.ListObjectsV2", parsed=response) @@ -120,7 +120,7 @@ def test_list_objects(self) -> None: for individual_response in individual_response_elements: assert individual_response["LastModified"] == now - def test_list_objects_pass_extra_args(self): + def test_list_objects_pass_extra_args(self) -> None: """Test list_objects.""" self.client.get_paginator.return_value.paginate = Mock( return_value=[ @@ -143,7 +143,7 @@ def test_list_objects_pass_extra_args(self): RequestPayer="requester", ) - def test_list_objects_pass_prefix(self): + def test_list_objects_pass_prefix(self) -> None: """Test list_objects.""" self.client.get_paginator.return_value.paginate = Mock( return_value=[ @@ -385,10 +385,10 @@ class TestOnDoneFilteredSubscriber: class Subscriber(OnDoneFilteredSubscriber): """Subscriber subclass to test.""" - def __init__(self): + def __init__(self) -> None: """Instantiate class.""" - self.on_success_calls: List[Any] = [] - self.on_failure_calls: List[Any] = [] + self.on_success_calls: list[Any] = [] + self.on_failure_calls: list[Any] = [] def _on_success(self, future: Any) -> None: self.on_success_calls.append(future) @@ -396,22 +396,24 @@ def _on_success(self, future: Any) -> None: def _on_failure(self, future: Any, exception: Exception) -> None: self.on_failure_calls.append((future, exception)) - def test_on_done_failure(self): + def test_on_done_failure(self) -> None: """Test on_done.""" subscriber = self.Subscriber() exception = Exception("my exception") future = FakeTransferFuture(exception=exception) subscriber.on_done(future) # type: ignore assert subscriber.on_failure_calls == [(future, exception)] - assert not subscriber.on_success_calls and isinstance(subscriber.on_success_calls, list) + assert not subscriber.on_success_calls + assert isinstance(subscriber.on_success_calls, list) - def test_on_done_success(self): + def test_on_done_success(self) -> None: """Test on_done.""" subscriber = self.Subscriber() future = FakeTransferFuture("return-value") subscriber.on_done(future) # type: ignore assert subscriber.on_success_calls == [future] - assert not subscriber.on_failure_calls and isinstance(subscriber.on_failure_calls, list) + assert not subscriber.on_failure_calls + assert isinstance(subscriber.on_failure_calls, list) class TestProvideCopyContentTypeSubscriber: @@ -440,7 +442,7 @@ class TestProvideLastModifiedTimeSubscriber: desired_utime: ClassVar[datetime.datetime] = datetime.datetime( 2016, 1, 18, 7, 0, 0, tzinfo=tzlocal() ) - result_queue: ClassVar["Queue[Any]"] = Queue() + result_queue: ClassVar[Queue[Any]] = Queue() subscriber: ClassVar[ProvideLastModifiedTimeSubscriber] = ProvideLastModifiedTimeSubscriber( desired_utime, result_queue ) @@ -502,7 +504,7 @@ def test_on_queued(self) -> None: class TestRequestParamsMapper: """Test RequestParamsMapper.""" - params: ClassVar[Dict[str, str]] = { + params: ClassVar[dict[str, str]] = { "sse": "AES256", "sse_kms_key_id": "my-kms-key", "sse_c": "AES256", @@ -513,7 +515,7 @@ class TestRequestParamsMapper: def test_map_copy_object_params(self) -> None: """Test map_copy_object_params.""" - params: Dict[str, str] = {} + params: dict[str, str] = {} assert not RequestParamsMapper.map_copy_object_params( params, {"metadata": "something", **self.params} ) @@ -530,7 +532,7 @@ def test_map_copy_object_params(self) -> None: def 
test_map_copy_object_params_metadata_directive(self) -> None: """Test map_copy_object_params.""" - params: Dict[str, str] = {} + params: dict[str, str] = {} assert not RequestParamsMapper.map_copy_object_params( params, {"metadata_directive": "something", **self.params} ) @@ -546,7 +548,7 @@ def test_map_copy_object_params_metadata_directive(self) -> None: def test_map_create_multipart_upload_params(self) -> None: """Test map_create_multipart_upload_params.""" - params: Dict[str, str] = {} + params: dict[str, str] = {} assert not RequestParamsMapper.map_create_multipart_upload_params(params, self.params) assert params == { "SSECustomerAlgorithm": "AES256", @@ -557,7 +559,7 @@ def test_map_create_multipart_upload_params(self) -> None: def test_map_delete_object_params(self) -> None: """Test map_delete_object_params.""" - params: Dict[str, Any] = {} + params: dict[str, Any] = {} assert not RequestParamsMapper.map_delete_object_params( params, {"request_payer": "requester", **self.params} ) @@ -565,7 +567,7 @@ def test_map_delete_object_params(self) -> None: def test_map_get_object_params(self) -> None: """Test map_get_object_params.""" - params: Dict[str, str] = {} + params: dict[str, str] = {} assert not RequestParamsMapper.map_get_object_params(params, self.params) assert params == { "SSECustomerAlgorithm": "AES256", @@ -574,7 +576,7 @@ def test_map_get_object_params(self) -> None: def test_map_head_object_params(self) -> None: """Test map_head_object_params.""" - params: Dict[str, str] = {} + params: dict[str, str] = {} assert not RequestParamsMapper.map_head_object_params(params, self.params) assert params == { "SSECustomerAlgorithm": "AES256", @@ -583,7 +585,7 @@ def test_map_head_object_params(self) -> None: def test_map_list_objects_v2_params(self) -> None: """Test map_list_objects_v2_params.""" - params: Dict[str, Any] = {} + params: dict[str, Any] = {} assert not RequestParamsMapper.map_list_objects_v2_params( params, {"request_payer": "requester", **self.params} ) @@ -591,7 +593,7 @@ def test_map_list_objects_v2_params(self) -> None: def test_map_put_object_params(self) -> None: """Test map_put_object_params.""" - params: Dict[str, str] = {} + params: dict[str, str] = {} assert not RequestParamsMapper.map_put_object_params( params, { @@ -619,25 +621,25 @@ def test_map_put_object_params(self) -> None: def test_map_put_object_params_raise_value_error_format(self) -> None: """Test map_put_object_params.""" - params: Dict[str, str] = {} - with pytest.raises(ValueError) as excinfo: + params: dict[str, str] = {} + with pytest.raises(ValueError, match="grants should be of the form permission=principal"): RequestParamsMapper.map_put_object_params( params, {"grants": ["invalid"], **self.params} ) - assert str(excinfo.value) == "grants should be of the form permission=principal" def test_map_put_object_params_raise_value_error_permission(self) -> None: """Test map_put_object_params.""" - params: Dict[str, str] = {} - with pytest.raises(ValueError) as excinfo: + params: dict[str, str] = {} + with pytest.raises( + ValueError, match="permission must be one of: read|readacl|writeacl|full" + ): RequestParamsMapper.map_put_object_params( params, {"grants": ["invalid=test-read"], **self.params} ) - assert str(excinfo.value) == "permission must be one of: read|readacl|writeacl|full" def test_map_upload_part_params(self) -> None: """Test map_upload_part_params.""" - params: Dict[str, str] = {} + params: dict[str, str] = {} assert not RequestParamsMapper.map_upload_part_params(params, self.params) 
assert params == { "SSECustomerAlgorithm": "AES256", @@ -646,7 +648,7 @@ def test_map_upload_part_params(self) -> None: def test_map_upload_part_copy_params(self) -> None: """Test map_upload_part_copy_params.""" - params: Dict[str, str] = {} + params: dict[str, str] = {} assert not RequestParamsMapper.map_upload_part_copy_params(params, self.params) assert params == { "CopySourceSSECustomerAlgorithm": "AES256", @@ -688,20 +690,18 @@ def test_write_no_stdout(self, mocker: MockerFixture) -> None: def test_block_s3_object_lambda_raise_colon() -> None: """Test block_s3_object_lambda.""" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="S3 action does not support S3 Object Lambda resources"): block_s3_object_lambda( - "arn:aws:s3-object-lambda:us-west-2:123456789012:" "accesspoint:my-accesspoint" + "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint:my-accesspoint" ) - assert "does not support S3 Object Lambda resources" in str(excinfo.value) def test_block_s3_object_lambda_raise_slash() -> None: """Test block_s3_object_lambda.""" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="S3 action does not support S3 Object Lambda resources"): block_s3_object_lambda( - "arn:aws:s3-object-lambda:us-west-2:123456789012:" "accesspoint/my-accesspoint" + "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/my-accesspoint" ) - assert "does not support S3 Object Lambda resources" in str(excinfo.value) def test_create_warning() -> None: @@ -870,11 +870,10 @@ def test_get_file_stat_handle_timestamp_error( def test_get_file_stat_raise_value_error(mocker: MockerFixture, tmp_path: Path) -> None: """Test get_file_stat.""" - mocker.patch.object(Path, "stat", PropertyMock(side_effect=IOError("msg"))) + mocker.patch.object(Path, "stat", PropertyMock(side_effect=OSError("msg"))) tmp_file = tmp_path / "test.txt" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="Could not retrieve file stat"): get_file_stat(tmp_file) - assert str(excinfo.value) == f"Could not retrieve file stat of {tmp_file}: msg" def test_guess_content_type(mocker: MockerFixture, tmp_path: Path) -> None: @@ -945,9 +944,8 @@ def test_human_readable_to_bytes(expected: int, value: str) -> None: def test_human_readable_to_bytes_raise_value_error() -> None: """Test human_readable_to_bytes.""" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="Invalid size value"): human_readable_to_bytes("test") - assert str(excinfo.value) == "Invalid size value: test" @pytest.mark.skipif(platform.system() == "Windows", reason="crashes xdist worker on Windows") @@ -957,7 +955,7 @@ def test_relative_path_handle_value_error(mocker: MockerFixture, tmp_path: Path) mocker.patch("os.path.split", side_effect=ValueError()) result = relative_path(tmp_file, tmp_path) assert isinstance(result, str) - assert os.path.isabs(result) + assert Path(result).is_absolute() @pytest.mark.parametrize( @@ -1014,7 +1012,7 @@ def test_set_file_utime_raise_os_error(mocker: MockerFixture, tmp_path: Path) -> mocker.patch("os.utime", side_effect=OSError(2, "")) now = datetime.datetime.now(tzlocal()) epoch_now = time.mktime(now.timetuple()) - with pytest.raises(OSError): + with pytest.raises(OSError): # noqa: PT011 set_file_utime(tmp_file, epoch_now) diff --git a/tests/unit/core/providers/aws/s3/test_bucket.py b/tests/unit/core/providers/aws/s3/test_bucket.py index c3b6d3264..8ddc70828 100644 --- a/tests/unit/core/providers/aws/s3/test_bucket.py +++ 
b/tests/unit/core/providers/aws/s3/test_bucket.py @@ -6,15 +6,14 @@ import logging from http import HTTPStatus from typing import TYPE_CHECKING +from unittest.mock import MagicMock import pytest -from mock import MagicMock from runway.core.providers.aws import BaseResponse from runway.core.providers.aws.s3 import Bucket if TYPE_CHECKING: - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from .....factories import MockRunwayContext @@ -87,7 +86,7 @@ def test_create(self, runway_context: MockRunwayContext) -> None: stubber.assert_no_pending_responses() def test_create_exists( - self, caplog: LogCaptureFixture, runway_context: MockRunwayContext + self, caplog: pytest.LogCaptureFixture, runway_context: MockRunwayContext ) -> None: """Test create with exists=True.""" caplog.set_level(logging.DEBUG, logger="runway.core.providers.aws.s3.bucket") @@ -106,7 +105,7 @@ def test_create_exists( assert "bucket already exists" in "\n".join(caplog.messages) def test_create_forbidden( - self, caplog: LogCaptureFixture, runway_context: MockRunwayContext + self, caplog: pytest.LogCaptureFixture, runway_context: MockRunwayContext ) -> None: """Test create with forbidden=True.""" caplog.set_level(logging.DEBUG, logger="runway.core.providers.aws.s3.bucket") @@ -178,7 +177,7 @@ def test_enable_versioning(self, runway_context: MockRunwayContext) -> None: stubber.assert_no_pending_responses() def test_enable_versioning_skipped( - self, caplog: LogCaptureFixture, runway_context: MockRunwayContext + self, caplog: pytest.LogCaptureFixture, runway_context: MockRunwayContext ) -> None: """Test enable_versioning with Status=Enabled.""" caplog.set_level(logging.DEBUG, logger="runway.core.providers.aws.s3.bucket") @@ -277,7 +276,7 @@ def test_head(self, runway_context: MockRunwayContext) -> None: stubber.assert_no_pending_responses() def test_head_clienterror( - self, caplog: LogCaptureFixture, runway_context: MockRunwayContext + self, caplog: pytest.LogCaptureFixture, runway_context: MockRunwayContext ) -> None: """Test head with ClientError.""" caplog.set_level(logging.DEBUG, logger="runway.core.providers.aws.s3.bucket") diff --git a/tests/unit/core/providers/aws/s3/test_sync_handler.py b/tests/unit/core/providers/aws/s3/test_sync_handler.py index 6a1a36a7b..d4188e069 100644 --- a/tests/unit/core/providers/aws/s3/test_sync_handler.py +++ b/tests/unit/core/providers/aws/s3/test_sync_handler.py @@ -3,8 +3,7 @@ from __future__ import annotations from typing import TYPE_CHECKING - -from mock import Mock +from unittest.mock import Mock from runway.core.providers.aws.s3._sync_handler import S3SyncHandler diff --git a/tests/unit/core/providers/aws/test_assume_role.py b/tests/unit/core/providers/aws/test_assume_role.py index 9c3645f93..75ab2e872 100644 --- a/tests/unit/core/providers/aws/test_assume_role.py +++ b/tests/unit/core/providers/aws/test_assume_role.py @@ -12,7 +12,6 @@ from runway.core.providers.aws import AssumeRole if TYPE_CHECKING: - from pytest import LogCaptureFixture from ....factories import MockRunwayContext @@ -102,7 +101,9 @@ def test_assume_role_no_revert_on_exit(runway_context: MockRunwayContext) -> Non assert runway_context.env.aws_credentials == NEW_CREDENTIALS -def test_assume_role_no_role(caplog: LogCaptureFixture, runway_context: MockRunwayContext) -> None: +def test_assume_role_no_role( + caplog: pytest.LogCaptureFixture, runway_context: MockRunwayContext +) -> None: """Test AssumeRole with no role_arn.""" caplog.set_level(logging.DEBUG, logger="runway") with 
AssumeRole(runway_context) as result: @@ -124,6 +125,9 @@ def test_assume_role_raise_value_error(runway_context: MockRunwayContext) -> Non {"RoleArn": ROLE_ARN, "RoleSessionName": "runway", "DurationSeconds": 3600}, ) - with stubber, pytest.raises(ValueError, match="assume_role did not return Credentials"): - with AssumeRole(runway_context, role_arn=ROLE_ARN): - raise AssertionError + with ( + stubber, + pytest.raises(ValueError, match="assume_role did not return Credentials"), + AssumeRole(runway_context, role_arn=ROLE_ARN), + ): + raise AssertionError diff --git a/tests/unit/core/test_core.py b/tests/unit/core/test_core.py index ae142b6bc..8b53e49cd 100644 --- a/tests/unit/core/test_core.py +++ b/tests/unit/core/test_core.py @@ -4,15 +4,14 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any +from unittest.mock import MagicMock, call import pytest -from mock import MagicMock, call from runway.core import Runway if TYPE_CHECKING: - from pytest import LogCaptureFixture, MonkeyPatch from pytest_mock import MockerFixture from ..factories import MockRunwayConfig, MockRunwayContext @@ -38,8 +37,8 @@ def test___init___( def test___init___undetermined_version( self, - caplog: LogCaptureFixture, - monkeypatch: MonkeyPatch, + caplog: pytest.LogCaptureFixture, + monkeypatch: pytest.MonkeyPatch, runway_config: MockRunwayConfig, runway_context: MockRunwayContext, ) -> None: @@ -51,7 +50,7 @@ def test___init___undetermined_version( def test___init___unsupported_version( self, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, runway_config: MockRunwayConfig, runway_context: MockRunwayContext, ) -> None: @@ -220,8 +219,8 @@ def test_reverse_deployments(self) -> None: def test_test( self, - caplog: LogCaptureFixture, - monkeypatch: MonkeyPatch, + caplog: pytest.LogCaptureFixture, + monkeypatch: pytest.MonkeyPatch, runway_config: MockRunwayConfig, runway_context: MockRunwayContext, ) -> None: @@ -281,14 +280,14 @@ def test_test( def test_test_keyerror( self, - caplog: LogCaptureFixture, - monkeypatch: MonkeyPatch, + caplog: pytest.LogCaptureFixture, + monkeypatch: pytest.MonkeyPatch, runway_config: MockRunwayConfig, runway_context: MockRunwayContext, ) -> None: """Test test with handler not found.""" caplog.set_level(logging.ERROR, logger="runway") - test_handlers: Dict[str, Any] = {} + test_handlers: dict[str, Any] = {} monkeypatch.setattr(MODULE + "._TEST_HANDLERS", test_handlers) obj = Runway(runway_config, runway_context) # type: ignore @@ -308,7 +307,7 @@ def test_test_keyerror( def test_test_no_tests( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, runway_config: MockRunwayConfig, runway_context: MockRunwayContext, ) -> None: diff --git a/tests/unit/dependency_managers/test__pip.py b/tests/unit/dependency_managers/test__pip.py index 5e70dab17..cd41e8ae6 100644 --- a/tests/unit/dependency_managers/test__pip.py +++ b/tests/unit/dependency_managers/test__pip.py @@ -5,16 +5,15 @@ import logging import subprocess from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Union +from typing import TYPE_CHECKING, Any, Union +from unittest.mock import Mock import pytest -from mock import Mock from runway.compat import shlex_join from runway.dependency_managers import Pip, PipInstallFailedError if TYPE_CHECKING: - from pytest import LogCaptureFixture from pytest_mock import MockerFixture MODULE = "runway.dependency_managers._pip" @@ -36,7 +35,7 @@ def 
test_config_files(self) -> None: ({"file_name": "foo.txt"}, True), ], ) - def test_dir_is_project(self, expected: bool, kwargs: Dict[str, str], tmp_path: Path) -> None: + def test_dir_is_project(self, expected: bool, kwargs: dict[str, str], tmp_path: Path) -> None: """Test dir_is_project.""" requirements_txt = tmp_path / kwargs.get("file_name", "requirements.txt") if expected: @@ -48,8 +47,8 @@ def test_dir_is_project(self, expected: bool, kwargs: Dict[str, str], tmp_path: @pytest.mark.parametrize("command", ["test", ["test"]]) def test_generate_command( self, - caplog: LogCaptureFixture, - command: Union[List[str], str], + caplog: pytest.LogCaptureFixture, + command: Union[list[str], str], mocker: MockerFixture, ) -> None: """Test generate_command.""" @@ -87,7 +86,7 @@ def test_generate_command( ], ) def test_generate_install_command( - self, call_args: Dict[str, Any], expected: Dict[str, Any], mocker: MockerFixture + self, call_args: dict[str, Any], expected: dict[str, Any], mocker: MockerFixture ) -> None: """Test generate_install_command.""" expected.setdefault("cache_dir", None) @@ -134,7 +133,7 @@ def test_install( target=target, ) mock_run_command.assert_called_once_with( - mock_generate_install_command.return_value + ["--foo", "bar"], + [*mock_generate_install_command.return_value, "--foo", "bar"], suppress_output=False, ) diff --git a/tests/unit/dependency_managers/test__pipenv.py b/tests/unit/dependency_managers/test__pipenv.py index 9bfca540f..8ac918da2 100644 --- a/tests/unit/dependency_managers/test__pipenv.py +++ b/tests/unit/dependency_managers/test__pipenv.py @@ -4,17 +4,16 @@ import logging import subprocess -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any +from unittest.mock import Mock import pytest -from mock import Mock from runway.dependency_managers import Pipenv, PipenvExportFailedError if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture from pytest_mock import MockerFixture MODULE = "runway.dependency_managers._pipenv" @@ -33,7 +32,7 @@ def test_config_files(self) -> None: ) def test_dir_is_project( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, lock_exists: bool, pipfile_exists: bool, tmp_path: Path, @@ -61,7 +60,7 @@ def test_dir_is_project( ) def test_export( self, - export_kwargs: Dict[str, Any], + export_kwargs: dict[str, Any], mocker: MockerFixture, tmp_path: Path, ) -> None: diff --git a/tests/unit/dependency_managers/test__poetry.py b/tests/unit/dependency_managers/test__poetry.py index 77283deda..dc88fb3e7 100644 --- a/tests/unit/dependency_managers/test__poetry.py +++ b/tests/unit/dependency_managers/test__poetry.py @@ -3,11 +3,11 @@ from __future__ import annotations import subprocess -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any +from unittest.mock import Mock import pytest import tomli_w -from mock import Mock from runway.dependency_managers import Poetry, PoetryExportFailedError @@ -42,7 +42,7 @@ def test_config_files(self) -> None: ], ) def test_dir_is_project( - self, build_system: Dict[str, Any], expected: bool, tmp_path: Path + self, build_system: dict[str, Any], expected: bool, tmp_path: Path ) -> None: """Test dir_is_project.""" pyproject_contents = {"build-system": build_system} @@ -69,7 +69,7 @@ def test_dir_is_project_file_not_found(self, tmp_path: Path) -> None: ) def test_export( self, - export_kwargs: Dict[str, Any], + export_kwargs: dict[str, Any], mocker: MockerFixture, tmp_path: Path, ) -> None: diff --git 
a/tests/unit/dependency_managers/test_base_classes.py b/tests/unit/dependency_managers/test_base_classes.py index 15b3b484d..3f5bdeb9f 100644 --- a/tests/unit/dependency_managers/test_base_classes.py +++ b/tests/unit/dependency_managers/test_base_classes.py @@ -3,9 +3,9 @@ from __future__ import annotations from typing import TYPE_CHECKING +from unittest.mock import Mock import pytest -from mock import Mock from runway.dependency_managers.base_classes import DependencyManager diff --git a/tests/unit/env_mgr/test_env_mgr.py b/tests/unit/env_mgr/test_env_mgr.py index a5c1bf24b..a0886cda0 100644 --- a/tests/unit/env_mgr/test_env_mgr.py +++ b/tests/unit/env_mgr/test_env_mgr.py @@ -12,7 +12,6 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture, MonkeyPatch from pytest_mock import MockerFixture @@ -20,7 +19,7 @@ class TestEnvManager: """Test runway.env_mgr.EnvManager.""" def test___init___darwin( - self, platform_darwin: None, cd_tmp_path: Path, mocker: MockerFixture + self, platform_darwin: None, cd_tmp_path: Path, mocker: MockerFixture # noqa: ARG002 ) -> None: """Test __init__ on Darwin platform.""" home = cd_tmp_path / "home" @@ -35,10 +34,10 @@ def test___init___darwin( def test___init___windows( self, - platform_windows: None, + platform_windows: None, # noqa: ARG002 cd_tmp_path: Path, mocker: MockerFixture, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, ) -> None: """Test __init__ on Windows platform.""" home = cd_tmp_path / "home" @@ -55,7 +54,10 @@ def test___init___windows( assert obj.versions_dir == expected_env_dir / "versions" def test___init___windows_appdata( - self, platform_windows: None, cd_tmp_path: Path, monkeypatch: MonkeyPatch + self, + platform_windows: None, # noqa: ARG002 + cd_tmp_path: Path, + monkeypatch: pytest.MonkeyPatch, ) -> None: """Test __init__ on Windows platform.""" monkeypatch.setenv("APPDATA", str(cd_tmp_path / "custom_path")) @@ -69,7 +71,9 @@ def test___init___windows_appdata( assert obj.env_dir == expected_env_dir assert obj.versions_dir == expected_env_dir / "versions" - def test_bin(self, platform_darwin: None, cd_tmp_path: Path, mocker: MockerFixture) -> None: + def test_bin( + self, platform_darwin: None, cd_tmp_path: Path, mocker: MockerFixture # noqa: ARG002 + ) -> None: """Test bin.""" home = cd_tmp_path / "home" mocker.patch("runway.env_mgr.Path.home", return_value=home) @@ -97,7 +101,7 @@ def test_path(self, cd_tmp_path: Path) -> None: @pytest.mark.parametrize("exists", [False, True]) def test_uninstall( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, exists: bool, mocker: MockerFixture, tmp_path: Path, diff --git a/tests/unit/env_mgr/test_tfenv.py b/tests/unit/env_mgr/test_tfenv.py index 66503feb0..3b45d1af4 100644 --- a/tests/unit/env_mgr/test_tfenv.py +++ b/tests/unit/env_mgr/test_tfenv.py @@ -6,12 +6,12 @@ import json import re import subprocess -from typing import TYPE_CHECKING, Any, Dict, List, Optional +from typing import TYPE_CHECKING, Any, Optional +from unittest.mock import MagicMock, call import hcl import hcl2 import pytest -from mock import MagicMock, call from runway._logging import LogLevels from runway.env_mgr.tfenv import ( @@ -28,7 +28,6 @@ from pathlib import Path from types import ModuleType - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from pytest_subprocess import FakeProcess @@ -64,7 +63,7 @@ def test_get_available_tf_versions(mocker: MockerFixture) -> None: """Test runway.env_mgr.tfenv.get_available_tf_versions.""" 
mock_requests = mocker.patch(f"{MODULE}.requests") - response: Dict[str, Any] = {"terraform": {"versions": {"0.12.0": {}, "0.12.0-beta": {}}}} + response: dict[str, Any] = {"terraform": {"versions": {"0.12.0": {}, "0.12.0-beta": {}}}} mock_requests.get.return_value = MagicMock(text=json.dumps(response)) assert get_available_tf_versions() == ["0.12.0"] assert get_available_tf_versions(include_prerelease=True) == [ @@ -92,7 +91,7 @@ def test_get_latest_tf_version(mocker: MockerFixture) -> None: ], ) def test_load_terraform_module( - parser: ModuleType, expected: Dict[str, Any], tmp_path: Path + parser: ModuleType, expected: dict[str, Any], tmp_path: Path ) -> None: """Test runway.env_mgr.tfenv.load_terraform_module.""" tf_file = tmp_path / "module.tf" @@ -151,8 +150,8 @@ class TestTFEnvManager: def test_backend( self, mocker: MockerFixture, - response: Dict[str, Any], - expected: Dict[str, Any], + response: dict[str, Any], + expected: dict[str, Any], tmp_path: Path, ) -> None: """Test backend.""" @@ -384,10 +383,10 @@ def test_set_version_same(self, mocker: MockerFixture, tmp_path: Path) -> None: ) def test_terraform_block( self, - caplog: LogCaptureFixture, - expected: Dict[str, Any], + caplog: pytest.LogCaptureFixture, + expected: dict[str, Any], mocker: MockerFixture, - response: List[Any], + response: list[Any], tmp_path: Path, ) -> None: """Test terraform_block.""" diff --git a/tests/unit/factories.py b/tests/unit/factories.py index eac85fc4f..ffa454c26 100644 --- a/tests/unit/factories.py +++ b/tests/unit/factories.py @@ -1,48 +1,53 @@ """Test classes.""" -# pyright: basic, reportIncompatibleMethodOverride=none +# pyright: reportIncompatibleMethodOverride=none from __future__ import annotations -from typing import TYPE_CHECKING, Any, Dict, List, MutableMapping, Optional, Tuple +from functools import cached_property +from typing import TYPE_CHECKING, Any, cast +from unittest.mock import MagicMock import boto3 import yaml from botocore.stub import Stubber -from mock import MagicMock from packaging.specifiers import SpecifierSet from runway.config.components.runway import RunwayDeploymentDefinition from runway.context import CfnginContext, RunwayContext -from runway.core.components import DeployEnvironment from runway.utils import MutableMap if TYPE_CHECKING: + from collections.abc import MutableMapping from pathlib import Path from boto3.resources.base import ServiceResource from botocore.client import BaseClient + from mypy_boto3_s3.client import S3Client from runway.config import CfnginConfig + from runway.core.components import DeployEnvironment from runway.core.type_defs import RunwayActionTypeDef class MockBoto3Session: - """Mock class that acts like a boto3.session. + """Mock class that acts like a :class:`boto3.session.Session`. - Must be preloaded with stubbers. + Clients must be registered using :meth:`~pytest_runway.MockBoto3Session.register_client` + before the can be created with the usual :meth:`~pytest_runway.MockBoto3Session.client` + call. This is to ensure that all AWS calls are stubbed. 
""" def __init__( self, *, - clients: Optional[MutableMap] = None, - aws_access_key_id: Optional[str] = None, - aws_secret_access_key: Optional[str] = None, - aws_session_token: Optional[str] = None, - profile_name: Optional[str] = None, - region_name: Optional[str] = None, - ): + clients: MutableMap | None = None, + aws_access_key_id: str | None = None, + aws_secret_access_key: str | None = None, + aws_session_token: str | None = None, + profile_name: str | None = None, + region_name: str | None = None, + ) -> None: """Instantiate class. Args: @@ -55,7 +60,6 @@ def __init__( """ self._clients = clients or MutableMap() - self._client_calls: Dict[str, Any] = {} self._session = MagicMock() self.aws_access_key_id = aws_access_key_id self.aws_secret_access_key = aws_secret_access_key @@ -63,81 +67,95 @@ def __init__( self.profile_name = profile_name self.region_name = region_name - def assert_client_called_with(self, service_name: str, **kwargs: Any) -> None: - """Assert a client was created with the provided kwargs.""" - key = f"{service_name}.{kwargs.get('region_name', self.region_name)}" - assert self._client_calls[key] == kwargs - def client(self, service_name: str, **kwargs: Any) -> BaseClient: """Return a stubbed client. Args: service_name: The name of a service, e.g. 's3' or 'ec2'. + **kwargs: Arbitrary keyword arguments. Returns: Stubbed boto3 client. Raises: - KeyError: Client was not stubbed from Context before trying to use. + ValueError: Client was not stubbed from Context before trying to use. """ - key = f"{service_name}.{kwargs.get('region_name', self.region_name)}" - self._client_calls[key] = kwargs - return self._clients[key] + key = f"{service_name}.{kwargs.get('region_name') or self.region_name}" + try: + return self._clients[key] + except AttributeError: + raise ValueError(f"client not registered for {key}") from None def register_client( - self, service_name: str, region_name: Optional[str] = None - ) -> Tuple[Any, Stubber]: + self, service_name: str, *, region: str | None = None + ) -> tuple[Any, Stubber]: """Register a client for the boto3 session. Args: service_name: The name of a service, e.g. 's3' or 'ec2'. - region_name: AWS region. + region: AWS region. 
""" - key = f"{service_name}.{region_name or self.region_name}" - client = boto3.client( # type: ignore - service_name, # type: ignore - region_name=region_name or self.region_name, + key = f"{service_name}.{region or self.region_name}" + client = cast( + "BaseClient", + boto3.client( + service_name, # pyright: ignore[reportCallIssue, reportArgumentType] + region_name=region or self.region_name, + ), ) - stubber = Stubber(client) # type: ignore - self._clients[key] = client # type: ignore - return client, stubber # type: ignore + stubber = Stubber(client) + self._clients[key] = client + return client, stubber def resource(self, service_name: str, **kwargs: Any) -> ServiceResource: """Return a stubbed resource.""" kwargs.setdefault("region_name", self.region_name) - resource: ServiceResource = boto3.resource(service_name, **kwargs) # type: ignore - resource.meta.client = self._clients[f"{service_name}.{kwargs['region_name']}"] + resource = cast( + "ServiceResource", + boto3.resource( + service_name, # pyright: ignore[reportCallIssue, reportArgumentType] + **kwargs, + ), + ) + resource.meta.client = self.client(service_name, **kwargs) return resource - def service(self, service_name: str, region_name: Optional[str] = None) -> None: - """Not implimented.""" + def service(self, service_name: str, *, region_name: str | None = None) -> None: + """Not implemented.""" raise NotImplementedError -class MockCFNginContext(CfnginContext): - """Subclass CFNgin context object for tests.""" +class MockCfnginContext(CfnginContext): + """Subclass of :class:`~runway.context.CfnginContext` for tests.""" def __init__( self, *, - config_path: Optional[Path] = None, - config: Optional[CfnginConfig] = None, - deploy_environment: Optional[DeployEnvironment] = None, - parameters: Optional[MutableMapping[str, Any]] = None, - force_stacks: Optional[List[str]] = None, - region: Optional[str] = "us-east-1", - stack_names: Optional[List[str]] = None, - work_dir: Optional[Path] = None, + config: CfnginConfig | None = None, + config_path: Path | None = None, + deploy_environment: DeployEnvironment, + force_stacks: list[str] | None = None, + parameters: MutableMapping[str, Any] | None = None, + stack_names: list[str] | None = None, + work_dir: Path | None = None, **_: Any, ) -> None: - """Instantiate class.""" - self._boto3_test_client = MutableMap() - self._boto3_test_stubber = MutableMap() + """Instantiate class. - # used during init process - self.s3_stubber = self.add_stubber("s3", region=region) + Args: + config: The CFNgin configuration being operated on. + config_path: Path to the config file that was provided. + deploy_environment: The current deploy environment. + force_stacks: A list of stacks to force work on. Used to work on locked stacks. + parameters: Parameters passed from Runway or read from a file. + stack_names: A list of stack_names to operate on. If not passed, + all stacks defined in the config will be operated on. + work_dir: Working directory used by CFNgin. + + """ + self._boto3_sessions: dict[str, MockBoto3Session] = {} super().__init__( config_path=config_path, @@ -149,42 +167,113 @@ def __init__( work_dir=work_dir, ) - def add_stubber(self, service_name: str, region: Optional[str] = None) -> Stubber: + @cached_property + def s3_client(self) -> S3Client: + """AWS S3 client. + + Adds an S3 stubber prior to returning from :attr:`~runway.context.CfnginContext.s3_client`. 
+ + """ + self.add_stubber("s3", region=self.bucket_region) + return super().s3_client + + def add_stubber( + self, + service_name: str, + *, + aws_access_key_id: str | None = None, + aws_secret_access_key: str | None = None, + aws_session_token: str | None = None, + profile: str | None = None, + region: str | None = None, + ) -> Stubber: """Add a stubber to context. Args: - service_name: The name of a service, e.g. 's3' or 'ec2'. - region: AWS region. + service_name: The name of the service to stub. + aws_access_key_id: AWS Access Key ID. + aws_secret_access_key: AWS secret Access Key. + aws_session_token: AWS session token. + profile: The profile for the session. + region: The region for the session. """ - key = f"{service_name}.{region or self.env.aws_region}" - - self._boto3_test_client[key] = boto3.client( # type: ignore - service_name, # type: ignore - region_name=region or self.env.aws_region, + session = self._get_mocked_session( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + aws_session_token=aws_session_token, + profile=profile, + region=region or self.env.aws_region, ) - self._boto3_test_stubber[key] = Stubber(self._boto3_test_client[key]) - return self._boto3_test_stubber[key] + _client, stubber = session.register_client(service_name, region=region) + return stubber - def get_session( + def _get_mocked_session( self, *, - aws_access_key_id: Optional[str] = None, - aws_secret_access_key: Optional[str] = None, - aws_session_token: Optional[str] = None, - profile: Optional[str] = None, - region: Optional[str] = None, + aws_access_key_id: str | None = None, + aws_secret_access_key: str | None = None, + aws_session_token: str | None = None, + profile: str | None = None, + region: str | None = None, ) -> MockBoto3Session: - """Wrap get_session to enable stubbing.""" - return MockBoto3Session( - aws_access_key_id=aws_access_key_id, - aws_secret_access_key=aws_secret_access_key, - aws_session_token=aws_session_token, - clients=self._boto3_test_client, - profile_name=profile, - region_name=region or self.env.aws_region, + """Get a mocked boto3 session.""" + region = region or self.env.aws_region + if region not in self._boto3_sessions: + self._boto3_sessions[region] = MockBoto3Session( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + aws_session_token=aws_session_token, + profile_name=profile, + region_name=region or self.env.aws_region, + ) + return self._boto3_sessions[region] + + def get_session( + self, + *, + aws_access_key_id: str | None = None, + aws_secret_access_key: str | None = None, + aws_session_token: str | None = None, + profile: str | None = None, + region: str | None = None, + ) -> boto3.Session: + """Wrap get_session to enable stubbing. + + A stubber must exist before ``get_session`` is called or an error will be raised. + + Args: + aws_access_key_id: AWS Access Key ID. + aws_secret_access_key: AWS secret Access Key. + aws_session_token: AWS session token. + profile: The profile for the session. + region: The region for the session. + + """ + return cast( + boto3.Session, + self._get_mocked_session( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + aws_session_token=aws_session_token, + profile=profile, + region=region or self.env.aws_region, + ), ) + def get_stubbed_client(self, service_name: str, *, region: str | None = None) -> BaseClient: + """Get an existing stubbed client. 
+ + This can be used after :meth:`~pytest_runway.MockCfnginContext.add_stubber` has + been called to get the stubber client. + + Args: + service_name: The name of the service that was stubbed. + region: The region of the session. + + """ + return self._get_mocked_session(region=region).client(service_name, region_name=region) + class MockRunwayConfig(MutableMap): """Mock Runway config object.""" @@ -211,84 +300,150 @@ def __call__(self, **kwargs: Any) -> MockRunwayConfig: class MockRunwayContext(RunwayContext): - """Subclass Runway context object for tests.""" + """Subclass of :class:`~runway.context.RunwayContext` for tests.""" - _use_concurrent: bool + _use_concurrent: bool = True def __init__( self, *, - command: Optional[RunwayActionTypeDef] = None, - deploy_environment: Any = None, - work_dir: Optional[Path] = None, + command: RunwayActionTypeDef | None = None, + deploy_environment: DeployEnvironment, + work_dir: Path | None = None, **_: Any, ) -> None: - """Instantiate class.""" - if not deploy_environment: - deploy_environment = DeployEnvironment(environ={}, explicit_name="test") + """Instantiate class. + + Args: + command: Runway command/action being run. + deploy_environment: The current deploy environment. + work_dir: Working directory used by Runway. + + """ + self._boto3_sessions: dict[str, MockBoto3Session] = {} + super().__init__(command=command, deploy_environment=deploy_environment, work_dir=work_dir) - self._boto3_test_client = MutableMap() - self._boto3_test_stubber = MutableMap() - self._use_concurrent = True - def add_stubber(self, service_name: str, region: Optional[str] = None) -> Stubber: - """Add a stubber to context. + @property + def use_concurrent(self) -> bool: + """Override property of parent with something that can be set.""" + return self._use_concurrent + + @use_concurrent.setter + def use_concurrent( # pyright: ignore[reportIncompatibleVariableOverride] + self, value: bool + ) -> None: + """Override property of parent with something that can be set. Args: - service_name: The name of a service, e.g. 's3' or 'ec2'. - region: AWS region name. + value: New value for the attribute. """ - key = f"{service_name}.{region or self.env.aws_region}" - - self._boto3_test_client[key] = boto3.client( # type: ignore - service_name, # type: ignore - region_name=region or self.env.aws_region, - **self.boto3_credentials, - ) - self._boto3_test_stubber[key] = Stubber(self._boto3_test_client[key]) - return self._boto3_test_stubber[key] + self._use_concurrent = value - def get_session( + def add_stubber( self, + service_name: str, *, - aws_access_key_id: Optional[str] = None, - aws_secret_access_key: Optional[str] = None, - aws_session_token: Optional[str] = None, - profile: Optional[str] = None, - region: Optional[str] = None, - ) -> MockBoto3Session: - """Wrap get_session to enable stubbing.""" - return MockBoto3Session( + aws_access_key_id: str | None = None, + aws_secret_access_key: str | None = None, + aws_session_token: str | None = None, + profile: str | None = None, + region: str | None = None, + ) -> Stubber: + """Add a stubber to context. + + Args: + service_name: The name of the service to stub. + aws_access_key_id: AWS Access Key ID. + aws_secret_access_key: AWS secret Access Key. + aws_session_token: AWS session token. + profile: The profile for the session. + region: The region for the session. 
+ + """ + session = self._get_mocked_session( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, - clients=self._boto3_test_client, - profile_name=profile, - region_name=region or self.env.aws_region, + profile=profile, + region=region or self.env.aws_region, ) + _client, stubber = session.register_client(service_name, region=region) + return stubber - @property - def use_concurrent(self) -> bool: - """Override property of parent with something that can be set.""" - return self._use_concurrent + def _get_mocked_session( + self, + *, + aws_access_key_id: str | None = None, + aws_secret_access_key: str | None = None, + aws_session_token: str | None = None, + profile: str | None = None, + region: str | None = None, + ) -> MockBoto3Session: + """Get a mocked boto3 session.""" + region = region or self.env.aws_region + if region not in self._boto3_sessions: + self._boto3_sessions[region] = MockBoto3Session( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + aws_session_token=aws_session_token, + profile_name=profile, + region_name=region or self.env.aws_region, + ) + return self._boto3_sessions[region] - @use_concurrent.setter # type: ignore - def use_concurrent(self, value: bool) -> None: - """Override property of parent with something that can be set. + def get_session( + self, + *, + aws_access_key_id: str | None = None, + aws_secret_access_key: str | None = None, + aws_session_token: str | None = None, + profile: str | None = None, + region: str | None = None, + ) -> boto3.Session: + """Wrap get_session to enable stubbing. + + A stubber must exist before ``get_session`` is called or an error will be raised. Args: - value: New value for the attribute. + aws_access_key_id: AWS Access Key ID. + aws_secret_access_key: AWS secret Access Key. + aws_session_token: AWS session token. + profile: The profile for the session. + region: The region for the session. """ - self._use_concurrent = value + return cast( + boto3.Session, + self._get_mocked_session( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + aws_session_token=aws_session_token, + profile=profile, + region=region or self.env.aws_region, + ), + ) + + def get_stubbed_client(self, service_name: str, *, region: str | None = None) -> BaseClient: + """Get an existing stubbed client. + + This can be used after :meth:`~pytest_runway.MockCfnginContext.add_stubber` has + been called to get the stubber client. + + Args: + service_name: The name of the service that was stubbed. + region: The region of the session. + + """ + return self._get_mocked_session(region=region).client(service_name, region_name=region) class YamlLoader: """Load YAML files from a directory.""" def __init__( - self, root: Path, load_class: Optional[type] = None, load_type: str = "default" + self, root: Path, load_class: type | None = None, load_type: str = "default" ) -> None: """Instantiate class. 
diff --git a/tests/unit/lookups/handlers/test_base.py b/tests/unit/lookups/handlers/test_base.py index de0e0eb7c..e4c348289 100644 --- a/tests/unit/lookups/handlers/test_base.py +++ b/tests/unit/lookups/handlers/test_base.py @@ -3,11 +3,11 @@ from __future__ import annotations import json -from typing import Dict, Optional +from typing import Optional +from unittest.mock import MagicMock import pytest import yaml -from mock import MagicMock from runway.lookups.handlers.base import LookupHandler from runway.utils import MutableMap @@ -117,8 +117,8 @@ def test_load_list(self) -> None: def test_parse( self, query: str, - raw_args: Optional[Dict[str, str]], - expected_args: Dict[str, str], + raw_args: Optional[dict[str, str]], + expected_args: dict[str, str], ) -> None: """Test parse.""" value = f"{query}::{raw_args}" @@ -131,7 +131,8 @@ def test_transform_bool_to_bool(self) -> None: result_true = LookupHandler.transform(True, to_type="bool") result_false = LookupHandler.transform(False, to_type="bool") - assert isinstance(result_true, bool) and isinstance(result_false, bool) + assert isinstance(result_true, bool) + assert isinstance(result_false, bool) assert result_true assert not result_false @@ -146,7 +147,8 @@ def test_transform_str_to_bool(self) -> None: result_true = LookupHandler.transform("true", to_type="bool") result_false = LookupHandler.transform("false", to_type="bool") - assert isinstance(result_true, bool) and isinstance(result_false, bool) + assert isinstance(result_true, bool) + assert isinstance(result_false, bool) assert result_true assert not result_false @@ -178,7 +180,7 @@ def test_transform_str_direct(self) -> None: def test_transform_str_list(self) -> None: """Test list type joined to create string.""" assert LookupHandler.transform(["val1", "val2"], to_type="str") == "val1,val2" - assert LookupHandler.transform({"val", "val"}, to_type="str") == "val" + assert LookupHandler.transform({"val"}, to_type="str") == "val" assert LookupHandler.transform(("val1", "val2"), to_type="str") == "val1,val2" def test_transform_str_list_delimiter(self) -> None: diff --git a/tests/unit/lookups/handlers/test_cfn.py b/tests/unit/lookups/handlers/test_cfn.py index e02cf6218..274b2a589 100644 --- a/tests/unit/lookups/handlers/test_cfn.py +++ b/tests/unit/lookups/handlers/test_cfn.py @@ -6,13 +6,13 @@ import json import logging from datetime import datetime -from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple +from typing import TYPE_CHECKING, Any, Optional +from unittest.mock import MagicMock import boto3 import pytest from botocore.exceptions import ClientError from botocore.stub import Stubber -from mock import MagicMock from runway.cfngin.exceptions import StackDoesNotExist from runway.cfngin.providers.aws.default import Provider @@ -21,7 +21,6 @@ if TYPE_CHECKING: from mypy_boto3_cloudformation.client import CloudFormationClient - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from ...factories import MockRunwayContext @@ -29,10 +28,10 @@ def generate_describe_stacks_stack( stack_name: str, - outputs: Dict[str, str], + outputs: dict[str, str], creation_time: Optional[datetime] = None, stack_status: str = "CREATE_COMPLETE", -) -> Dict[str, Any]: +) -> dict[str, Any]: """Generate describe stacks stack. 
Args: @@ -59,7 +58,7 @@ def generate_describe_stacks_stack( } -def setup_cfn_client() -> Tuple[CloudFormationClient, Stubber]: +def setup_cfn_client() -> tuple[CloudFormationClient, Stubber]: """Create a CloudFormation client & Stubber.""" client = boto3.client("cloudformation") return client, Stubber(client) @@ -126,7 +125,7 @@ def test_handle(self, mocker: MockerFixture) -> None: ) def test_handle_exception( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, default: Optional[str], exception: Exception, mocker: MockerFixture, @@ -182,7 +181,7 @@ def test_handle_exception( ) def test_handle_provider_exception( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, default: Optional[str], exception: Exception, mocker: MockerFixture, @@ -227,11 +226,10 @@ def test_handle_provider_exception( def test_handle_valueerror(self, runway_context: MockRunwayContext) -> None: """Test handle raising ValueError.""" - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="query must be ."): assert CfnLookup.handle("something", runway_context) - assert str(excinfo.value) == 'query must be .; got "something"' - def test_get_stack_output(self, caplog: LogCaptureFixture) -> None: + def test_get_stack_output(self, caplog: pytest.LogCaptureFixture) -> None: """Test get_stack_output.""" caplog.set_level(logging.DEBUG, logger="runway.lookups.handlers.cfn") client, stubber = setup_cfn_client() @@ -252,7 +250,7 @@ def test_get_stack_output(self, caplog: LogCaptureFixture) -> None: assert f"describing stack: {stack_name}" in caplog.messages assert f"{stack_name} stack outputs: {json.dumps(outputs)}" in caplog.messages - def test_get_stack_output_clienterror(self, caplog: LogCaptureFixture) -> None: + def test_get_stack_output_clienterror(self, caplog: pytest.LogCaptureFixture) -> None: """Test get_stack_output raising ClientError.""" caplog.set_level(logging.DEBUG, logger="runway.lookups.handlers.cfn") client, stubber = setup_cfn_client() @@ -272,7 +270,7 @@ def test_get_stack_output_clienterror(self, caplog: LogCaptureFixture) -> None: stubber.assert_no_pending_responses() assert f"describing stack: {stack_name}" in caplog.messages - def test_get_stack_output_keyerror(self, caplog: LogCaptureFixture) -> None: + def test_get_stack_output_keyerror(self, caplog: pytest.LogCaptureFixture) -> None: """Test get_stack_output raising KeyError.""" caplog.set_level(logging.DEBUG, logger="runway.lookups.handlers.cfn") client, stubber = setup_cfn_client() @@ -302,8 +300,8 @@ def test_get_stack_output_keyerror(self, caplog: LogCaptureFixture) -> None: ) def test_should_use_provider_falsy( self, - args: Dict[str, Any], - caplog: LogCaptureFixture, + args: dict[str, Any], + caplog: pytest.LogCaptureFixture, provider: Optional[Provider], ) -> None: """Test should_use_provider with falsy cases.""" @@ -322,8 +320,8 @@ def test_should_use_provider_falsy( ) def test_should_use_provider_truthy( self, - args: Dict[str, Any], - caplog: LogCaptureFixture, + args: dict[str, Any], + caplog: pytest.LogCaptureFixture, provider: Optional[Provider], ) -> None: """Test should_use_provider with truthy cases.""" diff --git a/tests/unit/lookups/handlers/test_ecr.py b/tests/unit/lookups/handlers/test_ecr.py index 762d78e29..7cc395e79 100644 --- a/tests/unit/lookups/handlers/test_ecr.py +++ b/tests/unit/lookups/handlers/test_ecr.py @@ -11,15 +11,16 @@ from runway.lookups.handlers.ecr import EcrLookup if TYPE_CHECKING: - from mock import MagicMock + from unittest.mock import MagicMock + 
from pytest_mock import MockerFixture - from ...factories import MockCFNginContext, MockRunwayContext + from ...factories import MockCfnginContext, MockRunwayContext MODULE = "runway.lookups.handlers.ecr" -@pytest.fixture(scope="function") +@pytest.fixture() def mock_format_results(mocker: MockerFixture) -> MagicMock: """Mock EcrLookup.format_results.""" return mocker.patch.object( @@ -31,7 +32,7 @@ class TestEcrLookup: """Test runway.lookups.handlers.ecr.EcrLookup.""" def test_get_login_password( - self, cfngin_context: MockCFNginContext, runway_context: MockRunwayContext + self, cfngin_context: MockCfnginContext, runway_context: MockRunwayContext ) -> None: """Test get_login_password.""" cfngin_stubber = cfngin_context.add_stubber("ecr") @@ -103,8 +104,8 @@ def test_handle_login_password( def test_handle_value_error(self, runway_context: MockRunwayContext) -> None: """Test handle raise ValueError.""" runway_context.add_stubber("ecr") - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match="ecr lookup does not support") as excinfo: EcrLookup.handle("unsupported", runway_context) assert str(excinfo.value) == "ecr lookup does not support 'unsupported'" - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="ecr lookup does not support"): EcrLookup.handle("unsupported::default=something", runway_context) diff --git a/tests/unit/lookups/handlers/test_env.py b/tests/unit/lookups/handlers/test_env.py index 4a85ced3a..f0f7d054c 100644 --- a/tests/unit/lookups/handlers/test_env.py +++ b/tests/unit/lookups/handlers/test_env.py @@ -27,5 +27,5 @@ def test_handle(self, runway_context: MockRunwayContext) -> None: def test_handle_not_found(self, runway_context: MockRunwayContext) -> None: """Validate exception when lookup cannot be resolved.""" runway_context.env.vars = ENV_VARS.copy() - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="does not exist in the environment"): EnvLookup.handle("NOT_VALID", context=runway_context) diff --git a/tests/unit/lookups/handlers/test_random_string.py b/tests/unit/lookups/handlers/test_random_string.py index bc0f1bd33..222e99503 100644 --- a/tests/unit/lookups/handlers/test_random_string.py +++ b/tests/unit/lookups/handlers/test_random_string.py @@ -4,9 +4,9 @@ import string from typing import TYPE_CHECKING +from unittest.mock import Mock import pytest -from mock import Mock from runway.lookups.handlers.random_string import ArgsDataModel, RandomStringLookup @@ -189,7 +189,7 @@ def test_handle_digit(self, mocker: MockerFixture) -> None: def test_handle_raise_value_error(self) -> None: """Test handle.""" - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 RandomStringLookup.handle("test", Mock()) @pytest.mark.parametrize("value, expected", [(">!?test", False), ("t3st", True)]) diff --git a/tests/unit/lookups/handlers/test_ssm.py b/tests/unit/lookups/handlers/test_ssm.py index 7c55f586c..4ef5616cb 100644 --- a/tests/unit/lookups/handlers/test_ssm.py +++ b/tests/unit/lookups/handlers/test_ssm.py @@ -5,7 +5,7 @@ import json from datetime import datetime -from typing import TYPE_CHECKING, Any, Dict, Optional, Union +from typing import TYPE_CHECKING, Any, Optional, Union import pytest import yaml @@ -14,7 +14,7 @@ from runway.variables import Variable if TYPE_CHECKING: - from ...factories import MockCFNginContext, MockRunwayContext + from ...factories import MockCfnginContext, MockRunwayContext def get_parameter_response( @@ -23,7 +23,7 @@ def get_parameter_response( 
value_type: str = "String", label: Optional[str] = None, version: int = 1, -) -> Dict[str, Any]: +) -> dict[str, Any]: """Generate a mock ssm.get_parameter response.""" selector = f"{name}/{label or version}" return { @@ -40,7 +40,7 @@ def get_parameter_response( } -def get_parameter_request(name: str, decrypt: bool = True) -> Dict[str, Union[bool, str]]: +def get_parameter_request(name: str, decrypt: bool = True) -> dict[str, Union[bool, str]]: """Generate the expected request parameters for ssm.get_parameter.""" return {"Name": name, "WithDecryption": decrypt} @@ -49,7 +49,7 @@ class TestSsmLookup: """Test runway.lookups.handlers.ssm.SsmLookup.""" def test_basic( - self, cfngin_context: MockCFNginContext, runway_context: MockRunwayContext + self, cfngin_context: MockCfnginContext, runway_context: MockRunwayContext ) -> None: """Test resolution of a basic lookup.""" name = "/test/param" diff --git a/tests/unit/lookups/handlers/test_var.py b/tests/unit/lookups/handlers/test_var.py index 1865bd41f..e15adc7b6 100644 --- a/tests/unit/lookups/handlers/test_var.py +++ b/tests/unit/lookups/handlers/test_var.py @@ -13,7 +13,7 @@ if TYPE_CHECKING: from ...factories import MockRunwayContext -VARIABLES = MutableMap(**{"str_val": "test", "false_val": False}) +VARIABLES = MutableMap(str_val="test", false_val=False) class TestVarLookup: @@ -29,5 +29,5 @@ def test_handle_false_result(self, runway_context: MockRunwayContext) -> None: def test_handle_not_found(self, runway_context: MockRunwayContext) -> None: """Validate exception when lookup cannot be resolved.""" - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="does not exist in the variable definition"): VarLookup.handle("NOT_VALID", context=runway_context, variables=VARIABLES) diff --git a/tests/unit/lookups/test_registry.py b/tests/unit/lookups/test_registry.py index 234570426..9a53e7353 100644 --- a/tests/unit/lookups/test_registry.py +++ b/tests/unit/lookups/test_registry.py @@ -21,7 +21,7 @@ from ..factories import MockRunwayContext VALUES = {"str_val": "test"} -CONTEXT = MutableMap(**{"env_vars": VALUES}) +CONTEXT = MutableMap(env_vars=VALUES) VARIABLES = MutableMap(**VALUES) diff --git a/tests/unit/mock_docker/fake_api.py b/tests/unit/mock_docker/fake_api.py index e25f3dfd2..9b997ea0e 100644 --- a/tests/unit/mock_docker/fake_api.py +++ b/tests/unit/mock_docker/fake_api.py @@ -1,6 +1,8 @@ """Fake Docker API.""" -from typing import Any, Callable, Dict, Tuple, Union +# cspell:disable +# ruff: noqa: D103 +from typing import Any, Callable, Union from docker import constants @@ -26,7 +28,7 @@ # for clarity and readability -def get_fake_version() -> Tuple[int, Any]: +def get_fake_version() -> tuple[int, Any]: status_code = 200 response = { "ApiVersion": "1.35", @@ -61,7 +63,7 @@ def get_fake_version() -> Tuple[int, Any]: return status_code, response -def get_fake_info() -> Tuple[int, Any]: +def get_fake_info() -> tuple[int, Any]: status_code = 200 response = { "Containers": 1, @@ -74,23 +76,23 @@ def get_fake_info() -> Tuple[int, Any]: return status_code, response -def post_fake_auth() -> Tuple[int, Any]: +def post_fake_auth() -> tuple[int, Any]: status_code = 200 response = {"Status": "Login Succeeded", "IdentityToken": "9cbaf023786cd7"} return status_code, response -def get_fake_ping() -> Tuple[int, Any]: +def get_fake_ping() -> tuple[int, Any]: return 200, "OK" -def get_fake_search() -> Tuple[int, Any]: +def get_fake_search() -> tuple[int, Any]: status_code = 200 response = [{"Name": "busybox", "Description": "Fake 
Description"}] return status_code, response -def get_fake_images() -> Tuple[int, Any]: +def get_fake_images() -> tuple[int, Any]: status_code = 200 response = [ { @@ -103,7 +105,7 @@ def get_fake_images() -> Tuple[int, Any]: return status_code, response -def get_fake_image_history() -> Tuple[int, Any]: +def get_fake_image_history() -> tuple[int, Any]: status_code = 200 response = [ {"Id": "b750fe79269d", "Created": 1364102658, "CreatedBy": "/bin/bash"}, @@ -113,14 +115,14 @@ def get_fake_image_history() -> Tuple[int, Any]: return status_code, response -def post_fake_import_image() -> Tuple[int, Any]: +def post_fake_import_image() -> tuple[int, Any]: status_code = 200 response = "Import messages..." return status_code, response -def get_fake_containers() -> Tuple[int, Any]: +def get_fake_containers() -> tuple[int, Any]: status_code = 200 response = [ { @@ -134,27 +136,27 @@ def get_fake_containers() -> Tuple[int, Any]: return status_code, response -def post_fake_start_container() -> Tuple[int, Any]: +def post_fake_start_container() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_CONTAINER_ID} return status_code, response -def post_fake_resize_container() -> Tuple[int, Any]: +def post_fake_resize_container() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_CONTAINER_ID} return status_code, response -def post_fake_create_container() -> Tuple[int, Any]: +def post_fake_create_container() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_CONTAINER_ID} return status_code, response -def get_fake_inspect_container(tty: bool = False) -> Tuple[int, Any]: +def get_fake_inspect_container(tty: bool = False) -> tuple[int, Any]: status_code = 200 - response: Dict[str, Any] = { + response: dict[str, Any] = { "Id": FAKE_CONTAINER_ID, "Config": {"Labels": {"foo": "bar"}, "Privileged": True, "Tty": tty}, "ID": FAKE_CONTAINER_ID, @@ -174,7 +176,7 @@ def get_fake_inspect_container(tty: bool = False) -> Tuple[int, Any]: return status_code, response -def get_fake_inspect_image() -> Tuple[int, Any]: +def get_fake_inspect_image() -> tuple[int, Any]: status_code = 200 response = { "Id": FAKE_IMAGE_ID, @@ -207,19 +209,19 @@ def get_fake_inspect_image() -> Tuple[int, Any]: return status_code, response -def get_fake_insert_image() -> Tuple[int, Any]: +def get_fake_insert_image() -> tuple[int, Any]: status_code = 200 response = {"StatusCode": 0} return status_code, response -def get_fake_wait() -> Tuple[int, Any]: +def get_fake_wait() -> tuple[int, Any]: status_code = 200 response = {"StatusCode": 0} return status_code, response -def get_fake_logs() -> Tuple[int, Any]: +def get_fake_logs() -> tuple[int, Any]: status_code = 200 response = ( b"\x01\x00\x00\x00\x00\x00\x00\x00" @@ -230,13 +232,13 @@ def get_fake_logs() -> Tuple[int, Any]: return status_code, response -def get_fake_diff() -> Tuple[int, Any]: +def get_fake_diff() -> tuple[int, Any]: status_code = 200 response = [{"Path": "/test", "Kind": 1}] return status_code, response -def get_fake_events() -> Tuple[int, Any]: +def get_fake_events() -> tuple[int, Any]: status_code = 200 response = [ { @@ -249,19 +251,19 @@ def get_fake_events() -> Tuple[int, Any]: return status_code, response -def get_fake_export() -> Tuple[int, Any]: +def get_fake_export() -> tuple[int, Any]: status_code = 200 response = "Byte Stream...." 
return status_code, response -def post_fake_exec_create() -> Tuple[int, Any]: +def post_fake_exec_create() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_EXEC_ID} return status_code, response -def post_fake_exec_start() -> Tuple[int, Any]: +def post_fake_exec_start() -> tuple[int, Any]: status_code = 200 response = ( b"\x01\x00\x00\x00\x00\x00\x00\x11bin\nboot\ndev\netc\n" @@ -271,12 +273,12 @@ def post_fake_exec_start() -> Tuple[int, Any]: return status_code, response -def post_fake_exec_resize() -> Tuple[int, Any]: +def post_fake_exec_resize() -> tuple[int, Any]: status_code = 201 return status_code, "" -def get_fake_exec_inspect() -> Tuple[int, Any]: +def get_fake_exec_inspect() -> tuple[int, Any]: return ( 200, { @@ -298,102 +300,102 @@ def get_fake_exec_inspect() -> Tuple[int, Any]: ) -def post_fake_stop_container() -> Tuple[int, Any]: +def post_fake_stop_container() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_CONTAINER_ID} return status_code, response -def post_fake_kill_container() -> Tuple[int, Any]: +def post_fake_kill_container() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_CONTAINER_ID} return status_code, response -def post_fake_pause_container() -> Tuple[int, Any]: +def post_fake_pause_container() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_CONTAINER_ID} return status_code, response -def post_fake_unpause_container() -> Tuple[int, Any]: +def post_fake_unpause_container() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_CONTAINER_ID} return status_code, response -def post_fake_restart_container() -> Tuple[int, Any]: +def post_fake_restart_container() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_CONTAINER_ID} return status_code, response -def post_fake_rename_container() -> Tuple[int, Any]: +def post_fake_rename_container() -> tuple[int, Any]: status_code = 204 return status_code, None -def delete_fake_remove_container() -> Tuple[int, Any]: +def delete_fake_remove_container() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_CONTAINER_ID} return status_code, response -def post_fake_image_create() -> Tuple[int, Any]: +def post_fake_image_create() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_IMAGE_ID} return status_code, response -def delete_fake_remove_image() -> Tuple[int, Any]: +def delete_fake_remove_image() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_IMAGE_ID} return status_code, response -def get_fake_get_image() -> Tuple[int, Any]: +def get_fake_get_image() -> tuple[int, Any]: status_code = 200 response = "Byte Stream...." 
return status_code, response -def post_fake_load_image() -> Tuple[int, Any]: +def post_fake_load_image() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_IMAGE_ID} return status_code, response -def post_fake_commit() -> Tuple[int, Any]: +def post_fake_commit() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_CONTAINER_ID} return status_code, response -def post_fake_push() -> Tuple[int, Any]: +def post_fake_push() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_IMAGE_ID} return status_code, response -def post_fake_build_container() -> Tuple[int, Any]: +def post_fake_build_container() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_CONTAINER_ID} return status_code, response -def post_fake_tag_image() -> Tuple[int, Any]: +def post_fake_tag_image() -> tuple[int, Any]: status_code = 200 response = {"Id": FAKE_IMAGE_ID} return status_code, response -def get_fake_stats() -> Tuple[int, Any]: +def get_fake_stats() -> tuple[int, Any]: status_code = 200 response = fake_stat.OBJ return status_code, response -def get_fake_top() -> Tuple[int, Any]: +def get_fake_top() -> tuple[int, Any]: return ( 200, { @@ -414,7 +416,7 @@ def get_fake_top() -> Tuple[int, Any]: ) -def get_fake_volume_list() -> Tuple[int, Any]: +def get_fake_volume_list() -> tuple[int, Any]: status_code = 200 response = { "Volumes": [ @@ -435,7 +437,7 @@ def get_fake_volume_list() -> Tuple[int, Any]: return status_code, response -def get_fake_volume() -> Tuple[int, Any]: +def get_fake_volume() -> tuple[int, Any]: status_code = 200 response = { "Name": "perfectcherryblossom", @@ -447,23 +449,23 @@ def get_fake_volume() -> Tuple[int, Any]: return status_code, response -def fake_remove_volume() -> Tuple[int, Any]: +def fake_remove_volume() -> tuple[int, Any]: return 204, None -def post_fake_update_container() -> Tuple[int, Any]: +def post_fake_update_container() -> tuple[int, Any]: return 200, {"Warnings": []} -def post_fake_update_node() -> Tuple[int, Any]: +def post_fake_update_node() -> tuple[int, Any]: return 200, None -def post_fake_join_swarm() -> Tuple[int, Any]: +def post_fake_join_swarm() -> tuple[int, Any]: return 200, None -def get_fake_network_list() -> Tuple[int, Any]: +def get_fake_network_list() -> tuple[int, Any]: return ( 200, [ @@ -487,7 +489,7 @@ def get_fake_network_list() -> Tuple[int, Any]: "com.docker.network.bridge.default_bridge": "true", "com.docker.network.bridge.enable_icc": "true", "com.docker.network.bridge.enable_ip_masquerade": "true", - "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", # noqa: S104 "com.docker.network.bridge.name": "docker0", "com.docker.network.driver.mtu": "1500", }, @@ -496,23 +498,23 @@ def get_fake_network_list() -> Tuple[int, Any]: ) -def get_fake_network() -> Tuple[int, Any]: +def get_fake_network() -> tuple[int, Any]: return 200, get_fake_network_list()[1][0] -def post_fake_network() -> Tuple[int, Any]: +def post_fake_network() -> tuple[int, Any]: return 201, {"Id": FAKE_NETWORK_ID, "Warnings": []} -def delete_fake_network() -> Tuple[int, Any]: +def delete_fake_network() -> tuple[int, Any]: return 204, None -def post_fake_network_connect() -> Tuple[int, Any]: +def post_fake_network_connect() -> tuple[int, Any]: return 200, None -def post_fake_network_disconnect() -> Tuple[int, Any]: +def post_fake_network_disconnect() -> tuple[int, Any]: return 200, None @@ -521,107 +523,86 @@ def post_fake_network_disconnect() -> Tuple[int, Any]: if constants.IS_WINDOWS_PLATFORM: 
prefix = "http+docker://localnpipe" -fake_responses: Dict[Union[str, Tuple[str, str]], Callable[..., Tuple[int, Any]]] = { - "{0}/version".format(prefix): get_fake_version, - "{1}/{0}/version".format(CURRENT_VERSION, prefix): get_fake_version, - "{1}/{0}/info".format(CURRENT_VERSION, prefix): get_fake_info, - "{1}/{0}/auth".format(CURRENT_VERSION, prefix): post_fake_auth, - "{1}/{0}/_ping".format(CURRENT_VERSION, prefix): get_fake_ping, - "{1}/{0}/images/search".format(CURRENT_VERSION, prefix): get_fake_search, - "{1}/{0}/images/json".format(CURRENT_VERSION, prefix): get_fake_images, - "{1}/{0}/images/test_image/history".format(CURRENT_VERSION, prefix): get_fake_image_history, - "{1}/{0}/images/create".format(CURRENT_VERSION, prefix): post_fake_import_image, - "{1}/{0}/containers/json".format(CURRENT_VERSION, prefix): get_fake_containers, - "{1}/{0}/containers/3cc2351ab11b/start".format( - CURRENT_VERSION, prefix - ): post_fake_start_container, - "{1}/{0}/containers/3cc2351ab11b/resize".format( - CURRENT_VERSION, prefix - ): post_fake_resize_container, - "{1}/{0}/containers/3cc2351ab11b/json".format( - CURRENT_VERSION, prefix - ): get_fake_inspect_container, - "{1}/{0}/containers/3cc2351ab11b/rename".format( - CURRENT_VERSION, prefix - ): post_fake_rename_container, - "{1}/{0}/images/e9aa60c60128/tag".format(CURRENT_VERSION, prefix): post_fake_tag_image, - "{1}/{0}/containers/3cc2351ab11b/wait".format(CURRENT_VERSION, prefix): get_fake_wait, - "{1}/{0}/containers/3cc2351ab11b/logs".format(CURRENT_VERSION, prefix): get_fake_logs, - "{1}/{0}/containers/3cc2351ab11b/changes".format(CURRENT_VERSION, prefix): get_fake_diff, - "{1}/{0}/containers/3cc2351ab11b/export".format(CURRENT_VERSION, prefix): get_fake_export, - "{1}/{0}/containers/3cc2351ab11b/update".format( - CURRENT_VERSION, prefix - ): post_fake_update_container, - "{1}/{0}/containers/3cc2351ab11b/exec".format(CURRENT_VERSION, prefix): post_fake_exec_create, - "{1}/{0}/exec/d5d177f121dc/start".format(CURRENT_VERSION, prefix): post_fake_exec_start, - "{1}/{0}/exec/d5d177f121dc/json".format(CURRENT_VERSION, prefix): get_fake_exec_inspect, - "{1}/{0}/exec/d5d177f121dc/resize".format(CURRENT_VERSION, prefix): post_fake_exec_resize, - "{1}/{0}/containers/3cc2351ab11b/stats".format(CURRENT_VERSION, prefix): get_fake_stats, - "{1}/{0}/containers/3cc2351ab11b/top".format(CURRENT_VERSION, prefix): get_fake_top, - "{1}/{0}/containers/3cc2351ab11b/stop".format( - CURRENT_VERSION, prefix - ): post_fake_stop_container, - "{1}/{0}/containers/3cc2351ab11b/kill".format( - CURRENT_VERSION, prefix - ): post_fake_kill_container, - "{1}/{0}/containers/3cc2351ab11b/pause".format( - CURRENT_VERSION, prefix - ): post_fake_pause_container, - "{1}/{0}/containers/3cc2351ab11b/unpause".format( - CURRENT_VERSION, prefix - ): post_fake_unpause_container, - "{1}/{0}/containers/3cc2351ab11b/restart".format( - CURRENT_VERSION, prefix - ): post_fake_restart_container, - "{1}/{0}/containers/3cc2351ab11b".format(CURRENT_VERSION, prefix): delete_fake_remove_container, - "{1}/{0}/images/create".format(CURRENT_VERSION, prefix): post_fake_image_create, - "{1}/{0}/images/e9aa60c60128".format(CURRENT_VERSION, prefix): delete_fake_remove_image, - "{1}/{0}/images/e9aa60c60128/get".format(CURRENT_VERSION, prefix): get_fake_get_image, - "{1}/{0}/images/load".format(CURRENT_VERSION, prefix): post_fake_load_image, - "{1}/{0}/images/test_image/json".format(CURRENT_VERSION, prefix): get_fake_inspect_image, - "{1}/{0}/images/test_image/insert".format(CURRENT_VERSION, prefix): 
get_fake_insert_image, - "{1}/{0}/images/test_image/push".format(CURRENT_VERSION, prefix): post_fake_push, - "{1}/{0}/commit".format(CURRENT_VERSION, prefix): post_fake_commit, - "{1}/{0}/containers/create".format(CURRENT_VERSION, prefix): post_fake_create_container, - "{1}/{0}/build".format(CURRENT_VERSION, prefix): post_fake_build_container, - "{1}/{0}/events".format(CURRENT_VERSION, prefix): get_fake_events, - ("{1}/{0}/volumes".format(CURRENT_VERSION, prefix), "GET"): get_fake_volume_list, - ("{1}/{0}/volumes/create".format(CURRENT_VERSION, prefix), "POST"): get_fake_volume, +fake_responses: dict[Union[str, tuple[str, str]], Callable[..., tuple[int, Any]]] = { + f"{prefix}/version": get_fake_version, + f"{prefix}/{CURRENT_VERSION}/version": get_fake_version, + f"{prefix}/{CURRENT_VERSION}/info": get_fake_info, + f"{prefix}/{CURRENT_VERSION}/auth": post_fake_auth, + f"{prefix}/{CURRENT_VERSION}/_ping": get_fake_ping, + f"{prefix}/{CURRENT_VERSION}/images/search": get_fake_search, + f"{prefix}/{CURRENT_VERSION}/images/json": get_fake_images, + f"{prefix}/{CURRENT_VERSION}/images/test_image/history": get_fake_image_history, + f"{prefix}/{CURRENT_VERSION}/containers/json": get_fake_containers, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/start": post_fake_start_container, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/resize": post_fake_resize_container, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/json": get_fake_inspect_container, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/rename": post_fake_rename_container, + f"{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/tag": post_fake_tag_image, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/wait": get_fake_wait, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/logs": get_fake_logs, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/changes": get_fake_diff, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/export": get_fake_export, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/update": post_fake_update_container, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/exec": post_fake_exec_create, + f"{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/start": post_fake_exec_start, + f"{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/json": get_fake_exec_inspect, + f"{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/resize": post_fake_exec_resize, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stats": get_fake_stats, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/top": get_fake_top, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stop": post_fake_stop_container, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/kill": post_fake_kill_container, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/pause": post_fake_pause_container, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/unpause": post_fake_unpause_container, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/restart": post_fake_restart_container, + f"{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b": delete_fake_remove_container, + f"{prefix}/{CURRENT_VERSION}/images/create": post_fake_image_create, + f"{prefix}/{CURRENT_VERSION}/images/e9aa60c60128": delete_fake_remove_image, + f"{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/get": get_fake_get_image, + f"{prefix}/{CURRENT_VERSION}/images/load": post_fake_load_image, + f"{prefix}/{CURRENT_VERSION}/images/test_image/json": get_fake_inspect_image, + f"{prefix}/{CURRENT_VERSION}/images/test_image/insert": get_fake_insert_image, + 
f"{prefix}/{CURRENT_VERSION}/images/test_image/push": post_fake_push, + f"{prefix}/{CURRENT_VERSION}/commit": post_fake_commit, + f"{prefix}/{CURRENT_VERSION}/containers/create": post_fake_create_container, + f"{prefix}/{CURRENT_VERSION}/build": post_fake_build_container, + f"{prefix}/{CURRENT_VERSION}/events": get_fake_events, + (f"{prefix}/{CURRENT_VERSION}/volumes", "GET"): get_fake_volume_list, + (f"{prefix}/{CURRENT_VERSION}/volumes/create", "POST"): get_fake_volume, ( - "{1}/{0}/volumes/{2}".format(CURRENT_VERSION, prefix, FAKE_VOLUME_NAME), + f"{prefix}/{CURRENT_VERSION}/volumes/{FAKE_VOLUME_NAME}", "GET", ): get_fake_volume, ( - "{1}/{0}/volumes/{2}".format(CURRENT_VERSION, prefix, FAKE_VOLUME_NAME), + f"{prefix}/{CURRENT_VERSION}/volumes/{FAKE_VOLUME_NAME}", "DELETE", ): fake_remove_volume, ( - "{1}/{0}/nodes/{2}/update?version=1".format(CURRENT_VERSION, prefix, FAKE_NODE_ID), + f"{prefix}/{CURRENT_VERSION}/nodes/{FAKE_NODE_ID}/update?version=1", "POST", ): post_fake_update_node, ( - "{1}/{0}/swarm/join".format(CURRENT_VERSION, prefix), + f"{prefix}/{CURRENT_VERSION}/swarm/join", "POST", ): post_fake_join_swarm, - ("{1}/{0}/networks".format(CURRENT_VERSION, prefix), "GET"): get_fake_network_list, + (f"{prefix}/{CURRENT_VERSION}/networks", "GET"): get_fake_network_list, ( - "{1}/{0}/networks/create".format(CURRENT_VERSION, prefix), + f"{prefix}/{CURRENT_VERSION}/networks/create", "POST", ): post_fake_network, ( - "{1}/{0}/networks/{2}".format(CURRENT_VERSION, prefix, FAKE_NETWORK_ID), + f"{prefix}/{CURRENT_VERSION}/networks/{FAKE_NETWORK_ID}", "GET", ): get_fake_network, ( - "{1}/{0}/networks/{2}".format(CURRENT_VERSION, prefix, FAKE_NETWORK_ID), + f"{prefix}/{CURRENT_VERSION}/networks/{FAKE_NETWORK_ID}", "DELETE", ): delete_fake_network, ( - "{1}/{0}/networks/{2}/connect".format(CURRENT_VERSION, prefix, FAKE_NETWORK_ID), + f"{prefix}/{CURRENT_VERSION}/networks/{FAKE_NETWORK_ID}/connect", "POST", ): post_fake_network_connect, ( - "{1}/{0}/networks/{2}/disconnect".format(CURRENT_VERSION, prefix, FAKE_NETWORK_ID), + f"{prefix}/{CURRENT_VERSION}/networks/{FAKE_NETWORK_ID}/disconnect", "POST", ): post_fake_network_disconnect, } diff --git a/tests/unit/mock_docker/fake_api_client.py b/tests/unit/mock_docker/fake_api_client.py index 7c35b2ba8..1d106f53e 100644 --- a/tests/unit/mock_docker/fake_api_client.py +++ b/tests/unit/mock_docker/fake_api_client.py @@ -1,10 +1,10 @@ """Fake Docker API client.""" import copy -from typing import Any, Dict, Optional +from typing import Any, Optional +from unittest import mock import docker -import mock from docker.constants import DEFAULT_DOCKER_API_VERSION from . import fake_api @@ -20,7 +20,7 @@ def _mock_call(self, *args: Any, **kwargs: Any) -> Any: return ret # type: ignore -def make_fake_api_client(overrides: Optional[Dict[str, Any]] = None) -> CopyReturnMagicMock: +def make_fake_api_client(overrides: Optional[dict[str, Any]] = None) -> CopyReturnMagicMock: """Return non-complete fake APIClient. 
This returns most of the default cases correctly, but most arguments that @@ -56,7 +56,7 @@ def make_fake_api_client(overrides: Optional[Dict[str, Any]] = None) -> CopyRetu return mock_client -def make_fake_client(overrides: Optional[Dict[str, Any]] = None) -> docker.DockerClient: +def make_fake_client(overrides: Optional[dict[str, Any]] = None) -> docker.DockerClient: """Return a Client with a fake APIClient.""" client = docker.DockerClient(version=DEFAULT_DOCKER_API_VERSION) client.api = make_fake_api_client(overrides) diff --git a/tests/unit/mock_docker/fake_stat.py b/tests/unit/mock_docker/fake_stat.py index 98814c704..12c5d1a37 100644 --- a/tests/unit/mock_docker/fake_stat.py +++ b/tests/unit/mock_docker/fake_stat.py @@ -1,9 +1,9 @@ """Stats for fake Docker API.""" # cspell:disable -from typing import Any, Dict +from typing import Any -OBJ: Dict[str, Any] = { +OBJ: dict[str, Any] = { "read": "2015-02-11T19:20:46.667237763+02:00", "network": { "rx_bytes": 567224, diff --git a/tests/unit/module/conftest.py b/tests/unit/module/conftest.py index f1d4a6bea..406de82e4 100644 --- a/tests/unit/module/conftest.py +++ b/tests/unit/module/conftest.py @@ -10,7 +10,7 @@ from pytest_mock import MockerFixture -@pytest.fixture +@pytest.fixture() def patch_module_npm(mocker: MockerFixture) -> None: """Patch methods and functions used during init of RunwayModuleNpm.""" mocker.patch("runway.module.base.RunwayModuleNpm.check_for_npm") diff --git a/tests/unit/module/staticsite/conftest.py b/tests/unit/module/staticsite/conftest.py index 56b119420..7c4bb8941 100644 --- a/tests/unit/module/staticsite/conftest.py +++ b/tests/unit/module/staticsite/conftest.py @@ -7,13 +7,13 @@ import pytest -@pytest.fixture(scope="function") +@pytest.fixture() def expected_yaml(local_fixtures: Path) -> Path: """Path to local fixtures expected yaml.""" return local_fixtures / "expected_yaml" -@pytest.fixture(scope="function") +@pytest.fixture() def local_fixtures() -> Path: """Local fixtures directory.""" return Path(__file__).parent / "fixtures" diff --git a/tests/unit/module/staticsite/options/test_models.py b/tests/unit/module/staticsite/options/test_models.py index 523915e85..62372f679 100644 --- a/tests/unit/module/staticsite/options/test_models.py +++ b/tests/unit/module/staticsite/options/test_models.py @@ -4,7 +4,7 @@ from __future__ import annotations from pathlib import Path -from typing import Any, Dict, Optional, cast +from typing import Any, Optional, cast import pytest from pydantic import ValidationError @@ -81,13 +81,13 @@ def test_init_file(self, tmp_path: Path) -> None: @pytest.mark.parametrize( "data", [ - cast(Dict[str, str], {}), + cast(dict[str, str], {}), {"name": "test"}, {"content": "test"}, {"file": "test"}, ], ) - def test_init_required(self, data: Dict[str, Any]) -> None: + def test_init_required(self, data: dict[str, Any]) -> None: """Test init required fields.""" with pytest.raises(ValidationError): RunwayStaticSiteExtraFileDataModel(**data) @@ -100,9 +100,12 @@ def test_init_default(self) -> None: """Test init default.""" obj = RunwayStaticSiteModuleOptionsDataModel() assert obj.build_output == "./" - assert not obj.build_steps and isinstance(obj.build_steps, list) - assert not obj.extra_files and isinstance(obj.extra_files, list) - assert not obj.pre_build_steps and isinstance(obj.pre_build_steps, list) + assert not obj.build_steps + assert isinstance(obj.build_steps, list) + assert not obj.extra_files + assert isinstance(obj.extra_files, list) + assert not obj.pre_build_steps + assert 
isinstance(obj.pre_build_steps, list) assert obj.source_hashing == RunwayStaticSiteSourceHashingDataModel() def test_init_extra(self) -> None: @@ -201,7 +204,8 @@ class TestRunwayStaticSiteSourceHashingDirectoryDataModel: def test_init_default(self, tmp_path: Path) -> None: """Test init default.""" obj = RunwayStaticSiteSourceHashingDirectoryDataModel(path=tmp_path) - assert not obj.exclusions and isinstance(obj.exclusions, list) + assert not obj.exclusions + assert isinstance(obj.exclusions, list) assert obj.path == tmp_path def test_init_extra(self, tmp_path: Path) -> None: diff --git a/tests/unit/module/staticsite/parameters/test_models.py b/tests/unit/module/staticsite/parameters/test_models.py index 100768b41..712033827 100644 --- a/tests/unit/module/staticsite/parameters/test_models.py +++ b/tests/unit/module/staticsite/parameters/test_models.py @@ -1,7 +1,7 @@ """Test runway.module.staticsite.parameters.models.""" # pyright: basic -from typing import Any, Dict, cast +from typing import Any, cast import pytest from pydantic import ValidationError @@ -57,12 +57,12 @@ def test_init_extra(self) -> None: @pytest.mark.parametrize( "data", [ - cast(Dict[str, Any], {}), + cast(dict[str, Any], {}), {"arn": "aws:arn:lambda:us-east-1:function:test"}, {"type": "origin-request"}, ], ) - def test_init_required(self, data: Dict[str, Any]) -> None: + def test_init_required(self, data: dict[str, Any]) -> None: """Test init required.""" with pytest.raises(ValidationError): RunwayStaticSiteLambdaFunctionAssociationDataModel.parse_obj(data) @@ -115,7 +115,7 @@ def test_init_default(self) -> None: "font-src 'self' 'unsafe-inline' 'unsafe-eval' data: https:; " "object-src 'none'; " "connect-src 'self' https://*.amazonaws.com https://*.amazoncognito.com", - "Strict-Transport-Security": "max-age=31536000; " "includeSubdomains; " "preload", + "Strict-Transport-Security": "max-age=31536000; includeSubdomains; preload", "Referrer-Policy": "same-origin", "X-XSS-Protection": "1; mode=block", "X-Frame-Options": "DENY", diff --git a/tests/unit/module/staticsite/test_handler.py b/tests/unit/module/staticsite/test_handler.py index f4b337cb3..520e4de8d 100644 --- a/tests/unit/module/staticsite/test_handler.py +++ b/tests/unit/module/staticsite/test_handler.py @@ -5,10 +5,10 @@ import logging import platform import string -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any +from unittest.mock import Mock import pytest -from mock import Mock from runway.module.staticsite.handler import StaticSite from runway.module.staticsite.options.components import StaticSiteOptions @@ -19,7 +19,6 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from runway.context import RunwayContext @@ -97,7 +96,7 @@ def test_create_cleanup_yaml( def test_create_dependencies_yaml( self, expected_yaml: Path, - parameters: Dict[str, Any], + parameters: dict[str, Any], runway_context: RunwayContext, test_file_number: str, tmp_path: Path, @@ -135,7 +134,7 @@ def test_create_dependencies_yaml( def test_create_staticsite_yaml( self, expected_yaml: Path, - parameters: Dict[str, Any], + parameters: dict[str, Any], runway_context: RunwayContext, test_file_number: str, tmp_path: Path, @@ -256,7 +255,7 @@ def test_get_client_updater_variables( assert result["supported_identity_providers"] == obj.parameters.supported_identity_providers def test_init( - self, caplog: LogCaptureFixture, runway_context: RunwayContext, tmp_path: Path + self, caplog: 
pytest.LogCaptureFixture, runway_context: RunwayContext, tmp_path: Path ) -> None: """Test init.""" caplog.set_level(logging.WARNING, logger=MODULE) diff --git a/tests/unit/module/test_base.py b/tests/unit/module/test_base.py index eb41f3821..39d323b46 100644 --- a/tests/unit/module/test_base.py +++ b/tests/unit/module/test_base.py @@ -4,7 +4,7 @@ import logging from contextlib import contextmanager -from typing import TYPE_CHECKING, Any, Dict, Iterator, List, cast +from typing import TYPE_CHECKING, Any, cast import pytest @@ -12,9 +12,9 @@ from runway.module.base import NPM_BIN, ModuleOptions, RunwayModule, RunwayModuleNpm if TYPE_CHECKING: + from collections.abc import Iterator from pathlib import Path - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from pytest_subprocess import FakeProcess @@ -44,7 +44,9 @@ def test_get(self) -> None: class TestRunwayModuleNpm: """Test runway.module.base.RunwayModuleNpm.""" - def test_check_for_npm_missing(self, caplog: LogCaptureFixture, mocker: MockerFixture) -> None: + def test_check_for_npm_missing( + self, caplog: pytest.LogCaptureFixture, mocker: MockerFixture + ) -> None: """Test check_for_npm missing.""" caplog.set_level(logging.ERROR, logger=MODULE) mock_which = mocker.patch(f"{MODULE}.which", return_value=False) @@ -101,7 +103,7 @@ def test_init( def test_log_npm_command( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -121,7 +123,7 @@ def test_log_npm_command( @pytest.mark.parametrize("colorize", [True, False]) def test_npm_install_ci( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, colorize: bool, fake_process: FakeProcess, mocker: MockerFixture, @@ -135,7 +137,7 @@ def test_npm_install_ci( mocker.patch.object(RunwayModuleNpm, "warn_on_boto_env_vars") runway_context.env.ci = True runway_context.env.vars["RUNWAY_COLORIZE"] = str(colorize) - cmd: List[Any] = [NPM_BIN, "ci"] + cmd: list[Any] = [NPM_BIN, "ci"] if not colorize: cmd.append("--no-color") fake_process.register_subprocess(cmd, returncode=0) @@ -157,7 +159,7 @@ def test_npm_install_ci( ) def test_npm_install_install( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, colorize: bool, fake_process: FakeProcess, is_noninteractive: bool, @@ -173,7 +175,7 @@ def test_npm_install_install( mocker.patch.object(RunwayModuleNpm, "warn_on_boto_env_vars") runway_context.env.ci = is_noninteractive runway_context.env.vars["RUNWAY_COLORIZE"] = str(colorize) - cmd: List[Any] = [NPM_BIN, "install"] + cmd: list[Any] = [NPM_BIN, "install"] if not colorize: cmd.append("--no-color") fake_process.register_subprocess(cmd, returncode=0) @@ -183,7 +185,7 @@ def test_npm_install_install( def test_npm_install_skip( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -199,7 +201,7 @@ def test_npm_install_skip( def test_package_json_missing( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -211,12 +213,12 @@ def test_package_json_missing( obj = RunwayModuleNpm(context=runway_context, module_root=tmp_path) assert obj.package_json_missing() - assert ["module is missing package.json"] == caplog.messages + assert caplog.messages == ["module is missing package.json"] (tmp_path / "package.json").touch() assert not obj.package_json_missing() - def 
test_warn_on_boto_env_vars(self, caplog: LogCaptureFixture) -> None: + def test_warn_on_boto_env_vars(self, caplog: pytest.LogCaptureFixture) -> None: """Test warn_on_boto_env_vars.""" caplog.set_level(logging.WARNING, logger=MODULE) RunwayModuleNpm.warn_on_boto_env_vars({"AWS_DEFAULT_PROFILE": "something"}) @@ -229,13 +231,13 @@ def test_warn_on_boto_env_vars(self, caplog: LogCaptureFixture) -> None: @pytest.mark.parametrize( "env_vars", [ - cast(Dict[str, str], {}), + cast(dict[str, str], {}), {"AWS_PROFILE": "something"}, {"AWS_DEFAULT_PROFILE": "something", "AWS_PROFILE": "something"}, ], ) def test_warn_on_boto_env_vars_no_warn( - self, caplog: LogCaptureFixture, env_vars: Dict[str, str] + self, caplog: pytest.LogCaptureFixture, env_vars: dict[str, str] ) -> None: """Test warn_on_boto_env_vars no warn.""" caplog.set_level(logging.WARNING, logger=MODULE) diff --git a/tests/unit/module/test_cdk.py b/tests/unit/module/test_cdk.py index 55b09fcae..7d5cfc6ac 100644 --- a/tests/unit/module/test_cdk.py +++ b/tests/unit/module/test_cdk.py @@ -4,11 +4,10 @@ import logging from subprocess import CalledProcessError -from typing import TYPE_CHECKING, Any, Dict, List, Optional -from unittest.mock import call +from typing import TYPE_CHECKING, Any, Optional +from unittest.mock import Mock, call import pytest -from mock import Mock from runway.config.models.runway.options.cdk import RunwayCdkModuleOptionsDataModel from runway.module.cdk import CloudDevelopmentKit, CloudDevelopmentKitOptions @@ -16,7 +15,6 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from pytest_subprocess import FakeProcess from pytest_subprocess.fake_popen import FakePopen @@ -33,7 +31,7 @@ class TestCloudDevelopmentKit: def test_cdk_bootstrap( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: RunwayContext, tmp_path: Path, @@ -70,7 +68,7 @@ def test_cdk_bootstrap_raise_called_process_error( def test_cdk_deploy( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: RunwayContext, tmp_path: Path, @@ -105,7 +103,7 @@ def test_cdk_deploy_raise_called_process_error( def test_cdk_destroy( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: RunwayContext, tmp_path: Path, @@ -140,7 +138,7 @@ def test_cdk_destroy_raise_called_process_error( def test_cdk_diff( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: RunwayContext, tmp_path: Path, @@ -245,7 +243,7 @@ def test_cdk_list_raise_called_process_error( def test_cli_args( self, debug: bool, - expected: List[str], + expected: list[str], no_color: bool, tmp_path: Path, verbose: bool, @@ -273,9 +271,9 @@ def test_cli_args( ) def test_cli_args_context( self, - expected: List[str], + expected: list[str], runway_context: RunwayContext, - parameters: Dict[str, Any], + parameters: dict[str, Any], tmp_path: Path, ) -> None: """Test cli_args_context.""" @@ -383,10 +381,10 @@ def test_destroy( ) def test_gen_cmd( self, - args_list: Optional[List[str]], + args_list: Optional[list[str]], command: CdkCommandTypeDef, env_ci: bool, - expected: List[str], + expected: list[str], include_context: bool, mocker: MockerFixture, runway_context: RunwayContext, @@ -467,8 +465,8 @@ def test_plan( def test_run_build_steps_empty( self, - caplog: LogCaptureFixture, - fake_process: FakeProcess, + caplog: 
pytest.LogCaptureFixture, + fake_process: FakeProcess, # noqa: ARG002 runway_context: RunwayContext, tmp_path: Path, ) -> None: @@ -482,10 +480,10 @@ def test_run_build_steps_empty( def test_run_build_steps_linux( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, fake_process: FakeProcess, mocker: MockerFixture, - platform_linux: None, + platform_linux: None, # noqa: ARG002 runway_context: RunwayContext, tmp_path: Path, ) -> None: @@ -505,9 +503,9 @@ def test_run_build_steps_linux( def test_run_build_steps_raise_file_not_found( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, fake_process: FakeProcess, - platform_linux: None, + platform_linux: None, # noqa: ARG002 runway_context: RunwayContext, tmp_path: Path, ) -> None: @@ -531,7 +529,7 @@ def _callback(process: FakePopen) -> None: def test_run_build_steps_raise_called_process_error( self, fake_process: FakeProcess, - platform_linux: None, + platform_linux: None, # noqa: ARG002 runway_context: RunwayContext, tmp_path: Path, ) -> None: @@ -547,10 +545,10 @@ def test_run_build_steps_raise_called_process_error( def test_run_build_steps_windows( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, fake_process: FakeProcess, mocker: MockerFixture, - platform_windows: None, + platform_windows: None, # noqa: ARG002 runway_context: RunwayContext, tmp_path: Path, ) -> None: @@ -581,7 +579,7 @@ def test_run_build_steps_windows( ) def test_skip( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, expected: bool, explicitly_enabled: bool, mocker: MockerFixture, diff --git a/tests/unit/module/test_cloudformation.py b/tests/unit/module/test_cloudformation.py index 0906378db..575e7eeeb 100644 --- a/tests/unit/module/test_cloudformation.py +++ b/tests/unit/module/test_cloudformation.py @@ -3,7 +3,7 @@ # pyright: basic from __future__ import annotations -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any from runway.core.components import DeployEnvironment from runway.module.cloudformation import CloudFormation @@ -25,7 +25,7 @@ class TestCloudFormation: """Test runway.module.cloudformation.CloudFormation.""" @property - def generic_parameters(self) -> Dict[str, Any]: + def generic_parameters(self) -> dict[str, Any]: """Return generic module options.""" return {"test_key": "test-value"} diff --git a/tests/unit/module/test_k8s.py b/tests/unit/module/test_k8s.py index 5b2bed4ed..20bad3a1f 100644 --- a/tests/unit/module/test_k8s.py +++ b/tests/unit/module/test_k8s.py @@ -5,7 +5,7 @@ import logging from subprocess import CalledProcessError -from typing import TYPE_CHECKING, List, Optional +from typing import TYPE_CHECKING, Optional import pytest import yaml @@ -17,7 +17,6 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from pytest_subprocess import FakeProcess @@ -86,9 +85,9 @@ def test_destroy( ) def test_gen_cmd( self, - args_list: Optional[List[str]], + args_list: Optional[list[str]], command: KubectlCommandTypeDef, - expected: List[str], + expected: list[str], mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -107,7 +106,7 @@ def test_gen_cmd( def test_init( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, runway_context: MockRunwayContext, tmp_path: Path, ) -> None: @@ -128,7 +127,7 @@ def test_kbenv( def test_kubectl_apply( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, 
runway_context: MockRunwayContext, tmp_path: Path, @@ -202,7 +201,7 @@ def test_kubectl_bin_handle_version_not_specified_exit( def test_kubectl_delete( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -237,7 +236,7 @@ def test_kubectl_delete_raise_called_process_error( def test_kubectl_kustomize( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, fake_process: FakeProcess, mocker: MockerFixture, runway_context: MockRunwayContext, @@ -280,7 +279,7 @@ def test_skip(self, runway_context: MockRunwayContext, tmp_path: Path) -> None: @pytest.mark.parametrize("skip", [False, True]) def test_plan( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, skip: bool, @@ -323,7 +322,7 @@ def test_gen_overlay_dirs(self) -> None: (["test2/kustomization.yaml"], "test"), ], ) - def test_get_overlay_dir(self, expected: str, files: List[str], tmp_path: Path) -> None: + def test_get_overlay_dir(self, expected: str, files: list[str], tmp_path: Path) -> None: """Test get_overlay_dir.""" for f in files: tmp_file = tmp_path / f diff --git a/tests/unit/module/test_serverless.py b/tests/unit/module/test_serverless.py index 83ade2cc7..4929f63ff 100644 --- a/tests/unit/module/test_serverless.py +++ b/tests/unit/module/test_serverless.py @@ -5,11 +5,11 @@ import logging from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast +from typing import TYPE_CHECKING, Any, Optional, Union, cast +from unittest.mock import ANY, MagicMock, Mock, call import pytest import yaml -from mock import ANY, MagicMock, Mock, call from pydantic import ValidationError from runway.config.models.runway.options.serverless import ( @@ -23,7 +23,6 @@ ) if TYPE_CHECKING: - from pytest import LogCaptureFixture from pytest_mock import MockerFixture from pytest_subprocess.fake_process import FakeProcess @@ -54,7 +53,7 @@ def test___init__(self, runway_context: MockRunwayContext, tmp_path: Path) -> No def test__deploy_package( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tempfile_temporary_directory: MagicMock, @@ -71,7 +70,7 @@ def test__deploy_package( def test__deploy_package_promotezip( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tempfile_temporary_directory: MagicMock, @@ -272,7 +271,7 @@ def test_env_file(self, runway_context: MockRunwayContext, tmp_path: Path) -> No def test_extend_serverless_yml( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -356,7 +355,7 @@ def test_gen_cmd( def test_init( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, runway_context: MockRunwayContext, tmp_path: Path, ) -> None: @@ -371,7 +370,7 @@ def test_init( def test_plan( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, runway_context: MockRunwayContext, tmp_path: Path, ) -> None: @@ -383,7 +382,7 @@ def test_plan( def test_skip( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -391,7 +390,7 @@ def test_skip( """Test skip.""" caplog.set_level(logging.INFO, logger="runway") obj = Serverless(runway_context, module_root=tmp_path) - 
mocker.patch.object(obj, "package_json_missing", lambda: True) + mocker.patch.object(obj, "package_json_missing", return_value=True) mocker.patch.object(obj, "env_file", False) assert obj.skip @@ -401,7 +400,7 @@ def test_skip( ] == caplog.messages caplog.clear() - mocker.patch.object(obj, "package_json_missing", lambda: False) + mocker.patch.object(obj, "package_json_missing", return_value=False) assert obj.skip assert [ f"{tmp_path.name}:skipped; config file for this stage/region not found" @@ -623,7 +622,7 @@ def test_source_hash( self, mocker: MockerFixture, runway_context: MockRunwayContext, - service: Union[Dict[str, Any], str], + service: Union[dict[str, Any], str], service_name: str, tmp_path: Path, ) -> None: @@ -653,7 +652,7 @@ def test_source_hash_individually( self, mocker: MockerFixture, runway_context: MockRunwayContext, - service: Union[Dict[str, Any], str], + service: Union[dict[str, Any], str], tmp_path: Path, ) -> None: """Test source_hash.""" @@ -786,7 +785,7 @@ class TestServerlessOptions: (["-u"], ["-u"]), ], ) - def test_args(self, args: List[str], expected: List[str]) -> None: + def test_args(self, args: list[str], expected: list[str]) -> None: """Test args.""" obj = ServerlessOptions.parse_obj({"args": args}) assert obj.args == expected @@ -829,19 +828,19 @@ def test_args(self, args: List[str], expected: List[str]) -> None: ), ], ) - def test_parse(self, config: Dict[str, Any]) -> None: + def test_parse(self, config: dict[str, Any]) -> None: """Test parse.""" obj = ServerlessOptions.parse_obj(config) assert obj.args == config.get("args", []) assert obj.extend_serverless_yml == config.get( - "extend_serverless_yml", cast(Dict[str, Any], {}) + "extend_serverless_yml", cast(dict[str, Any], {}) ) if config.get("promotezip"): assert obj.promotezip else: assert not obj.promotezip - assert obj.promotezip.bucketname == config.get("promotezip", cast(Dict[str, Any], {})).get( + assert obj.promotezip.bucketname == config.get("promotezip", cast(dict[str, Any], {})).get( "bucketname" ) assert obj.skip_npm_ci == config.get("skip_npm_ci", False) diff --git a/tests/unit/module/test_terraform.py b/tests/unit/module/test_terraform.py index 492935e5f..d61ec5e8e 100644 --- a/tests/unit/module/test_terraform.py +++ b/tests/unit/module/test_terraform.py @@ -6,10 +6,10 @@ import json import logging import subprocess -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union +from typing import TYPE_CHECKING, Any, Optional, Union +from unittest.mock import MagicMock, Mock import pytest -from mock import MagicMock, Mock from runway._logging import LogLevels from runway.module.terraform import ( @@ -24,7 +24,6 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture, MonkeyPatch from pytest_mock import MockerFixture from ..factories import MockRunwayContext @@ -89,7 +88,7 @@ def test___init___options_workspace( def test_auto_tfvars( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -121,7 +120,7 @@ def test_auto_tfvars( def test_auto_tfvars_unsupported_version( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -143,7 +142,7 @@ def test_auto_tfvars_unsupported_version( def test_cleanup_dot_terraform( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, runway_context: MockRunwayContext, tmp_path: Path, ) -> None: @@ -193,7 +192,7 @@ def 
test_current_workspace( ) def test_env_file( self, - filename: Union[List[str], str], + filename: Union[list[str], str], expected: Optional[str], runway_context: MockRunwayContext, tmp_path: Path, @@ -212,10 +211,10 @@ def test_env_file( assert not obj.env_file @pytest.mark.parametrize("action", ["deploy", "destroy", "init", "plan"]) - def test_execute( + def test_execute( # noqa: PLR0915 self, action: str, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -301,9 +300,9 @@ def test_execute( ) def test_gen_command( self, - command: Union[List[str], str], - args_list: Optional[List[str]], - expected: List[str], + command: Union[list[str], str], + args_list: Optional[list[str]], + expected: list[str], mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -322,7 +321,7 @@ def test_gen_command( def test_handle_backend_no_handler( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -330,7 +329,7 @@ def test_handle_backend_no_handler( """Test handle_backend with no handler.""" caplog.set_level(LogLevels.DEBUG, logger=MODULE) mock_get_full_configuration = MagicMock(return_value={}) - backend: Dict[str, Union[Dict[str, Any], str]] = { + backend: dict[str, Union[dict[str, Any], str]] = { "type": "unsupported", "config": {}, } @@ -350,7 +349,7 @@ def test_handle_backend_no_handler( def test_handle_backend_no_type( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -364,8 +363,8 @@ def test_handle_backend_no_type( def test_handle_backend_remote_name( self, - caplog: LogCaptureFixture, - monkeypatch: MonkeyPatch, + caplog: pytest.LogCaptureFixture, + monkeypatch: pytest.MonkeyPatch, runway_context: MockRunwayContext, tmp_path: Path, ) -> None: @@ -391,8 +390,8 @@ def test_handle_backend_remote_name( def test_handle_backend_remote_prefix( self, - caplog: LogCaptureFixture, - monkeypatch: MonkeyPatch, + caplog: pytest.LogCaptureFixture, + monkeypatch: pytest.MonkeyPatch, runway_context: MockRunwayContext, tmp_path: Path, ) -> None: @@ -419,8 +418,8 @@ def test_handle_backend_remote_prefix( def test_handle_backend_remote_undetermined( self, - caplog: LogCaptureFixture, - monkeypatch: MonkeyPatch, + caplog: pytest.LogCaptureFixture, + monkeypatch: pytest.MonkeyPatch, runway_context: MockRunwayContext, tmp_path: Path, ) -> None: @@ -428,7 +427,7 @@ def test_handle_backend_remote_undetermined( caplog.set_level(LogLevels.WARNING, logger=MODULE) monkeypatch.delenv("TF_WORKSPACE", raising=False) mock_get_full_configuration = MagicMock(return_value={}) - backend: Dict[str, Union[Dict[str, Any], str]] = { + backend: dict[str, Union[dict[str, Any], str]] = { "type": "remote", "config": {}, } @@ -524,7 +523,7 @@ def test_tf_bin_global( def test_tf_bin_missing( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, mocker: MockerFixture, runway_context: MockRunwayContext, tmp_path: Path, @@ -593,7 +592,7 @@ def test_terraform_apply( ) def test_terraform_destroy( self, - expected_options: List[str], + expected_options: list[str], expected_subcmd: str, mocker: MockerFixture, runway_context: MockRunwayContext, @@ -643,7 +642,7 @@ def test_terraform_init( Terraform, "gen_command", return_value=["mock_gen_command"] ) mock_run_command = mocker.patch(f"{MODULE}.run_module_command") - options: Dict[str, 
Union[Dict[str, Any], str]] = { + options: dict[str, Union[dict[str, Any], str]] = { "args": {"init": ["init_arg"]}, "terraform_backend_config": {"bucket": "name"}, } @@ -911,7 +910,7 @@ def test_backend_config( ], ) def test_parse_obj( - self, config: Dict[str, Any], runway_context: MockRunwayContext, tmp_path: Path + self, config: dict[str, Any], runway_context: MockRunwayContext, tmp_path: Path ) -> None: """Test parse_obj.""" obj = TerraformOptions.parse_obj( @@ -973,25 +972,17 @@ def test_get_full_configuration( }, ["bucket=test-bucket", "dynamodb_table=test-table", "region=us-east-1"], ), - ( - { - "bucket": "test-bucket", - "dynamodb_table": "test-table", - "region": "us-east-1", - }, - ["bucket=test-bucket", "dynamodb_table=test-table", "region=us-east-1"], - ), ], ) def test_init_args( self, - expected_items: List[str], - input_data: Dict[str, str], + expected_items: list[str], + input_data: dict[str, str], runway_context: MockRunwayContext, tmp_path: Path, ) -> None: """Test init_args.""" - expected: List[str] = [] + expected: list[str] = [] for i in expected_items: expected.extend(["-backend-config", i]) assert ( @@ -1003,7 +994,7 @@ def test_init_args( def test_init_args_file( self, - caplog: LogCaptureFixture, + caplog: pytest.LogCaptureFixture, runway_context: MockRunwayContext, tmp_path: Path, ) -> None: @@ -1047,7 +1038,7 @@ def test_gen_backend_filenames(self) -> None: ], ) def test_get_backend_file( - self, tmp_path: Path, filename: Union[List[str], str], expected: Optional[str] + self, tmp_path: Path, filename: Union[list[str], str], expected: Optional[str] ) -> None: """Test get_backend_file.""" if isinstance(filename, list): @@ -1073,7 +1064,7 @@ def test_get_backend_file( ) def test_parse_obj( self, - config: Dict[str, str], + config: dict[str, str], expected_region: str, mocker: MockerFixture, runway_context: MockRunwayContext, @@ -1082,11 +1073,11 @@ def test_parse_obj( """Test parse_obj.""" def assert_get_backend_file_args( - _cls: Type[TerraformBackendConfig], + _cls: type[TerraformBackendConfig], path: Path, env_name: str, env_region: str, - ): + ) -> str: """Assert args passed to the method during parse.""" assert path == tmp_path assert env_name == "test" diff --git a/tests/unit/module/test_utils.py b/tests/unit/module/test_utils.py index aff1ba523..91b7cbaaf 100644 --- a/tests/unit/module/test_utils.py +++ b/tests/unit/module/test_utils.py @@ -2,9 +2,8 @@ from __future__ import annotations -from pathlib import Path from subprocess import CalledProcessError -from typing import TYPE_CHECKING, Any, List +from typing import TYPE_CHECKING, Any import pytest @@ -18,6 +17,8 @@ ) if TYPE_CHECKING: + from pathlib import Path + from pytest_mock import MockerFixture from pytest_subprocess import FakeProcess @@ -34,7 +35,7 @@ ], ) def test_format_npm_command_for_logging_darwin( - command: List[str], expected: str, platform_darwin: None + command: list[str], expected: str, platform_darwin: None # noqa: ARG001 ) -> None: """Test format_npm_command_for_logging on Darwin/macOS.""" assert format_npm_command_for_logging(command) == expected @@ -50,7 +51,7 @@ def test_format_npm_command_for_logging_darwin( ], ) def test_format_npm_command_for_logging_windows( - command: List[str], expected: str, platform_windows: None + command: list[str], expected: str, platform_windows: None # noqa: ARG001 ) -> None: """Test format_npm_command_for_logging on windows.""" assert format_npm_command_for_logging(command) == expected @@ -60,7 +61,7 @@ def test_format_npm_command_for_logging_windows( 
"command, opts", [("test", []), ("test", ["arg1"]), ("test", ["arg1", "arg2"])] ) def test_generate_node_command( - command: str, mocker: MockerFixture, opts: List[str], tmp_path: Path + command: str, mocker: MockerFixture, opts: list[str], tmp_path: Path ) -> None: """Test generate_node_command.""" mock_which = mocker.patch(f"{MODULE}.which", return_value=False) @@ -81,9 +82,9 @@ def test_generate_node_command( ) def test_generate_node_command_npx( command: str, - expected: List[str], + expected: list[str], mocker: MockerFixture, - opts: List[str], + opts: list[str], tmp_path: Path, ) -> None: """Test generate_node_command.""" @@ -166,7 +167,7 @@ def test_use_npm_ci( (tmp_path / "package-lock.json").touch() if has_shrinkwrap: (tmp_path / "package-lock.json").touch() - cmd: List[Any] = [NPM_BIN, "ci", "-h"] + cmd: list[Any] = [NPM_BIN, "ci", "-h"] fake_process.register_subprocess(cmd, returncode=exit_code) assert use_npm_ci(tmp_path) is expected diff --git a/tests/unit/test_mixins.py b/tests/unit/test_mixins.py index 896bfb256..eaa974a08 100644 --- a/tests/unit/test_mixins.py +++ b/tests/unit/test_mixins.py @@ -3,10 +3,10 @@ from __future__ import annotations import subprocess -from typing import TYPE_CHECKING, Any, Dict, List, Optional +from typing import TYPE_CHECKING, Any, Optional +from unittest.mock import Mock import pytest -from mock import Mock from runway.compat import cached_property from runway.mixins import CliInterfaceMixin, DelCachedPropMixin @@ -34,7 +34,7 @@ def __init__(self, context: CfnginContext, cwd: Path) -> None: @pytest.mark.parametrize("env", [None, {"foo": "bar"}]) def test__run_command( - self, env: Optional[Dict[str, str]], mocker: MockerFixture, tmp_path: Path + self, env: Optional[dict[str, str]], mocker: MockerFixture, tmp_path: Path ) -> None: """Test _run_command.""" ctx_env = {"foo": "bar", "bar": "foo"} @@ -104,9 +104,9 @@ def test_found_in_path(self, mocker: MockerFixture, return_value: bool) -> None: ) def test_generate_command( self, - expected: List[str], + expected: list[str], mocker: MockerFixture, - provided: Dict[str, Any], + provided: dict[str, Any], ) -> None: """Test generate_command.""" exe = mocker.patch.object(self.Kls, "EXECUTABLE", "test.exe", create=True) @@ -116,7 +116,9 @@ def test_generate_command( *expected, ] - def test_list2cmdline_darwin(self, mocker: MockerFixture, platform_darwin: None) -> None: + def test_list2cmdline_darwin( + self, mocker: MockerFixture, platform_darwin: None # noqa: ARG002 + ) -> None: """Test list2cmdline on Darwin/macOS systems.""" mock_list2cmdline = mocker.patch(f"{MODULE}.subprocess.list2cmdline") mock_join = mocker.patch(f"{MODULE}.shlex_join", return_value="success") @@ -124,7 +126,9 @@ def test_list2cmdline_darwin(self, mocker: MockerFixture, platform_darwin: None) mock_list2cmdline.assert_not_called() mock_join.assert_called_once_with("foo") - def test_list2cmdline_linus(self, mocker: MockerFixture, platform_linux: None) -> None: + def test_list2cmdline_linus( + self, mocker: MockerFixture, platform_linux: None # noqa: ARG002 + ) -> None: """Test list2cmdline on Linux systems.""" mock_list2cmdline = mocker.patch(f"{MODULE}.subprocess.list2cmdline") mock_join = mocker.patch(f"{MODULE}.shlex_join", return_value="success") @@ -132,7 +136,9 @@ def test_list2cmdline_linus(self, mocker: MockerFixture, platform_linux: None) - mock_list2cmdline.assert_not_called() mock_join.assert_called_once_with("foo") - def test_list2cmdline_windows(self, mocker: MockerFixture, platform_windows: None) -> None: + def 
test_list2cmdline_windows( + self, mocker: MockerFixture, platform_windows: None # noqa: ARG002 + ) -> None: """Test list2cmdline on Windows systems.""" mock_list2cmdline = mocker.patch( f"{MODULE}.subprocess.list2cmdline", return_value="success" diff --git a/tests/unit/test_variables.py b/tests/unit/test_variables.py index 7d153b6c4..2bb4a5b63 100644 --- a/tests/unit/test_variables.py +++ b/tests/unit/test_variables.py @@ -3,13 +3,12 @@ # pyright: basic from __future__ import annotations -from typing import TYPE_CHECKING, Any, ClassVar, List, Union +from typing import TYPE_CHECKING, Any, ClassVar, Union +from unittest.mock import MagicMock, call import pytest -from mock import MagicMock, call from pydantic import BaseModel -from runway.context import CfnginContext, RunwayContext from runway.exceptions import ( FailedLookup, FailedVariableLookup, @@ -37,20 +36,18 @@ if TYPE_CHECKING: from pytest_mock import MockerFixture - from .factories import MockCFNginContext + from .factories import MockCfnginContext class MockLookupHandler(LookupHandler): """Mock lookup handler.""" return_value: ClassVar[Any] = "resolved" - side_effect: ClassVar[Union[Any, List[Any]]] = None + side_effect: ClassVar[Union[Any, list[Any]]] = None @classmethod def handle( cls, - value: str, - context: Union[CfnginContext, RunwayContext], *__args: Any, **__kwargs: Any, ) -> Any: @@ -62,24 +59,21 @@ def handle( return cls._handle_side_effect(cls.side_effect) @classmethod - def _handle_side_effect(cls, side_effect: Any): + def _handle_side_effect(cls, side_effect: Any) -> Any: """Handle side_effect.""" if isinstance(side_effect, BaseException): raise side_effect return side_effect -@pytest.fixture(autouse=True, scope="function") +@pytest.fixture(autouse=True) def patch_lookups(mocker: MockerFixture) -> None: """Patch registered lookups.""" for registry in [CFNGIN_LOOKUP_HANDLERS, RUNWAY_LOOKUP_HANDLERS]: - # mocked = {k: MockLookupHandler for k in registry} - # mocked["test"] = MockLookupHandler - # mocker.patch.dict(registry, mocked) mocker.patch.dict(registry, {"test": MockLookupHandler}) -def test_resolve_variables(cfngin_context: MockCFNginContext) -> None: +def test_resolve_variables(cfngin_context: MockCfnginContext) -> None: """Test resolve_variables.""" variable = MagicMock() assert not resolve_variables([variable], cfngin_context) @@ -97,7 +91,7 @@ def test_dependencies(self, mocker: MockerFixture) -> None: ) assert Variable("Param", "val").dependencies == {"test"} - def test_get(self, mocker: MockerFixture) -> None: + def test_get(self) -> None: """Test get.""" obj = Variable("Para", {"key": "val"}) assert obj.get("missing") is None @@ -221,7 +215,7 @@ def test_resolve_failed(self, mocker: MockerFixture) -> None: assert excinfo.value.cause == lookup_error assert excinfo.value.variable == obj - def test_repr(self) -> None: + def test___repr__(self) -> None: """Test __repr__.""" assert repr(Variable("Param", "val")) == "Variable[Param=val]" @@ -244,29 +238,29 @@ def test_simple_lookup(self) -> None: assert var.resolved is True assert var.value == "resolved" - def test_value_unresolved(self, mocker: MockerFixture): + def test_value_unresolved(self, mocker: MockerFixture) -> None: """Test value UnresolvedVariable.""" mocker.patch.object(VariableValue, "parse_obj", return_value=MagicMock(value="value")) def test_value(self) -> None: """Test value.""" with pytest.raises(UnresolvedVariable): - Variable("Param", "${test query}").value + Variable("Param", "${test query}").value # noqa: B018 class TestVariableValue: 
"""Test runway.variables.VariableValue.""" + def test___iter__(self) -> None: + """Test __iter__.""" + with pytest.raises(NotImplementedError): + iter(VariableValue()) + def test_dependencies(self) -> None: """Test dependencies.""" obj = VariableValue() assert obj.dependencies == set() - def test_iter(self) -> None: - """Test __iter__.""" - with pytest.raises(NotImplementedError): - iter(VariableValue()) - def test_parse_obj_dict_empty(self) -> None: """Test parse_obj dict empty.""" assert isinstance(VariableValue.parse_obj({}), VariableValueDict) @@ -306,9 +300,9 @@ def test_repr(self) -> None: def test_resolved(self) -> None: """Test resolved.""" with pytest.raises(NotImplementedError): - VariableValue().resolved + VariableValue().resolved # noqa: B018 - def test_resolve(self, cfngin_context: MockCFNginContext) -> None: + def test_resolve(self, cfngin_context: MockCfnginContext) -> None: """Test resolve.""" assert not VariableValue().resolve(context=cfngin_context) @@ -320,7 +314,7 @@ def test_simplified(self) -> None: def test_value(self) -> None: """Test value.""" with pytest.raises(NotImplementedError): - VariableValue().value + VariableValue().value # noqa: B018 class TestVariableValueConcatenation: @@ -378,7 +372,7 @@ def test_resolved(self) -> None: is False ) - def test_resolve(self, cfngin_context: MockCFNginContext, mocker: MockerFixture) -> None: + def test_resolve(self, cfngin_context: MockCfnginContext, mocker: MockerFixture) -> None: """Test resolve.""" mock_provider = MagicMock() mock_resolve = mocker.patch.object(VariableValueLiteral, "resolve", return_value=None) @@ -448,27 +442,36 @@ def test_simplified_literal_str(self) -> None: == "foobar" ) - def test_value_multiple(self) -> None: - """Test multiple.""" - assert ( - VariableValueConcatenation( - [VariableValueLiteral("foo"), VariableValueLiteral("bar")] - ).value - == "foobar" - ) - assert ( - VariableValueConcatenation( - [VariableValueLiteral(13), VariableValueLiteral("/test")] # type: ignore - ).value - == "13/test" - ) - assert ( - VariableValueConcatenation([VariableValueLiteral(5), VariableValueLiteral(13)]).value - == "513" - ) + @pytest.mark.parametrize( + "variable, expected", + [ + ( + VariableValueConcatenation( + [VariableValueLiteral("foo"), VariableValueLiteral("bar")] + ), + "foobar", + ), + ( + VariableValueConcatenation( + [VariableValueLiteral(13), VariableValueLiteral("/test")] + ), + "13/test", + ), + ( + VariableValueConcatenation([VariableValueLiteral(5), VariableValueLiteral(13)]), + "513", + ), + ], + ) + def test_value_multiple(self, expected: str, variable: VariableValueConcatenation[Any]) -> None: + """Test value multiple.""" + assert variable.value == expected + + def test_value_multiple_raise_concatenation_error(self) -> None: + """Test value multiple raises InvalidLookupConcatenationError.""" with pytest.raises(InvalidLookupConcatenation): - VariableValueConcatenation( - [VariableValueLiteral(True), VariableValueLiteral("test")] # type: ignore + VariableValueConcatenation( # noqa: B018 + [VariableValueLiteral(True), VariableValueLiteral(VariableValueLiteral)] # type: ignore ).value def test_value_single(self) -> None: @@ -533,7 +536,7 @@ def test_resolved(self, mocker: MockerFixture, resolved: bool) -> None: obj = VariableValueDict({"key": "val"}) assert obj.resolved is resolved - def test_resolve(self, cfngin_context: MockCFNginContext, mocker: MockerFixture) -> None: + def test_resolve(self, cfngin_context: MockCfnginContext, mocker: MockerFixture) -> None: """Test resolve.""" 
mock_literal = MagicMock() mock_provider = MagicMock() @@ -636,7 +639,7 @@ def test_resolved(self, mocker: MockerFixture, resolved: bool) -> None: obj = VariableValueList(["val0"]) assert obj.resolved is resolved - def test_resolve(self, cfngin_context: MockCFNginContext, mocker: MockerFixture) -> None: + def test_resolve(self, cfngin_context: MockCfnginContext, mocker: MockerFixture) -> None: """Test resolve.""" mock_literal = MagicMock() mock_provider = MagicMock() @@ -731,7 +734,7 @@ def test_dependencies(self, mocker: MockerFixture) -> None: obj = VariableValueLookup(VariableValueLiteral("test"), "query", MockLookupHandler) assert obj.dependencies == {"test"} - def test_init_convert_query(self) -> None: + def test___init___convert_query(self) -> None: """Test __init__ convert query.""" obj = VariableValueLookup( VariableValueLiteral("test"), "query", MockLookupHandler, "runway" @@ -739,13 +742,13 @@ def test_init_convert_query(self) -> None: assert isinstance(obj.lookup_query, VariableValueLiteral) assert obj.lookup_query.value == "query" - def test_init_find_handler_cfngin(self, mocker: MockerFixture) -> None: + def test___init___find_handler_cfngin(self, mocker: MockerFixture) -> None: """Test __init__ find handler cfngin.""" mocker.patch.dict(CFNGIN_LOOKUP_HANDLERS, {"test": "success"}) obj = VariableValueLookup(VariableValueLiteral("test"), VariableValueLiteral("query")) assert obj.handler == "success" - def test_init_find_handler_runway(self, mocker: MockerFixture) -> None: + def test___init___find_handler_runway(self, mocker: MockerFixture) -> None: """Test __init__ find handler runway.""" mocker.patch.dict(RUNWAY_LOOKUP_HANDLERS, {"test": "success"}) obj = VariableValueLookup( @@ -755,16 +758,16 @@ def test_init_find_handler_runway(self, mocker: MockerFixture) -> None: ) assert obj.handler == "success" - def test_init_find_handler_value_error(self) -> None: + def test___init___find_handler_value_error(self) -> None: """Test __init__ fund handler ValueError.""" - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Variable type must be one of"): VariableValueLookup( VariableValueLiteral("test"), VariableValueLiteral("query"), variable_type="invalid", # type: ignore ) - def test_init_find_handler_unknown_lookup_type(self) -> None: + def test___init___find_handler_unknown_lookup_type(self) -> None: """Test __init__ fund handler UnknownLookupType.""" with pytest.raises(UnknownLookupType): VariableValueLookup( @@ -772,7 +775,7 @@ def test_init_find_handler_unknown_lookup_type(self) -> None: VariableValueLiteral("query"), ) - def test_init(self) -> None: + def test___init__(self) -> None: """Test __init__.""" name = VariableValueLiteral("test") query = VariableValueLiteral("query") @@ -782,12 +785,12 @@ def test_init(self) -> None: assert obj.lookup_query == query assert obj.variable_type == "runway" - def test_iter(self) -> None: + def test___iter__(self) -> None: """Test __iter__.""" obj = VariableValueLookup(VariableValueLiteral("test"), "query") assert list(iter(obj)) == [obj] - def test_repr(self) -> None: + def test___repr__(self) -> None: """Test __repr__.""" obj = VariableValueLookup(VariableValueLiteral("test"), "query") assert repr(obj) == "Lookup[Literal[test] Literal[query]]" @@ -832,7 +835,7 @@ def test_simplified(self) -> None: obj = VariableValueLookup(VariableValueLiteral("test"), "query") assert obj.simplified == obj - def test_str(self) -> None: + def test___str__(self) -> None: """Test __str__.""" assert 
str(VariableValueLookup(VariableValueLiteral("test"), "query")) == "${test query}" @@ -841,7 +844,7 @@ def test_value(self) -> None: obj = VariableValueLookup(VariableValueLiteral("test"), "query") assert obj.resolved is False with pytest.raises(UnresolvedVariableValue): - obj.value + assert obj.value obj._resolve("success") assert obj.resolved is True assert obj.value == "success" @@ -908,7 +911,7 @@ def test_dependencies(self, mocker: MockerFixture) -> None: obj = VariableValuePydanticModel(self.ModelClass()) assert obj.dependencies == mock_literal.dependencies - def test_resolve(self, cfngin_context: MockCFNginContext, mocker: MockerFixture) -> None: + def test_resolve(self, cfngin_context: MockCfnginContext, mocker: MockerFixture) -> None: """Test resolve.""" mock_literal = MagicMock() mock_provider = MagicMock() diff --git a/tests/unit/utils/test_utils.py b/tests/unit/utils/test_utils.py index b75002c66..6d46929f3 100644 --- a/tests/unit/utils/test_utils.py +++ b/tests/unit/utils/test_utils.py @@ -1,6 +1,5 @@ """Test runway.utils.__init__.""" -# pyright: basic from __future__ import annotations import datetime @@ -10,12 +9,13 @@ import os import string import sys +from contextlib import suppress from copy import deepcopy from decimal import Decimal -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any, Optional, Union +from unittest.mock import MagicMock, patch import pytest -from mock import MagicMock, patch from runway.utils import ( JsonEncoder, @@ -33,7 +33,6 @@ if TYPE_CHECKING: from pathlib import Path - from pytest import LogCaptureFixture, MonkeyPatch from pytest_mock import MockerFixture MODULE = "runway.utils" @@ -127,13 +126,13 @@ def test_find_default(self) -> None: ), "default should be ignored" -TestParamsTypeDef = Optional[Union[Dict[str, str], List[str], str]] +TestParamsTypeDef = Optional[Union[dict[str, str], list[str], str]] class TestSafeHaven: """Test SafeHaven context manager.""" - TEST_PARAMS: List[TestParamsTypeDef] = [ + TEST_PARAMS: list[TestParamsTypeDef] = [ (None), ("string"), ({}), @@ -142,7 +141,7 @@ class TestSafeHaven: ] def test_context_manager_magic( - self, caplog: LogCaptureFixture, monkeypatch: MonkeyPatch + self, caplog: pytest.LogCaptureFixture, monkeypatch: pytest.MonkeyPatch ) -> None: """Test init and the attributes it sets.""" mock_reset_all = MagicMock() @@ -163,8 +162,8 @@ def test_context_manager_magic( def test_os_environ( self, provided: TestParamsTypeDef, - caplog: LogCaptureFixture, - monkeypatch: MonkeyPatch, + caplog: pytest.LogCaptureFixture, + monkeypatch: pytest.MonkeyPatch, ) -> None: """Test os.environ interactions.""" caplog.set_level(logging.DEBUG, "runway.SafeHaven") @@ -188,7 +187,9 @@ def test_os_environ( assert os.environ == orig_val assert caplog.messages == expected_logs - def test_reset_all(self, caplog: LogCaptureFixture, monkeypatch: MonkeyPatch) -> None: + def test_reset_all( + self, caplog: pytest.LogCaptureFixture, monkeypatch: pytest.MonkeyPatch + ) -> None: """Test reset_all.""" mock_method = MagicMock() caplog.set_level(logging.DEBUG, "runway.SafeHaven") @@ -214,8 +215,8 @@ def test_reset_all(self, caplog: LogCaptureFixture, monkeypatch: MonkeyPatch) -> def test_sys_argv( self, provided: TestParamsTypeDef, - caplog: LogCaptureFixture, - monkeypatch: MonkeyPatch, + caplog: pytest.LogCaptureFixture, + monkeypatch: pytest.MonkeyPatch, ) -> None: """Test sys.argv interactions.""" caplog.set_level(logging.DEBUG, "runway.SafeHaven") @@ -236,18 +237,17 @@ def 
test_sys_argv( assert sys.argv == orig_val assert caplog.messages == expected_logs - def test_sys_modules(self, caplog: LogCaptureFixture, monkeypatch: MonkeyPatch) -> None: + def test_sys_modules( + self, caplog: pytest.LogCaptureFixture, monkeypatch: pytest.MonkeyPatch + ) -> None: """Test sys.modules interactions.""" caplog.set_level(logging.DEBUG, "runway.SafeHaven") monkeypatch.setattr(SafeHaven, "reset_all", MagicMock()) - orig_val = {} - for k, v in sys.modules.items(): - orig_val[k] = v + orig_val = dict(sys.modules) expected_logs = ["entering a safe haven...", "resetting sys.modules..."] with SafeHaven() as obj: - from ..fixtures import mock_hooks assert sys.modules != orig_val obj.reset_sys_modules() @@ -255,7 +255,7 @@ def test_sys_modules(self, caplog: LogCaptureFixture, monkeypatch: MonkeyPatch) assert caplog.messages[:2] == expected_logs assert caplog.messages[-1] == "leaving the safe haven..." - def test_sys_modules_exclude(self, monkeypatch: MonkeyPatch) -> None: + def test_sys_modules_exclude(self, monkeypatch: pytest.MonkeyPatch) -> None: """Test sys.modules interactions with excluded module.""" monkeypatch.setattr(SafeHaven, "reset_all", MagicMock()) @@ -263,7 +263,6 @@ def test_sys_modules_exclude(self, monkeypatch: MonkeyPatch) -> None: assert module not in sys.modules with SafeHaven(sys_modules_exclude=[module]) as obj: - from ..fixtures import mock_hooks assert module in sys.modules obj.reset_sys_modules() @@ -276,8 +275,8 @@ def test_sys_modules_exclude(self, monkeypatch: MonkeyPatch) -> None: def test_sys_path( self, provided: TestParamsTypeDef, - caplog: LogCaptureFixture, - monkeypatch: MonkeyPatch, + caplog: pytest.LogCaptureFixture, + monkeypatch: pytest.MonkeyPatch, ) -> None: """Test sys.path interactions.""" caplog.set_level(logging.DEBUG, "runway.SafeHaven") @@ -375,17 +374,15 @@ def test_load_object_from_string() -> None: assert load_object_from_string(obj_path, try_reload=True) == "us-west-2" -def test_load_object_from_string_reload_conditions(monkeypatch: MonkeyPatch) -> None: +def test_load_object_from_string_reload_conditions(monkeypatch: pytest.MonkeyPatch) -> None: """Test load_object_from_string reload conditions.""" mock_reload = MagicMock() monkeypatch.setattr("runway.utils.importlib.reload", mock_reload) builtin_test = "sys.version_info" mock_hook = "tests.unit.fixtures.mock_hooks.GLOBAL_VALUE" - try: + with suppress(Exception): del sys.modules["tests.unit.fixtures.mock_hooks"] - except: - pass load_object_from_string(builtin_test, try_reload=False) mock_reload.assert_not_called()
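
The bulk of the hunks above apply the same few mechanical conversions: PEP 585 builtins (`list`/`dict`) in place of `typing.List`/`typing.Dict`, `unittest.mock` in place of the third-party `mock` package, fixture types referenced through the `pytest.` namespace instead of `from pytest import LogCaptureFixture`, and runtime-irrelevant imports kept behind `TYPE_CHECKING`. A minimal, self-contained sketch of that target style (the module path and the `shutil.which` patch are illustrative stand-ins, not taken from the repo):

"""Illustrative test in the post-ruff style these hunks converge on."""

from __future__ import annotations

import logging
import shutil
from typing import TYPE_CHECKING, Any
from unittest.mock import Mock  # stdlib mock instead of the third-party ``mock`` package

import pytest

if TYPE_CHECKING:
    from pathlib import Path

    from pytest_mock import MockerFixture

MODULE = "example.module"  # hypothetical logger name, not from the patch


def test_example(
    caplog: pytest.LogCaptureFixture,  # fixture type via the ``pytest.`` namespace
    mocker: MockerFixture,
    tmp_path: Path,
) -> None:
    """Use PEP 585 builtins in annotations instead of typing.List / typing.Dict."""
    caplog.set_level(logging.WARNING, logger=MODULE)
    cmd: list[Any] = ["npm", "ci"]
    env: dict[str, str] = {"CI": "1"}
    which = mocker.patch("shutil.which", return_value=None)  # stand-in for the f"{MODULE}.which" patches above
    assert shutil.which("npm") is None
    which.assert_called_once_with("npm")
    assert isinstance(which, Mock)
    assert cmd and env and tmp_path.is_dir()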
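
Two ruff suppressions recur throughout these test hunks: ARG001/ARG002 for arguments that exist only to request a side-effect fixture (e.g. ``platform_linux``, ``fake_process``), and B018 for deliberately evaluating a bare property inside ``pytest.raises``. A hedged sketch of both, with a toy class and fixture standing in for the real objects under test:

"""Sketch of the noqa usage seen above (all names here are illustrative)."""

from __future__ import annotations

import pytest


class Thing:
    """Toy stand-in for an object whose property raises until resolved."""

    @property
    def value(self) -> str:
        """Mimic an unresolved variable."""
        raise RuntimeError("unresolved")


@pytest.fixture()
def platform_linux(monkeypatch: pytest.MonkeyPatch) -> None:
    """Fixture requested only for its side effect."""
    monkeypatch.setattr("platform.system", lambda: "Linux")


def test_value_raises(platform_linux: None) -> None:  # noqa: ARG001
    """ARG001 (ARG002 on methods) flags the unused argument; the fixture is wanted for its side effect."""
    with pytest.raises(RuntimeError):
        Thing().value  # noqa: B018  # B018 flags the bare expression; raising is the point here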
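
One of the few behavioral (rather than purely stylistic) changes above swaps a bare ``lambda`` passed to ``mocker.patch.object`` for ``return_value=``, so the attribute is replaced by a ``MagicMock`` that also records calls. A small sketch under assumed names (the toy ``Module`` class is illustrative, not Runway's ``Serverless``):

"""Sketch of the lambda -> return_value change seen in test_skip."""

from __future__ import annotations

from pytest_mock import MockerFixture


class Module:
    """Toy stand-in for the module class under test."""

    def package_json_missing(self) -> bool:
        """Pretend to check for package.json."""
        return False


def test_patch_styles(mocker: MockerFixture) -> None:
    """Both patches make the method return True; only the mock records calls."""
    obj = Module()

    # Old style: replace the attribute with a bare callable.
    mocker.patch.object(obj, "package_json_missing", lambda: True)
    assert obj.package_json_missing() is True

    # New style: let mock build a MagicMock configured with return_value,
    # which additionally supports assert_called_once() and friends.
    mocked = mocker.patch.object(obj, "package_json_missing", return_value=True)
    assert obj.package_json_missing() is True
    mocked.assert_called_once()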