From 63ed8e2ba963bc97a0a7b3854f38458bcd921e24 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Thu, 27 Jun 2024 12:34:34 -0700 Subject: [PATCH 01/40] huge refactor: 1) removed ability and code for building images (for now) 2) add support for image tagging (defined in hubploy.yaml, defaults to 'latest' if not specified 3) added verbose and debuging output 4) added --dry-run for helm actions 5) fixed almost every line > 80 chars 6) consistent use of " vs a mix of " and ' 7) did a ton of readability cleanup 8) maybe more? --- hubploy/__main__.py | 187 ++++++++++++++----------------- hubploy/auth.py | 244 ++++++++++++++++++++++++---------------- hubploy/config.py | 264 ++++++++++++++------------------------------ hubploy/helm.py | 152 +++++++++++++++++-------- 4 files changed, 418 insertions(+), 429 deletions(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index 7ae4974..77697cc 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -1,150 +1,125 @@ import argparse -import hubploy +import logging import sys -from hubploy import helm, auth, commitrange +logging.basicConfig(stream=sys.stdout, level=logging.WARNING) +logger = logging.getLogger(__name__) + +import hubploy +from hubploy import helm, auth, commitrange def main(): argparser = argparse.ArgumentParser() - subparsers = argparser.add_subparsers(dest='command') - build_parser = subparsers.add_parser('build', help='Build an image for a given deployment') - - build_parser.add_argument( - 'deployment', - help='Name of deployment to build image of' - ) + subparsers = argparser.add_subparsers(dest="command") - trigger_change_group = build_parser.add_mutually_exclusive_group() - trigger_change_group.add_argument( - '--commit-range', - help='Trigger image rebuilds only if files in image directory have changed in this git commit range', - ) - # FIXME: Needs a better name? 
- trigger_change_group.add_argument( - '--check-registry', - action='store_true', - help="Trigger image rebuild if image with expected name and tag is not in upstream registry." - ) - build_parser.add_argument( - '--push', - action='store_true', - help="Push image after building" - ) - build_parser.add_argument( - '--no-cache', - action='store_true', - help="Don't pull previous image to re-use cache from" + deploy_parser = subparsers.add_parser( + "deploy", + help="Deploy a chart to the given environment" ) - build_parser.add_argument( - '--image', - # FIXME: Have a friendlier way to reference this - help='Fully qualified docker image names to build', - action='append' - ) - - deploy_parser = subparsers.add_parser('deploy', help='Deploy a chart to the given environment') - deploy_parser.add_argument( - 'deployment' + "deployment" ) deploy_parser.add_argument( - 'chart' + "chart" ) deploy_parser.add_argument( - 'environment', - choices=['develop', 'staging', 'prod'] + "environment", + choices=["develop", "staging", "prod"] ) deploy_parser.add_argument( - '--namespace', + "--namespace", default=None ) deploy_parser.add_argument( - '--set', - action='append', + "--set", + action="append", ) deploy_parser.add_argument( - '--set-string', - action='append', + "--set-string", + action="append", ) deploy_parser.add_argument( - '--version', + "--version", ) deploy_parser.add_argument( - '--timeout' + "--timeout" ) deploy_parser.add_argument( - '--force', - action='store_true' + "--force", + action="store_true" ) deploy_parser.add_argument( - '--atomic', - action='store_true' + "--atomic", + action="store_true" ) deploy_parser.add_argument( - '--cleanup-on-fail', - action='store_true' + "--cleanup-on-fail", + action="store_true" + ) + deploy_parser.add_argument( + "--dry-run", + action="store_true", + help="Dry run the helm upgrade command. This also renders the " + + "chart to STDOUT." 
) argparser.add_argument( - '-d', - '--debug', - action='store_true', - help='Enable helm debug output' + "-d", + "--debug", + action="store_true", + help="Enable tool debug output (not including helm debug)." + ) + argparser.add_argument( + "-D", + "--helm-debug", + action="store_true", + help="Helm debug only." + ) + argparser.add_argument( + "-v", + "--verbose", + action="store_true", + help="Enable verbose output." ) args = argparser.parse_args() + if args.verbose: + logger.setLevel(logging.INFO) + elif args.debug: + logger.setLevel(logging.DEBUG) + logger.info(args) + + # Attempt to load the config early, fail if it doesn't exist or is invalid try: - config = hubploy.config.get_config(args.deployment) + config = hubploy.config.get_config( + args.deployment, + args.debug, + args.verbose + ) except hubploy.config.DeploymentNotFoundError as e: print(e, file=sys.stderr) sys.exit(1) - if args.command == 'build': - if not (args.check_registry or args.commit_range): - args.commit_range = commitrange.get_commit_range() - if not args.commit_range: - # commit_range autodetection failed, and check registry isn't set - # FIXME: Provide an actually useful error message - print("Could not auto-detect commit-range, and --check-registry is not set", file=sys.stderr) - print("Specify --commit-range manually, or pass --check-registry", file=sys.stderr) - sys.exit(1) - - with auth.registry_auth(args.deployment, args.push, args.check_registry): - - all_images = config.get('images', {}).get('images', {}) - - if args.image: - build_images = [i for i in all_images if i.name in args.image] - else: - build_images = all_images - - print(f"Images found: {len(build_images)}") - for image in build_images: - if image.needs_building(check_registry=args.check_registry, commit_range=args.commit_range): - print(f"Building image {image.name}") - image.build(not args.no_cache) - if args.push: - image.push() - else: - print(f"{image.name} does not require building") - - elif args.command == 
'deploy': - helm.deploy( - args.deployment, - args.chart, - args.environment, - args.namespace, - args.set, - args.set_string, - args.version, - args.timeout, - args.force, - args.atomic, - args.cleanup_on_fail, - args.debug - ) + helm.deploy( + args.deployment, + args.chart, + args.environment, + args.namespace, + args.set, + args.set_string, + args.version, + args.timeout, + args.force, + args.atomic, + args.cleanup_on_fail, + args.debug, + args.verbose, + args.helm_debug, + args.dry_run + ) + +if __name__ == "__main__": + main() -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/hubploy/auth.py b/hubploy/auth.py index a382b34..1d7127f 100644 --- a/hubploy/auth.py +++ b/hubploy/auth.py @@ -5,11 +5,14 @@ Current cloud providers supported: gcloud, aws, and azure. """ +import logging +logger = logging.getLogger(__name__) + import json import os import subprocess -import shutil -import pathlib +#import shutil +#import pathlib import tempfile import boto3 @@ -18,7 +21,7 @@ from ruamel.yaml import YAML from ruamel.yaml.scanner import ScannerError -yaml = YAML(typ='rt') +yaml = YAML(typ="rt") @contextmanager @@ -31,28 +34,29 @@ def registry_auth(deployment, push, check_registry): config = get_config(deployment) - if 'images' in config and 'registry' in config['images']: - registry = config['images']['registry'] - provider = registry.get('provider') - if provider == 'gcloud': + if "images" in config and "registry" in config["images"]: + registry = config["images"]["registry"] + provider = registry.get("provider") + if provider == "gcloud": yield from registry_auth_gcloud( - deployment, **registry['gcloud'] + deployment, **registry["gcloud"] ) - elif provider == 'aws': + elif provider == "aws": yield from registry_auth_aws( - deployment, **registry['aws'] + deployment, **registry["aws"] ) - elif provider == 'azure': + elif provider == "azure": yield from registry_auth_azure( - deployment, **registry['azure'] + deployment, **registry["azure"] ) - 
elif provider == 'dockerconfig': + elif provider == "dockerconfig": yield from registry_auth_dockercfg( - deployment, **registry['dockerconfig'] + deployment, **registry["dockerconfig"] ) else: raise ValueError( - f'Unknown provider {provider} found in hubploy.yaml') + f"Unknown provider {provider} found in hubploy.yaml" + ) else: # We actually don't need to auth, but we are yielding anyway # contextlib.contextmanager does not like it when you don't yield @@ -60,7 +64,7 @@ def registry_auth(deployment, push, check_registry): def registry_auth_dockercfg(deployment, filename): encrypted_file_path = os.path.join( - 'deployments', deployment, 'secrets', filename + "deployments", deployment, "secrets", filename ) # DOCKER_CONFIG actually points to a *directory*, not a file. @@ -69,16 +73,16 @@ def registry_auth_dockercfg(deployment, filename): # temporary directory that we'll set DOCKER_CONFIG to # Our temporary directory (with symlink) and the decrypted # file will be deleted via the contextmanagers. - orig_dockercfg = os.environ.get('DOCKER_CONFIG', None) + orig_dockercfg = os.environ.get("DOCKER_CONFIG", None) with tempfile.TemporaryDirectory() as d: with decrypt_file(encrypted_file_path) as auth_file_path: try: - dst_path = os.path.join(d, 'config.json') + dst_path = os.path.join(d, "config.json") os.symlink(auth_file_path, dst_path) - os.environ['DOCKER_CONFIG'] = d + os.environ["DOCKER_CONFIG"] = d yield finally: - unset_env_var('DOCKER_CONFIG', orig_dockercfg) + unset_env_var("DOCKER_CONFIG", orig_dockercfg) def registry_auth_gcloud(deployment, project, service_key): """ @@ -87,17 +91,17 @@ def registry_auth_gcloud(deployment, project, service_key): This changes *global machine state* on where docker can push to! 
""" encrypted_service_key_path = os.path.join( - 'deployments', deployment, 'secrets', service_key + "deployments", deployment, "secrets", service_key ) with decrypt_file(encrypted_service_key_path) as decrypted_service_key_path: subprocess.check_call([ - 'gcloud', 'auth', - 'activate-service-account', - '--key-file', os.path.abspath(decrypted_service_key_path) + "gcloud", "auth", + "activate-service-account", + "--key-file", os.path.abspath(decrypted_service_key_path) ]) subprocess.check_call([ - 'gcloud', 'auth', 'configure-docker' + "gcloud", "auth", "configure-docker" ]) yield @@ -121,11 +125,11 @@ def _auth_aws(deployment, service_key=None, role_arn=None, role_session_name=Non # Get path to service_key and validate its around service_key_path = os.path.join( - 'deployments', deployment, 'secrets', service_key + "deployments", deployment, "secrets", service_key ) if not os.path.isfile(service_key_path): raise FileNotFoundError( - f'The service_key file {service_key_path} does not exist') + f"The service_key file {service_key_path} does not exist") os.environ["AWS_SHARED_CREDENTIALS_FILE"] = service_key_path @@ -134,16 +138,16 @@ def _auth_aws(deployment, service_key=None, role_arn=None, role_session_name=Non original_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", None) original_session_token = os.environ.get("AWS_SESSION_TOKEN", None) - sts_client = boto3.client('sts') + sts_client = boto3.client("sts") assumed_role_object = sts_client.assume_role( RoleArn=role_arn, RoleSessionName=role_session_name ) - creds = assumed_role_object['Credentials'] - os.environ['AWS_ACCESS_KEY_ID'] = creds['AccessKeyId'] - os.environ['AWS_SECRET_ACCESS_KEY'] = creds['SecretAccessKey'] - os.environ['AWS_SESSION_TOKEN'] = creds['SessionToken'] + creds = assumed_role_object["Credentials"] + os.environ["AWS_ACCESS_KEY_ID"] = creds["AccessKeyId"] + os.environ["AWS_SECRET_ACCESS_KEY"] = creds["SecretAccessKey"] + os.environ["AWS_SESSION_TOKEN"] = creds["SessionToken"] # 
return until context exits yield @@ -152,9 +156,9 @@ def _auth_aws(deployment, service_key=None, role_arn=None, role_session_name=Non if service_key: unset_env_var("AWS_SHARED_CREDENTIALS_FILE", original_credential_file_loc) elif role_arn: - unset_env_var('AWS_ACCESS_KEY_ID', original_access_key_id) - unset_env_var('AWS_SECRET_ACCESS_KEY', original_secret_access_key) - unset_env_var('AWS_SESSION_TOKEN', original_session_token) + unset_env_var("AWS_ACCESS_KEY_ID", original_access_key_id) + unset_env_var("AWS_SECRET_ACCESS_KEY", original_secret_access_key) + unset_env_var("AWS_SESSION_TOKEN", original_session_token) def registry_auth_aws(deployment, account_id, region, service_key=None, role_arn=None): @@ -167,18 +171,18 @@ def registry_auth_aws(deployment, account_id, region, service_key=None, role_arn # FIXME: Use a temporary docker config # Requires amazon-ecr-credential-helper to already be installed # this adds necessary line to authenticate docker with ecr - docker_config_dir = os.path.expanduser('~/.docker') + docker_config_dir = os.path.expanduser("~/.docker") os.makedirs(docker_config_dir, exist_ok=True) - docker_config = os.path.join(docker_config_dir, 'config.json') + docker_config = os.path.join(docker_config_dir, "config.json") if os.path.exists(docker_config): - with open(docker_config, 'r') as f: + with open(docker_config, "r") as f: config = json.load(f) else: config = {} - registry = f'{account_id}.dkr.ecr.{region}.amazonaws.com' - config.setdefault('credHelpers', {})[registry] = 'ecr-login' - with open(docker_config, 'w') as f: + registry = f"{account_id}.dkr.ecr.{region}.amazonaws.com" + config.setdefault("credHelpers", {})[registry] = "ecr-login" + with open(docker_config, "w") as f: json.dump(config, f) yield @@ -204,112 +208,153 @@ def registry_auth_azure(deployment, resource_group, registry, auth_file): """ # parse Azure auth file - auth_file_path = os.path.join('deployments', deployment, 'secrets', auth_file) + auth_file_path = 
os.path.join("deployments", deployment, "secrets", auth_file) with open(auth_file_path) as f: auth = yaml.load(f) # log in subprocess.check_call([ - 'az', 'login', '--service-principal', - '--user', auth['appId'], - '--tenant', auth['tenant'], - '--password', auth['password'] + "az", "login", "--service-principal", + "--user", auth["appId"], + "--tenant", auth["tenant"], + "--password", auth["password"] ]) # log in to ACR subprocess.check_call([ - 'az', 'acr', 'login', - '--name', registry + "az", "acr", "login", + "--name", registry ]) yield @contextmanager -def cluster_auth(deployment): +def cluster_auth(deployment, debug=False, verbose=False): """ Do appropriate cluster authentication for given deployment """ - config = get_config(deployment) + if verbose: + logger.setLevel(logging.INFO) + elif debug: + logger.setLevel(logging.DEBUG) + + logger.info(f"Getting auth config for {deployment}") + config = get_config(deployment, debug, verbose) - if 'cluster' in config: - cluster = config['cluster'] - provider = cluster.get('provider') + if "cluster" in config: + cluster = config["cluster"] + provider = cluster.get("provider") orig_kubeconfig = os.environ.get("KUBECONFIG", None) + try: - if provider == 'kubeconfig': + if provider == "kubeconfig": + logger.info( + f"Attempting to authenticate to {cluster} with " + + "existing kubeconfig." + ) encrypted_kubeconfig_path = os.path.join( - 'deployments', deployment, 'secrets', cluster['kubeconfig']['filename'] + "deployments", + deployment, + "secrets", + cluster["kubeconfig"]["filename"] ) with decrypt_file(encrypted_kubeconfig_path) as kubeconfig_path: os.environ["KUBECONFIG"] = kubeconfig_path yield else: - # Temporarily kubeconfig file with tempfile.NamedTemporaryFile() as temp_kubeconfig: os.environ["KUBECONFIG"] = temp_kubeconfig.name + logger.info( + f"Attempting to authenticate with {provider}..." 
+ ) - if provider == 'gcloud': + if provider == "gcloud": yield from cluster_auth_gcloud( - deployment, **cluster['gcloud'] + deployment, **cluster["gcloud"] ) - elif provider == 'aws': + elif provider == "aws": yield from cluster_auth_aws( - deployment, **cluster['aws'] + deployment, **cluster["aws"] ) - elif provider == 'azure': + elif provider == "azure": yield from cluster_auth_azure( - deployment, **cluster['azure'] + deployment, **cluster["azure"] ) else: raise ValueError( - f'Unknown provider {provider} found in hubploy.yaml') + f"Unknown provider {provider} found in " + + "hubploy.yaml" + ) finally: unset_env_var("KUBECONFIG", orig_kubeconfig) -def cluster_auth_gcloud(deployment, project, cluster, zone, service_key): +def cluster_auth_gcloud( + deployment, + project, + cluster, + zone, + service_key + ): """ Setup GKE authentication with service_key This changes *global machine state* on what current kubernetes cluster is! """ encrypted_service_key_path = os.path.join( - 'deployments', deployment, 'secrets', service_key + "deployments", deployment, "secrets", service_key ) with decrypt_file(encrypted_service_key_path) as decrypted_service_key_path: subprocess.check_call([ - 'gcloud', 'auth', - 'activate-service-account', - '--key-file', os.path.abspath(decrypted_service_key_path) + "gcloud", "auth", + "activate-service-account", + "--key-file", os.path.abspath(decrypted_service_key_path) ]) subprocess.check_call([ - 'gcloud', 'container', 'clusters', - f'--zone={zone}', - f'--project={project}', - 'get-credentials', cluster + "gcloud", "container", "clusters", + f"--zone={zone}", + f"--project={project}", + "get-credentials", cluster ]) yield -def cluster_auth_aws(deployment, account_id, cluster, region, service_key=None, role_arn=None): +def cluster_auth_aws( + deployment, + account_id, + cluster, + region, + service_key=None, + role_arn=None + ): """ Setup AWS authentication with service_key or with a role This changes *global machine state* on what 
current kubernetes cluster is! """ - with _auth_aws(deployment, service_key=service_key, role_arn=role_arn, role_session_name="hubploy-cluster-auth"): + with _auth_aws( + deployment, + service_key=service_key, + role_arn=role_arn, + role_session_name="hubploy-cluster-auth" + ): subprocess.check_call([ - 'aws', 'eks', 'update-kubeconfig', - '--name', cluster, '--region', region + "aws", "eks", "update-kubeconfig", + "--name", cluster, "--region", region ]) yield -def cluster_auth_azure(deployment, resource_group, cluster, auth_file): +def cluster_auth_azure( + deployment, + resource_group, + cluster, + auth_file + ): """ Azure authentication for AKS @@ -323,37 +368,44 @@ def cluster_auth_azure(deployment, resource_group, cluster, auth_file): cluster: cluster_name auth_file: azure_auth_file.yaml - The azure_service_principal.json file should have the following - keys: appId, tenant, password. This is the format produced - by the az command when creating a service principal. + The azure_service_principal.json file should have the following keys: + appId, tenant, password. + + This is the format produced by the az command when creating a service + principal. 
""" # parse Azure auth file - auth_file_path = os.path.join('deployments', deployment, 'secrets', auth_file) + auth_file_path = os.path.join( + "deployments", deployment, "secrets", auth_file + ) with open(auth_file_path) as f: auth = yaml.load(f) # log in subprocess.check_call([ - 'az', 'login', '--service-principal', - '--user', auth['appId'], - '--tenant', auth['tenant'], - '--password', auth['password'] + "az", "login", "--service-principal", + "--user", auth["appId"], + "--tenant", auth["tenant"], + "--password", auth["password"] ]) # get cluster credentials subprocess.check_call([ - 'az', 'aks', 'get-credentials', - '--name', cluster, - '--resource-group', resource_group + "az", "aks", "get-credentials", + "--name", cluster, + "--resource-group", resource_group ]) yield def unset_env_var(env_var, old_env_var_value): """ - If the old environment variable's value exists, replace the current one with the old one - If the old environment variable's value does not exist, delete the current one + If the old environment variable's value exists, replace the current one + with the old one. + + If the old environment variable's value does not exist, delete the current + one. 
""" if env_var in os.environ: @@ -375,28 +427,28 @@ def decrypt_file(encrypted_path): with open(encrypted_path) as f: _, ext = os.path.splitext(encrypted_path) # Support the (clearly wrong) people who use .yml instead of .yaml - if ext == '.yaml' or ext == '.yml': + if ext == ".yaml" or ext == ".yml": try: encrypted_data = yaml.load(f) except ScannerError: yield encrypted_path return - elif ext == '.json': + elif ext == ".json": try: encrypted_data = json.load(f) except json.JSONDecodeError: yield encrypted_path return - if 'sops' not in encrypted_data: + if "sops" not in encrypted_data: yield encrypted_path return # If file has a `sops` key, we assume it's sops encrypted with tempfile.NamedTemporaryFile() as f: subprocess.check_call([ - 'sops', - '--output', f.name, - '--decrypt', encrypted_path + "sops", + "--output", f.name, + "--decrypt", encrypted_path ]) yield f.name diff --git a/hubploy/config.py b/hubploy/config.py index 45d7075..012159a 100644 --- a/hubploy/config.py +++ b/hubploy/config.py @@ -3,14 +3,13 @@ returns it embedded with a set of LocalImage objects with filesystem paths made absolute. """ +import logging +logger = logging.getLogger(__name__) + import os from ruamel.yaml import YAML -from repo2docker.app import Repo2Docker -import docker - -from . 
import utils -yaml = YAML(typ='safe') +yaml = YAML(typ="safe") class DeploymentNotFoundError(Exception): def __init__(self, deployment, path, *args, **kwargs): @@ -22,207 +21,112 @@ def __str__(self): return f"deployment {self.deployment} not found at {self.path}" -class LocalImage: +class RemoteImage: """ - A docker image that can be built from a local filesystem source + A simple class to represent a remote image """ - def __init__(self, name, path, repo2docker={}, helm_substitution_path='jupyterhub.singleuser.image'): + def __init__(self, + name, + tag, + helm_substitution_path="jupyterhub.singleuser.image" + ): """ - Create an Image from a local path + Define an Image from the hubploy config name: Fully qualified name of image - path: Absolute path to local directory with image contents - helm_substitution_path: Dot separated path in a helm file that should be populated with this image spec - - Expects cwd to be inside the git repo we are operating in + tag: Tag of image (latest or github hash) + helm_substitution_path: Dot separated path in a helm file that should + be populated with this image spec """ # name must not be empty # FIXME: Validate name to conform to docker image name guidelines - if not name or name.strip() == '': - raise ValueError("Name of image to be built is not specified. Check hubploy.yaml of your deployment") + if not name or name.strip() == "": + raise ValueError( + "Name of image to be built is not specified. 
Check " + + "hubploy.yaml of your deployment" + ) self.name = name - - self.tag = utils.last_modified_commit(path) - self.path = path - self.repo2docker = repo2docker + self.tag = tag self.helm_substitution_path = helm_substitution_path - self.image_spec = f'{self.name}:{self.tag}' - - # Make r2d object here so we can use it to build & push - self.r2d = Repo2Docker() - self.r2d.subdir = self.path - self.r2d.output_image_spec = self.image_spec - self.r2d.user_id = 1000 - self.r2d.user_name = 'jovyan' - self.r2d.target_repo_dir = '/srv/repo' - if 'base_image' in self.repo2docker: - self.r2d.base_image = repo2docker.get('base_image') - self.r2d.initialize() - - @property - def docker(self): - """ - Return a shared docker client object + self.image_spec = f"{self.name}:{self.tag}" - Creating a docker client object with automatic version - selection can be expensive (since there needs to be an API - request to determien version). So we cache it on a per-class - level. - """ - # FIXME: Is this racey? - if not hasattr(self.__class__, '_docker'): - self.__class__._docker = docker.from_env() - - return self.__class__._docker - - def exists_in_registry(self): - """ - Return true if image exists in registry - """ - try: - image_manifest = self.docker.images.get_registry_data(self.image_spec) - return image_manifest is not None - except docker.errors.ImageNotFound: - return False - except docker.errors.NotFound: - return False - except docker.errors.APIError as e: - # This message seems to vary across registries? - if e.explanation.startswith('manifest unknown: '): - return False - else: - raise - - def get_possible_parent_tags(self, n=16): - """ - List n possible image tags that might be the same image built previously. - - It is much faster to build a new image if we have a list of cached - images that were built from the same source. This forces a rebuild of - only the parts that have changed. 
- - Since we know how the tags are formed, we try to find upto n tags for - this image that might be possible cache hits - """ - last_tag = None - for i in range(1, n): - # FIXME: Make this look for last modified since before beginning of commit_range - # Otherwise, if there are more than n commits in the current PR that touch this - # local image, we might not get any useful caches - commit_sha = utils.last_modified_commit(self.path, n=i) - # Stop looking for tags if our commit hashes repeat - # This means `git log` is repeating itself - if commit_sha != last_tag: - last_tag = commit_sha - yield commit_sha - - def fetch_parent_image(self): - """ - Prime local image cache by pulling possible parent images. - - Return spec of parent image, or None if no parents could be pulled - """ - for tag in self.get_possible_parent_tags(): - parent_image_spec = f'{self.name}:{tag}' - try: - print(f'Trying to fetch parent image {parent_image_spec}') - self.docker.images.pull(parent_image_spec) - return parent_image_spec - except docker.errors.NotFound: - pass - except docker.errors.APIError: - # FIXME: This is too generic, but a lot of remote repos don't raise NotFound. ECR :() - pass - return None - - def needs_building(self, check_registry=False, commit_range=None): - """ - Return true if image needs to be built. 
- - One of check_registry or commit_range must be set - """ - if not (check_registry or commit_range): - raise ValueError("One of check_registry or commit_range must be set") - - if check_registry: - return not self.exists_in_registry() - - if commit_range: - return utils.path_touched(self.path, commit_range=commit_range) - - - def build(self, reuse_cache=True): - """ - Build local image with repo2docker - """ - if reuse_cache: - parent_image_spec = self.fetch_parent_image() - if parent_image_spec: - self.r2d.cache_from = [parent_image_spec] - - self.r2d.build() - - def push(self): - self.r2d.push_image() - - - -def get_config(deployment): +def get_config(deployment, debug, verbose): """ - Returns hubploy.yaml configuration as a Python dictionary if it exists for a - given deployment, and also augments it with a set of LocalImage objects in - ["images"]["images"] and updates the images' filesystem paths to be - absolute. + Returns hubploy.yaml configuration as a Python dictionary if it exists for + a given deployment, and also augments it with a set of RemoteImage objects + in ["images"]["images"]. 
""" - deployment_path = os.path.abspath(os.path.join('deployments', deployment)) + if verbose: + logger.setLevel(logging.INFO) + elif debug: + logger.setLevel(logging.DEBUG) + + deployment_path = os.path.abspath(os.path.join("deployments", deployment)) if not os.path.exists(deployment_path): raise DeploymentNotFoundError(deployment, deployment_path) - config_path = os.path.join(deployment_path, 'hubploy.yaml') + config_path = os.path.join(deployment_path, "hubploy.yaml") with open(config_path) as f: - # If config_path isn't found, this will raise a FileNotFoundError with useful info + # If config_path isn't found, this will raise a FileNotFoundError with + # useful info config = yaml.load(f) - if 'images' in config: - images_config = config['images'] + if "images" in config: + # A single image is being deployed + images_config = config["images"] - if 'image_name' in images_config: - # Only one image is being built - # FIXME: Deprecate after moving other hubploy users to list format + if "image_name" in images_config: + if ":" in images_config["image_name"]: + image_name, tag = images_config["image_name"].split(":") + else: + image_name = images_config["image_name"] + tag = "latest" images = [{ - 'name': images_config['image_name'], - 'path': 'image', + "name": image_name, + "tag": tag }] - if 'image_config_path' in images_config: - images[0]['helm_substitution_path'] = images_config['image_config_path'] else: - # Multiple images are being built - images = images_config['images'] + # Multiple images are being deployed + image_list = images_config["images"] + images = [] + for i in image_list: + if ":" in i["name"]: + image_name, tag = i["name"].split(":") + else: + image_name = i["name"] + tag = "latest" + images.append({ + "name": image_name, + "tag": tag, + }) + config["images"]["images"] = [RemoteImage(**i) for i in images] - for image in images: - # Normalize paths to be absolute paths - image['path'] = os.path.join(deployment_path, image['path']) - - 
config['images']['images'] = [LocalImage(**i) for i in images] - - # FIXME: Does not currently support multiple images in the images block # Backwards compatibility checker for images block - if config['images']['registry']['provider'] == 'aws' and 'project' in config['images']['registry']['aws']: - config['images']['registry']['aws']['account_id'] = config['images']['registry']['aws']['project'] - del config['images']['registry']['aws']['project'] - - if config['images']['registry']['provider'] == 'aws' and 'zone' in config['images']['registry']['aws']: - config['images']['registry']['aws']['region'] = config['images']['registry']['aws']['zone'] - del config['images']['registry']['aws']['zone'] + if config["images"]["registry"]["provider"] == "aws" and \ + "project" in config["images"]["registry"]["aws"]: + config["images"]["registry"]["aws"]["account_id"] = \ + config["images"]["registry"]["aws"]["project"] + del config["images"]["registry"]["aws"]["project"] + + if config["images"]["registry"]["provider"] == "aws" \ + and "zone" in config["images"]["registry"]["aws"]: + config["images"]["registry"]["aws"]["region"] = \ + config["images"]["registry"]["aws"]["zone"] + del config["images"]["registry"]["aws"]["zone"] # Backwards compatibility checker for cluster block - if config['cluster']['provider'] == 'aws' and 'project' in config['cluster']['aws']: - config['cluster']['aws']['account_id'] = config['cluster']['aws']['project'] - del config['cluster']['aws']['project'] - - if config['cluster']['provider'] == 'aws' and 'zone' in config['cluster']['aws']: - config['cluster']['aws']['region'] = config['cluster']['aws']['zone'] - del config['cluster']['aws']['zone'] - + if config["cluster"]["provider"] == "aws" and \ + "project" in config["cluster"]["aws"]: + config["cluster"]["aws"]["account_id"] = \ + config["cluster"]["aws"]["project"] + del config["cluster"]["aws"]["project"] + + if config["cluster"]["provider"] == "aws" and \ + "zone" in 
config["cluster"]["aws"]: + config["cluster"]["aws"]["region"] = \ + config["cluster"]["aws"]["zone"] + del config["cluster"]["aws"]["zone"] + + logger.debug(f"Config loaded and parsed: {config}") return config diff --git a/hubploy/helm.py b/hubploy/helm.py index 712716f..8d9c77d 100644 --- a/hubploy/helm.py +++ b/hubploy/helm.py @@ -6,7 +6,6 @@ chart-name/ (Helm deployment chart) deployments/ - deployment-name - - image/ (optional) - secrets/ - prod.yaml - staging.yaml @@ -18,10 +17,14 @@ Util to deploy a Helm chart (deploy) given hubploy configuration and Helm chart configuration located in accordance to hubploy conventions. """ +import logging +logger = logging.getLogger(__name__) + import itertools +import kubernetes.config import os import subprocess -import kubernetes.config + from contextlib import ExitStack from kubernetes.client import CoreV1Api, rest from kubernetes.client.models import V1Namespace, V1ObjectMeta @@ -29,8 +32,7 @@ from hubploy.config import get_config from hubploy.auth import decrypt_file, cluster_auth - -HELM_EXECUTABLE = os.environ.get('HELM_EXECUTABLE', 'helm') +HELM_EXECUTABLE = os.environ.get("HELM_EXECUTABLE", "helm") def helm_upgrade( @@ -45,15 +47,27 @@ def helm_upgrade( force, atomic, cleanup_on_fail, - debug -): + debug, + verbose, + helm_debug, + dry_run + ): + if verbose: + logger.setLevel(logging.INFO) + elif debug: + logger.setLevel(logging.DEBUG) + + logger.info(f"Deploying {name} in namespace {namespace}") + + logger.debug(f"Running helm dep up in subdirectory '{chart}'") subprocess.check_call([ - HELM_EXECUTABLE, 'dep', 'up' + HELM_EXECUTABLE, "dep", "up" ], cwd=chart) # Create namespace explicitly, since helm3 removes support for it # See https://github.com/helm/helm/issues/6794 # helm2 only creates the namespace if it doesn't exist, so we should be fine + logger.debug("Loading kubeconfig for k8s access") kubeconfig = os.environ.get("KUBECONFIG", None) try: @@ -61,6 +75,9 @@ def helm_upgrade( except: 
kubernetes.config.load_incluster_config() + logger.debug( + f"Checking for namespace {namespace} and creating if it doesn't exist" + ) api = CoreV1Api() try: api.read_namespace(namespace) @@ -68,36 +85,45 @@ def helm_upgrade( if e.status == 404: # Create namespace print(f"Namespace {namespace} does not exist, creating it...") - api.create_namespace(V1Namespace(metadata=V1ObjectMeta(name=namespace))) + api.create_namespace( + V1Namespace( + metadata=V1ObjectMeta(name=namespace) + ) + ) else: raise cmd = [ HELM_EXECUTABLE, - 'upgrade', - '--wait', - '--install', - '--namespace', namespace, - name, chart, + "upgrade", + "--wait", + "--install", + "--namespace", + namespace, + name, + chart, ] if version: - cmd += ['--version', version] + cmd += ["--version", version] if timeout: - cmd += ['--timeout', timeout] + cmd += ["--timeout", timeout] if force: - cmd += ['--force'] + cmd += ["--force"] if atomic: - cmd += ['--atomic'] + cmd += ["--atomic"] if cleanup_on_fail: - cmd += ['--cleanup-on-fail'] - if debug: - cmd += ['--debug'] - cmd += itertools.chain(*[['-f', cf] for cf in config_files]) - cmd += itertools.chain(*[['--set', v] for v in config_overrides_implicit]) - cmd += itertools.chain(*[['--set-string', v] for v in config_overrides_string]) + cmd += ["--cleanup-on-fail"] + if helm_debug: + cmd += ["--debug"] + if dry_run: + cmd += ["--dry-run"] + cmd += itertools.chain(*[["-f", cf] for cf in config_files]) + cmd += itertools.chain(*[["--set", v] for v in config_overrides_implicit]) + cmd += itertools.chain(*[["--set-string", v] for v in config_overrides_string]) + + logger.debug(f"Running helm upgrade with command: {cmd}") subprocess.check_call(cmd) - def deploy( deployment, chart, @@ -110,8 +136,11 @@ def deploy( force=False, atomic=False, cleanup_on_fail=False, - debug=False -): + debug=False, + verbose=False, + helm_debug=False, + dry_run=False + ): """ Deploy a JupyterHub. 
@@ -120,58 +149,84 @@ def deploy( {chart}/ (Helm deployment chart) deployments/ - {deployment} - - image/ (optional) - secrets/ - {environment}.yaml - config/ - common.yaml - {environment}.yaml - A docker image from deployments/{deployment}/image is expected to be - already built and available with imagebuilder. + A docker image is expected to have already been built and tagged with + "name" containing the full path to the repo, image name and tag. + `jupyterhub.singleuser.image.tag` will be automatically set to this image tag. """ + if verbose: + logger.setLevel(logging.INFO) + elif debug: + logger.setLevel(logging.DEBUG) + + logger.info(f"Deploying {deployment} to {environment}") + if helm_config_overrides_implicit is None: helm_config_overrides_implicit = [] if helm_config_overrides_string is None: helm_config_overrides_string = [] - config = get_config(deployment) - - name = f'{deployment}-{environment}' + logger.info(f"Getting image and deployment config for {deployment}") + config = get_config(deployment, debug, verbose) + name = f"{deployment}-{environment}" if namespace is None: namespace = name helm_config_files = [f for f in [ - os.path.join('deployments', deployment, 'config', 'common.yaml'), - os.path.join('deployments', deployment, 'config', f'{environment}.yaml'), + os.path.join( + "deployments", deployment, "config", "common.yaml" + ), + os.path.join( + "deployments", deployment, "config", f"{environment}.yaml" + ), ] if os.path.exists(f)] - + logger.debug(f"Using helm config files: {helm_config_files}") helm_secret_files = [f for f in [ # Support for secrets in same repo - os.path.join('deployments', deployment, 'secrets', f'{environment}.yaml'), + os.path.join( + "deployments", deployment, "secrets", f"{environment}.yaml" + ), # Support for secrets in a submodule repo - os.path.join('secrets', 'deployments', deployment, 'secrets', f'{environment}.yaml'), + os.path.join( + "secrets", "deployments", deployment, "secrets", f"{environment}.yaml" + ), ] 
if os.path.exists(f)] - - - - if config.get('images'): - for image in config['images']['images']: + logger.debug(f"Using helm secret files: {helm_secret_files}") + + if config.get("images"): + for image in config["images"]["images"]: + logger.info( + f"Using image {image.name}:{image.tag} for " + + f"{image.helm_substitution_path}" + ) # We can support other charts that wrap z2jh by allowing various # config paths where we set image tags and names. # We default to one sublevel, but we can do multiple levels. - # With the PANGEO chart, we this could be set to `pangeo.jupyterhub.singleuser.image` - helm_config_overrides_string.append(f'{image.helm_substitution_path}.tag={image.tag}') - helm_config_overrides_string.append(f'{image.helm_substitution_path}.name={image.name}') + helm_config_overrides_string.append( + f"{image.helm_substitution_path}.tag={image.tag}" + ) + helm_config_overrides_string.append( + f"{image.helm_substitution_path}.name={image.name}" + ) with ExitStack() as stack: - decrypted_secret_files = [stack.enter_context(decrypt_file(f)) for f in helm_secret_files] + decrypted_secret_files = [ + stack.enter_context(decrypt_file(f)) for f in helm_secret_files + ] # Just in time for k8s access, activate the cluster credentials - stack.enter_context(cluster_auth(deployment)) + logger.debug(f"Activating cluster credentials for deployment " + + f"{deployment}" + ) + stack.enter_context(cluster_auth(deployment, debug, verbose)) helm_upgrade( name, namespace, @@ -185,4 +240,7 @@ def deploy( atomic, cleanup_on_fail, debug, + verbose, + helm_debug, + dry_run ) From e0efb875592cb547dd725668309df6561b132dac Mon Sep 17 00:00:00 2001 From: shane knapp Date: Thu, 27 Jun 2024 12:44:46 -0700 Subject: [PATCH 02/40] removing unused imports --- hubploy/auth.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/hubploy/auth.py b/hubploy/auth.py index 1d7127f..836a1a6 100644 --- a/hubploy/auth.py +++ b/hubploy/auth.py @@ -11,8 +11,6 @@ import json import os import 
subprocess -#import shutil -#import pathlib import tempfile import boto3 From 8a92d85d519ae3c22d3710aa02a32717e6e90ea0 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Thu, 27 Jun 2024 14:01:41 -0700 Subject: [PATCH 03/40] disable some tests as we arent building or pushing anymore --- tests/test_imagebuilder.py | 119 ++++++++++++++++++------------------- 1 file changed, 59 insertions(+), 60 deletions(-) diff --git a/tests/test_imagebuilder.py b/tests/test_imagebuilder.py index 6752baf..218a90e 100644 --- a/tests/test_imagebuilder.py +++ b/tests/test_imagebuilder.py @@ -84,87 +84,86 @@ def commit_file(repo_dir, path, contents): git(repo_dir, 'commit', '-m', f'Added {path}') -def test_tag_generation(git_repo): - """ - Tag should be last commit of modified image dir - """ - commit_file(git_repo, 'image/Dockerfile', 'FROM busybox') - commit_file(git_repo, 'unrelated/file', 'unrelated') +#def test_tag_generation(git_repo): +# """ +# Tag should be last commit of modified image dir +# """ +# commit_file(git_repo, 'image/Dockerfile', 'FROM busybox') +# commit_file(git_repo, 'unrelated/file', 'unrelated') - with cwd(git_repo): - image = config.LocalImage('test-image', 'image') - assert image.tag == utils.last_modified_commit('image') - # Make sure tag isn't influenced by changes outside of iamge dir - assert image.tag != utils.last_modified_commit('unrelated') +# with cwd(git_repo): +# image = config.LocalImage('test-image', 'image') +# assert image.tag == utils.last_modified_commit('image') +# # Make sure tag isn't influenced by changes outside of iamge dir +# assert image.tag != utils.last_modified_commit('unrelated') +# # Change the Dockerfile and see that the tag changes +# commit_file(git_repo, 'image/Dockerfile', 'FROM busybox:latest') +# new_image = config.LocalImage('test-image', 'image') +# assert new_image.tag == utils.last_modified_commit('image') +# assert new_image.tag != image.tag - # Change the Dockerfile and see that the tag changes - commit_file(git_repo, 
'image/Dockerfile', 'FROM busybox:latest') - new_image = config.LocalImage('test-image', 'image') - assert new_image.tag == utils.last_modified_commit('image') - assert new_image.tag != image.tag +#def test_build_image(git_repo, local_registry): +# """ +# Test building a small image, pushing it and testing it exists +# """ +# commit_file(git_repo, 'image/Dockerfile', 'FROM busybox') -def test_build_image(git_repo, local_registry): - """ - Test building a small image, pushing it and testing it exists - """ - commit_file(git_repo, 'image/Dockerfile', 'FROM busybox') +# with cwd(git_repo): +# image = config.LocalImage(f'{local_registry}/test-build-image', 'image') +# image.build() - with cwd(git_repo): - image = config.LocalImage(f'{local_registry}/test-build-image', 'image') - image.build() +# assert not image.exists_in_registry() - assert not image.exists_in_registry() +# image.push() - image.push() +# assert image.exists_in_registry() - assert image.exists_in_registry() +#def test_parent_image_fetching(git_repo, local_registry): +# """ +# Previous tags of images should be fetched before building new one +# """ +# image_name = f'{local_registry}/parent-image-fetching' -def test_parent_image_fetching(git_repo, local_registry): - """ - Previous tags of images should be fetched before building new one - """ - image_name = f'{local_registry}/parent-image-fetching' - - with cwd(git_repo): +# with cwd(git_repo): # Create an image directory with a simple dockerfile - commit_file(git_repo, 'image/Dockerfile', - """ - FROM busybox - RUN echo 1 > /number - """) - first_image = config.LocalImage(image_name, 'image') - first_image.build() +# commit_file(git_repo, 'image/Dockerfile', +# """ +# FROM busybox +# RUN echo 1 > /number +# """) +# first_image = config.LocalImage(image_name, 'image') +# first_image.build() - # Image shouldn't exist in registry until we push it - assert not first_image.exists_in_registry() - first_image.push() +# # Image shouldn't exist in registry until 
we push it +# assert not first_image.exists_in_registry() +# first_image.push() - assert first_image.exists_in_registry() +# assert first_image.exists_in_registry() - client = docker.from_env() +# client = docker.from_env() # Remove it locally after pushing it, and make sure it is removed # This lets us test if the pulling actually worked - client.images.remove(first_image.image_spec) +# client.images.remove(first_image.image_spec) - with pytest.raises(docker.errors.ImageNotFound): - client.images.get(first_image.image_spec) +# with pytest.raises(docker.errors.ImageNotFound): +# client.images.get(first_image.image_spec) # Update the image directory - commit_file(git_repo, 'image/Dockerfile', - """ - FROM busybox - RUN echo 2 > /number - """) +# commit_file(git_repo, 'image/Dockerfile', +# """ +# FROM busybox +# RUN echo 2 > /number +# """) - second_image = config.LocalImage(image_name, 'image') +# second_image = config.LocalImage(image_name, 'image') - # We must be able to tell that the first image tag is a possible parent of the second - assert first_image.tag in second_image.get_possible_parent_tags() +# # We must be able to tell that the first image tag is a possible parent of the second +# assert first_image.tag in second_image.get_possible_parent_tags() - # Fetching the parents of the second image should bring the first docker image locally - second_image.fetch_parent_image() - assert client.images.get(first_image.image_spec) \ No newline at end of file +# # Fetching the parents of the second image should bring the first docker image locally +# second_image.fetch_parent_image() +# assert client.images.get(first_image.image_spec) \ No newline at end of file From 88be4783ffe9fbdf94c6c221af78bb97d98d9991 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Thu, 27 Jun 2024 14:09:42 -0700 Subject: [PATCH 04/40] update build python --- orb/orb.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/orb/orb.yml b/orb/orb.yml index d0ee25f..8395997 
100644 --- a/orb/orb.yml +++ b/orb/orb.yml @@ -12,7 +12,7 @@ jobs: default: false docker: - - image: python:3.7-slim-buster + - image: python:3.11.9-slim-bullseye working_directory: ~/repo @@ -25,9 +25,9 @@ jobs: - restore_cache: keys: - - v3.7-dependencies-{{ checksum "requirements.txt" }} + - v3.11-dependencies-{{ checksum "requirements.txt" }} # fallback to using the latest cache if no exact match is found - - v3.7-dependencies- + - v3.11-dependencies- - run: name: install dependencies From 7d5491e15f2dc6583c84483e269383648c750bfb Mon Sep 17 00:00:00 2001 From: shane knapp Date: Thu, 27 Jun 2024 14:31:40 -0700 Subject: [PATCH 05/40] add a little more messaging about tagging --- hubploy/config.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hubploy/config.py b/hubploy/config.py index 012159a..31585d1 100644 --- a/hubploy/config.py +++ b/hubploy/config.py @@ -93,9 +93,15 @@ def get_config(deployment, debug, verbose): for i in image_list: if ":" in i["name"]: image_name, tag = i["name"].split(":") + logger.info( + f"Tag for {image_name}: {tag}" + ) else: image_name = i["name"] tag = "latest" + logger.info( + f"No tag specified for {image_name}. Using 'latest'" + ) images.append({ "name": image_name, "tag": tag, From 2ea0e83630d92b6d7779eb15f4c9e2984ea14ecb Mon Sep 17 00:00:00 2001 From: shane knapp Date: Thu, 27 Jun 2024 14:48:18 -0700 Subject: [PATCH 06/40] wording etc --- hubploy/__main__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index 77697cc..17e9731 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -14,7 +14,7 @@ def main(): deploy_parser = subparsers.add_parser( "deploy", - help="Deploy a chart to the given environment" + help="Deploy a chart to the given environment." ) deploy_parser.add_argument( @@ -74,7 +74,7 @@ def main(): "-D", "--helm-debug", action="store_true", - help="Helm debug only." + help="Enable Helm debug output." 
) argparser.add_argument( "-v", From 758c6ed7b14d1f050bebd2d678a39ff384727aca Mon Sep 17 00:00:00 2001 From: shane knapp Date: Thu, 27 Jun 2024 17:20:18 -0700 Subject: [PATCH 07/40] add ability to override image and tag via the command line --- hubploy/__main__.py | 56 +++++++++++++++++++++++++++++---------------- hubploy/helm.py | 32 ++++++++++++++++++++++++-- 2 files changed, 66 insertions(+), 22 deletions(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index 17e9731..7155cc8 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -1,6 +1,7 @@ import argparse import logging import sys +import textwrap logging.basicConfig(stream=sys.stdout, level=logging.WARNING) logger = logging.getLogger(__name__) @@ -8,10 +9,31 @@ import hubploy from hubploy import helm, auth, commitrange +from argparse import RawTextHelpFormatter + def main(): - argparser = argparse.ArgumentParser() + argparser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) subparsers = argparser.add_subparsers(dest="command") + argparser.add_argument( + "-d", + "--debug", + action="store_true", + help="Enable tool debug output (not including helm debug)." + ) + argparser.add_argument( + "-D", + "--helm-debug", + action="store_true", + help="Enable Helm debug output." + ) + argparser.add_argument( + "-v", + "--verbose", + action="store_true", + help="Enable verbose output." + ) + deploy_parser = subparsers.add_parser( "deploy", help="Deploy a chart to the given environment." @@ -63,24 +85,16 @@ def main(): help="Dry run the helm upgrade command. This also renders the " + "chart to STDOUT." ) + deploy_parser.add_argument( + "--image-overrides", + nargs="+", + help=textwrap.dedent("""\ + Override one or more images and tags to deploy. Format is:\n + : : ...\n \n - argparser.add_argument( - "-d", - "--debug", - action="store_true", - help="Enable tool debug output (not including helm debug)." 
- ) - argparser.add_argument( - "-D", - "--helm-debug", - action="store_true", - help="Enable Helm debug output." - ) - argparser.add_argument( - "-v", - "--verbose", - action="store_true", - help="Enable verbose output." + IMPORTANT: The order of images passed in must match the order in which + they appear in hubploy.yaml and separated by spaces without quotes. + """) ) args = argparser.parse_args() @@ -90,7 +104,8 @@ def main(): elif args.debug: logger.setLevel(logging.DEBUG) logger.info(args) - + logger.info(args.image_overrides) + # Attempt to load the config early, fail if it doesn't exist or is invalid try: config = hubploy.config.get_config( @@ -117,7 +132,8 @@ def main(): args.debug, args.verbose, args.helm_debug, - args.dry_run + args.dry_run, + args.image_overrides ) if __name__ == "__main__": diff --git a/hubploy/helm.py b/hubploy/helm.py index 8d9c77d..9a3fc59 100644 --- a/hubploy/helm.py +++ b/hubploy/helm.py @@ -139,7 +139,8 @@ def deploy( debug=False, verbose=False, helm_debug=False, - dry_run=False + dry_run=False, + image_overrides=None ): """ Deploy a JupyterHub. @@ -202,6 +203,21 @@ def deploy( logger.debug(f"Using helm secret files: {helm_secret_files}") if config.get("images"): + if image_overrides is not None: + num_images = len(config["images"]["images"]) + num_overrides = len(image_overrides) + if num_images != num_overrides: + raise ValueError( + f"Number of image overrides ({num_overrides}) must match " + + f"number of images in hubploy.yaml ({num_images})" + ) + for override in image_overrides: + if ":" not in override: + raise ValueError( + f"Image override must be in the format " + + f":. Got {override}" + ) + count = 0 for image in config["images"]["images"]: logger.info( f"Using image {image.name}:{image.tag} for " + @@ -210,12 +226,23 @@ def deploy( # We can support other charts that wrap z2jh by allowing various # config paths where we set image tags and names. # We default to one sublevel, but we can do multiple levels. 
+ if image_overrides is not None: + override = image_overrides[count] + image_name, tag = override.split(":") + image.name = image_name + image.tag = tag + logger.info( + f"Overriding image {image.helm_substitution_path} to " + + f"{image.name}:{image.tag}" + ) helm_config_overrides_string.append( f"{image.helm_substitution_path}.tag={image.tag}" ) helm_config_overrides_string.append( f"{image.helm_substitution_path}.name={image.name}" ) + count+=1 + print(helm_config_overrides_string) with ExitStack() as stack: decrypted_secret_files = [ @@ -242,5 +269,6 @@ def deploy( debug, verbose, helm_debug, - dry_run + dry_run, + image_overrides ) From acf520983463123c10c32b3a2f9103b8faffbfe5 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Fri, 28 Jun 2024 15:09:57 -0700 Subject: [PATCH 08/40] finish up image overrides --- hubploy/__main__.py | 8 +++----- hubploy/config.py | 5 +++-- hubploy/helm.py | 27 +++++++++++++++------------ 3 files changed, 21 insertions(+), 19 deletions(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index 7155cc8..3c8d796 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -91,7 +91,6 @@ def main(): help=textwrap.dedent("""\ Override one or more images and tags to deploy. Format is:\n : : ...\n \n - IMPORTANT: The order of images passed in must match the order in which they appear in hubploy.yaml and separated by spaces without quotes. 
""") @@ -104,14 +103,13 @@ def main(): elif args.debug: logger.setLevel(logging.DEBUG) logger.info(args) - logger.info(args.image_overrides) - + # Attempt to load the config early, fail if it doesn't exist or is invalid try: config = hubploy.config.get_config( args.deployment, - args.debug, - args.verbose + debug=False, + verbose=False ) except hubploy.config.DeploymentNotFoundError as e: print(e, file=sys.stderr) diff --git a/hubploy/config.py b/hubploy/config.py index 31585d1..08261a8 100644 --- a/hubploy/config.py +++ b/hubploy/config.py @@ -50,7 +50,7 @@ def __init__(self, self.helm_substitution_path = helm_substitution_path self.image_spec = f"{self.name}:{self.tag}" -def get_config(deployment, debug, verbose): +def get_config(deployment, debug=False, verbose=False): """ Returns hubploy.yaml configuration as a Python dictionary if it exists for a given deployment, and also augments it with a set of RemoteImage objects @@ -72,9 +72,9 @@ def get_config(deployment, debug, verbose): config = yaml.load(f) if "images" in config: - # A single image is being deployed images_config = config["images"] + # A single image is being deployed if "image_name" in images_config: if ":" in images_config["image_name"]: image_name, tag = images_config["image_name"].split(":") @@ -106,6 +106,7 @@ def get_config(deployment, debug, verbose): "name": image_name, "tag": tag, }) + config["images"]["images"] = [RemoteImage(**i) for i in images] # Backwards compatibility checker for images block diff --git a/hubploy/helm.py b/hubploy/helm.py index 9a3fc59..c0059b7 100644 --- a/hubploy/helm.py +++ b/hubploy/helm.py @@ -204,12 +204,15 @@ def deploy( if config.get("images"): if image_overrides is not None: + print(f"Image overrides found: {image_overrides}") num_images = len(config["images"]["images"]) num_overrides = len(image_overrides) + if num_images != num_overrides: raise ValueError( f"Number of image overrides ({num_overrides}) must match " + - f"number of images in hubploy.yaml 
({num_images})" + f"number of images found in " + + f"deployments/{deployment}/hubploy.yaml ({num_images})" ) for override in image_overrides: if ":" not in override: @@ -217,24 +220,25 @@ def deploy( f"Image override must be in the format " + f":. Got {override}" ) + count = 0 for image in config["images"]["images"]: - logger.info( - f"Using image {image.name}:{image.tag} for " + - f"{image.helm_substitution_path}" - ) # We can support other charts that wrap z2jh by allowing various # config paths where we set image tags and names. # We default to one sublevel, but we can do multiple levels. if image_overrides is not None: override = image_overrides[count] - image_name, tag = override.split(":") - image.name = image_name - image.tag = tag - logger.info( - f"Overriding image {image.helm_substitution_path} to " + - f"{image.name}:{image.tag}" + override_image, override_tag = override.split(":") + print( + f"Overriding image {image.name}:{image.tag} to " + + f"{override_image}:{override_tag}" ) + image.name = override_image + image.tag = override_tag + logger.info( + f"Using image {image.name}:{image.tag} for " + + f"{image.helm_substitution_path}" + ) helm_config_overrides_string.append( f"{image.helm_substitution_path}.tag={image.tag}" ) @@ -242,7 +246,6 @@ def deploy( f"{image.helm_substitution_path}.name={image.name}" ) count+=1 - print(helm_config_overrides_string) with ExitStack() as stack: decrypted_secret_files = [ From a665f21fdd71ced420f49455e40774c59fe500e9 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Fri, 28 Jun 2024 15:19:15 -0700 Subject: [PATCH 09/40] more error checking --- hubploy/helm.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/hubploy/helm.py b/hubploy/helm.py index c0059b7..911b553 100644 --- a/hubploy/helm.py +++ b/hubploy/helm.py @@ -228,7 +228,14 @@ def deploy( # We default to one sublevel, but we can do multiple levels. 
if image_overrides is not None: override = image_overrides[count] - override_image, override_tag = override.split(":") + try: + override_image, override_tag = override.split(":") + except: + print( + f"ERROR: You must specify a tag when overriding images: {override}" + ) + exit(1) + print( f"Overriding image {image.name}:{image.tag} to " + f"{override_image}:{override_tag}" From 36f4db690ccc81b1e146b4d41daa620dec1e75b3 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Fri, 28 Jun 2024 15:22:49 -0700 Subject: [PATCH 10/40] belay that, i was already error checking XD --- hubploy/helm.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/hubploy/helm.py b/hubploy/helm.py index 911b553..b26172d 100644 --- a/hubploy/helm.py +++ b/hubploy/helm.py @@ -218,7 +218,7 @@ def deploy( if ":" not in override: raise ValueError( f"Image override must be in the format " + - f":. Got {override}" + f":. Got: {override}" ) count = 0 @@ -228,14 +228,6 @@ def deploy( # We default to one sublevel, but we can do multiple levels. if image_overrides is not None: override = image_overrides[count] - try: - override_image, override_tag = override.split(":") - except: - print( - f"ERROR: You must specify a tag when overriding images: {override}" - ) - exit(1) - print( f"Overriding image {image.name}:{image.tag} to " + f"{override_image}:{override_tag}" From 355ecfde0b1a5985979ded173dc2c8a666062dcb Mon Sep 17 00:00:00 2001 From: shane knapp Date: Sat, 29 Jun 2024 10:21:39 -0700 Subject: [PATCH 11/40] be clear about tags w/overrides --- hubploy/__main__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index 3c8d796..3af8f05 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -92,7 +92,8 @@ def main(): Override one or more images and tags to deploy. 
Format is:\n : : ...\n \n IMPORTANT: The order of images passed in must match the order in which - they appear in hubploy.yaml and separated by spaces without quotes. + they appear in hubploy.yaml and separated by spaces without quotes. You + must always specify a tag when overriding images. """) ) From 1541cc1e90606416225a2447c23d00480de62c5f Mon Sep 17 00:00:00 2001 From: shane knapp Date: Sat, 29 Jun 2024 10:34:19 -0700 Subject: [PATCH 12/40] the old deploy method is not needed any more --- .github/workflows/docker-push.yaml | 14 -------------- 1 file changed, 14 deletions(-) delete mode 100644 .github/workflows/docker-push.yaml diff --git a/.github/workflows/docker-push.yaml b/.github/workflows/docker-push.yaml deleted file mode 100644 index 2884f06..0000000 --- a/.github/workflows/docker-push.yaml +++ /dev/null @@ -1,14 +0,0 @@ -name: Publish Docker -on: [push] -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@master - - name: Publish to Registry - uses: elgohr/Publish-Docker-Github-Action@v5 - with: - name: yuvipanda/hubploy - snapshot: true - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} \ No newline at end of file From bf0c963fa2d1e453d954ef8772d43f4fcfa66fe9 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Sat, 29 Jun 2024 10:36:32 -0700 Subject: [PATCH 13/40] utils.py is no longer needed --- hubploy/utils.py | 77 ------------------------------------------------ 1 file changed, 77 deletions(-) delete mode 100644 hubploy/utils.py diff --git a/hubploy/utils.py b/hubploy/utils.py deleted file mode 100644 index 5fe1060..0000000 --- a/hubploy/utils.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -Utils to extract information (last_modified_commit, path_touched) from the git -history. -""" -import subprocess - - -def first_alpha(s): - """ - Returns the length of the shortest substring of the input that - contains an alpha character. 
- """ - for i, c in enumerate(s): - if c.isalpha(): - return i + 1 - raise Exception("No alpha characters in string: {}".format(s)) - - -def substring_with_alpha(s, min_len=7): - """ - Returns the shortest substring of the input that - contains an alpha character. - - Used to avoid helm/go bug that converts a string with all digit - characters into an exponential. - """ - return s[:max(min_len, first_alpha(s))] - - -def last_modified_commit(*paths, n=1, **kwargs): - """Get the last commit to modify the given paths""" - commit_hash = subprocess.check_output([ - 'git', - 'log', - '-n', str(n), - '--pretty=format:%H', - '--', - *paths - ], **kwargs).decode('utf-8').split('\n')[-1] - return substring_with_alpha(commit_hash) - - -def last_modified_date(*paths, **kwargs): - """Return the last modified date (as a string) for the given paths""" - return subprocess.check_output([ - 'git', - 'log', - '-n', '1', - '--pretty=format:%cd', - '--date=iso', - '--', - *paths - ], **kwargs).decode('utf-8') - - -def path_touched(*paths, commit_range): - """Return whether the given paths have been changed in the commit range - - Used to determine if a build is necessary - - Args: - *paths (str): - paths to check for changes - commit_range (str): - range of commits to check if paths have changed - """ - return subprocess.check_output([ - 'git', 'diff', '--name-only', commit_range, '--', *paths - ]).decode('utf-8').strip() != '' - - -def is_commit(ref): - try: - subprocess.check_call(['git', 'cat-file', 'commit', ref]) - return True - except subprocess.CalledProcessError: - return False From 4e72251e66c32590c4b27fd291f679d18984c6b1 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Sat, 29 Jun 2024 10:37:22 -0700 Subject: [PATCH 14/40] commitrange.py is no longer needed --- hubploy/commitrange.py | 44 ------------------------------------------ 1 file changed, 44 deletions(-) delete mode 100644 hubploy/commitrange.py diff --git a/hubploy/commitrange.py b/hubploy/commitrange.py deleted file 
mode 100644 index 85052bd..0000000 --- a/hubploy/commitrange.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -Util to acquire a git commit range (get_commit_range) that represents the -changes that have triggered a certain CI system to run. - -Current CI systems supported: GitHub Actions. -""" -import os -import json - -from hubploy.utils import is_commit - -def get_commit_range(): - """ - Auto detect commit range and return it if we can. - Else return None - """ - if 'GITHUB_ACTIONS' in os.environ: - return get_commit_range_github() - - -def get_commit_range_github(): - """ - Auto detects commit range for pull requests and pushes from within a GitHub - Action job using environment variables and .json file describing the event - triggering the job. - - About env vars: https://help.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables - About event file: https://developer.github.com/webhooks/event-payloads/ - """ - with open(os.environ['GITHUB_EVENT_PATH']) as f: - event = json.load(f) - - # pull_request ref: https://developer.github.com/webhooks/event-payloads/#pull_request - if 'pull_request' in event: - base = event['pull_request']['base']['sha'] - return f'{base}...HEAD' - - # push ref: https://developer.github.com/webhooks/event-payloads/#push - if 'before' in event: - if not is_commit(event['before']): - print(f"A GitHub Actions environment was detected, but the constructed commit range ({event['before']}...HEAD) was invalid. 
This can happen if a git push --force has been run.") - return None - else: - return f"{event['before']}...HEAD" From 22cc0d79986b771a1d1291af1608de6c65a29999 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Sat, 29 Jun 2024 10:39:28 -0700 Subject: [PATCH 15/40] removing unnecessary imports --- hubploy/__main__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index 3af8f05..11c58f8 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -7,7 +7,7 @@ logger = logging.getLogger(__name__) import hubploy -from hubploy import helm, auth, commitrange +from hubploy import helm from argparse import RawTextHelpFormatter From 8bdaea7776d0d600d2a52501c480a2e34ab8d1aa Mon Sep 17 00:00:00 2001 From: shane knapp Date: Mon, 1 Jul 2024 10:28:29 -0700 Subject: [PATCH 16/40] every CLI arg now has help text! --- hubploy/__main__.py | 41 ++++++++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index 11c58f8..06d4cb3 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -40,32 +40,48 @@ def main(): ) deploy_parser.add_argument( - "deployment" + "deployment", + help="The name of the hub to deploy." ) deploy_parser.add_argument( - "chart" + "chart", + help="The path to the main hub chart." ) deploy_parser.add_argument( "environment", - choices=["develop", "staging", "prod"] + choices=["develop", "staging", "prod"], + help="The environment to deploy to." ) deploy_parser.add_argument( "--namespace", - default=None + default=None, + help="Helm option: the namespace to deploy to. 
If not specified, " + + "the namespace will be derived from the environment" ) deploy_parser.add_argument( "--set", action="append", + help="Helm option: set values on the command line (can specify " + + "multiple or separate values with commas: key1=val1,key2=val2)" ) deploy_parser.add_argument( "--set-string", action="append", + help="Helm option: set STRING values on the command line (can " + + "specify multiple or separate values with commas: key1=val1,key2=val2)" ) deploy_parser.add_argument( "--version", + help="Helm option: specify a version constraint for the chart " + + "version to use. This constraint can be a specific tag (e.g. 1.1.1) " + + "or it may reference a valid range (e.g. ^2.0.0). If this is not " + + "specified, the latest version is used." ) deploy_parser.add_argument( - "--timeout" + "--timeout", + help="Helm option: time in seconds to wait for any individual " + + "Kubernetes operation (like Jobs for hooks, etc). Defaults to 300 " + + "seconds." ) deploy_parser.add_argument( "--force", @@ -73,11 +89,16 @@ def main(): ) deploy_parser.add_argument( "--atomic", - action="store_true" + action="store_true", + help="Helm option: if set, upgrade process rolls back changes made " + + "in case of failed upgrade. The --wait flag will be set automatically " + + "if --atomic is used." ) deploy_parser.add_argument( "--cleanup-on-fail", - action="store_true" + action="store_true", + help="Helm option: allow deletion of new resources created in this " + + "upgrade when upgrade fails." ) deploy_parser.add_argument( "--dry-run", @@ -88,13 +109,15 @@ def main(): deploy_parser.add_argument( "--image-overrides", nargs="+", - help=textwrap.dedent("""\ + help=textwrap.dedent( + """\ Override one or more images and tags to deploy. Format is:\n : : ...\n \n IMPORTANT: The order of images passed in must match the order in which they appear in hubploy.yaml and separated by spaces without quotes. You must always specify a tag when overriding images. 
- """) + """ + ) ) args = argparser.parse_args() From 0cd4295975901dcf70911a6194e6b0d6a7e7a24a Mon Sep 17 00:00:00 2001 From: shane knapp Date: Mon, 1 Jul 2024 10:36:49 -0700 Subject: [PATCH 17/40] reorder imports --- hubploy/auth.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/hubploy/auth.py b/hubploy/auth.py index 836a1a6..d67cfeb 100644 --- a/hubploy/auth.py +++ b/hubploy/auth.py @@ -14,13 +14,12 @@ import tempfile import boto3 -from hubploy.config import get_config from contextlib import contextmanager - +from hubploy.config import get_config from ruamel.yaml import YAML from ruamel.yaml.scanner import ScannerError -yaml = YAML(typ="rt") +yaml = YAML(typ="rt") @contextmanager def registry_auth(deployment, push, check_registry): From 3b3b5fc34ef1ec68d4436281dcb35b8207aeaa0b Mon Sep 17 00:00:00 2001 From: shane knapp Date: Mon, 1 Jul 2024 11:56:56 -0700 Subject: [PATCH 18/40] ran through ruff delinter and fixed everything --- hubploy/__main__.py | 13 ++++++++----- hubploy/auth.py | 7 +++---- hubploy/config.py | 3 +-- hubploy/helm.py | 26 +++++++++++++------------- 4 files changed, 25 insertions(+), 24 deletions(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index 06d4cb3..fd26f51 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -1,16 +1,15 @@ import argparse +import hubploy import logging import sys import textwrap -logging.basicConfig(stream=sys.stdout, level=logging.WARNING) -logger = logging.getLogger(__name__) - -import hubploy from hubploy import helm - from argparse import RawTextHelpFormatter +logging.basicConfig(stream=sys.stdout, level=logging.WARNING) +logger = logging.getLogger(__name__) + def main(): argparser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) subparsers = argparser.add_subparsers(dest="command") @@ -135,6 +134,10 @@ def main(): debug=False, verbose=False ) + if not config: + raise hubploy.config.DeploymentNotFoundError( + "Deployment '{}' not found in 
hubploy.yaml".format(args.deployment) + ) except hubploy.config.DeploymentNotFoundError as e: print(e, file=sys.stderr) sys.exit(1) diff --git a/hubploy/auth.py b/hubploy/auth.py index d67cfeb..499fa8a 100644 --- a/hubploy/auth.py +++ b/hubploy/auth.py @@ -5,20 +5,19 @@ Current cloud providers supported: gcloud, aws, and azure. """ -import logging -logger = logging.getLogger(__name__) - +import boto3 import json +import logging import os import subprocess import tempfile -import boto3 from contextlib import contextmanager from hubploy.config import get_config from ruamel.yaml import YAML from ruamel.yaml.scanner import ScannerError +logger = logging.getLogger(__name__) yaml = YAML(typ="rt") @contextmanager diff --git a/hubploy/config.py b/hubploy/config.py index 08261a8..74cfe38 100644 --- a/hubploy/config.py +++ b/hubploy/config.py @@ -4,11 +4,10 @@ absolute. """ import logging -logger = logging.getLogger(__name__) - import os from ruamel.yaml import YAML +logger = logging.getLogger(__name__) yaml = YAML(typ="safe") class DeploymentNotFoundError(Exception): diff --git a/hubploy/helm.py b/hubploy/helm.py index b26172d..a5d1274 100644 --- a/hubploy/helm.py +++ b/hubploy/helm.py @@ -17,11 +17,9 @@ Util to deploy a Helm chart (deploy) given hubploy configuration and Helm chart configuration located in accordance to hubploy conventions. 
""" -import logging -logger = logging.getLogger(__name__) - import itertools import kubernetes.config +import logging import os import subprocess @@ -32,6 +30,7 @@ from hubploy.config import get_config from hubploy.auth import decrypt_file, cluster_auth +logger = logging.getLogger(__name__) HELM_EXECUTABLE = os.environ.get("HELM_EXECUTABLE", "helm") @@ -58,7 +57,6 @@ def helm_upgrade( logger.setLevel(logging.DEBUG) logger.info(f"Deploying {name} in namespace {namespace}") - logger.debug(f"Running helm dep up in subdirectory '{chart}'") subprocess.check_call([ HELM_EXECUTABLE, "dep", "up" @@ -67,14 +65,16 @@ def helm_upgrade( # Create namespace explicitly, since helm3 removes support for it # See https://github.com/helm/helm/issues/6794 # helm2 only creates the namespace if it doesn't exist, so we should be fine - logger.debug("Loading kubeconfig for k8s access") kubeconfig = os.environ.get("KUBECONFIG", None) - + logger.debug("Loading kubeconfig for k8s access") try: kubernetes.config.load_kube_config(config_file=kubeconfig) - except: + logger.info(f"Loaded kubeconfig: {kubeconfig}") + except Exception as e: + logger.info(f"Failed to load kubeconfig {kubeconfig} with " + + f"exception:\n{e}\nTrying in-cluster config...") kubernetes.config.load_incluster_config() - + logger.info("Loaded in-cluster kubeconfig") logger.debug( f"Checking for namespace {namespace} and creating if it doesn't exist" ) @@ -211,13 +211,13 @@ def deploy( if num_images != num_overrides: raise ValueError( f"Number of image overrides ({num_overrides}) must match " + - f"number of images found in " + + "number of images found in " + f"deployments/{deployment}/hubploy.yaml ({num_images})" ) for override in image_overrides: if ":" not in override: raise ValueError( - f"Image override must be in the format " + + "Image override must be in the format " + f":. Got: {override}" ) @@ -228,6 +228,7 @@ def deploy( # We default to one sublevel, but we can do multiple levels. 
if image_overrides is not None: override = image_overrides[count] + override_image, override_tag = override.split(":") print( f"Overriding image {image.name}:{image.tag} to " + f"{override_image}:{override_tag}" @@ -252,7 +253,7 @@ def deploy( ] # Just in time for k8s access, activate the cluster credentials - logger.debug(f"Activating cluster credentials for deployment " + + logger.debug("Activating cluster credentials for deployment " + f"{deployment}" ) stack.enter_context(cluster_auth(deployment, debug, verbose)) @@ -271,6 +272,5 @@ def deploy( debug, verbose, helm_debug, - dry_run, - image_overrides + dry_run ) From f0d2bc1ed333cac7f9ae1647cb6ebd6d0cc382eb Mon Sep 17 00:00:00 2001 From: shane knapp Date: Mon, 1 Jul 2024 12:11:45 -0700 Subject: [PATCH 19/40] minor formatting of help strings --- hubploy/__main__.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index fd26f51..d4fd15b 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -54,8 +54,8 @@ def main(): deploy_parser.add_argument( "--namespace", default=None, - help="Helm option: the namespace to deploy to. If not specified, " + - "the namespace will be derived from the environment" + help="Helm option: the namespace to deploy to. If not specified, " + + "the namespace will be derived from the environment argument." ) deploy_parser.add_argument( "--set", @@ -66,19 +66,19 @@ def main(): deploy_parser.add_argument( "--set-string", action="append", - help="Helm option: set STRING values on the command line (can " + + help="Helm option: set STRING values on the command line (can " + "specify multiple or separate values with commas: key1=val1,key2=val2)" ) deploy_parser.add_argument( "--version", - help="Helm option: specify a version constraint for the chart " + + help="Helm option: specify a version constraint for the chart " + "version to use. This constraint can be a specific tag (e.g. 
1.1.1) " + "or it may reference a valid range (e.g. ^2.0.0). If this is not " + "specified, the latest version is used." ) deploy_parser.add_argument( "--timeout", - help="Helm option: time in seconds to wait for any individual " + + help="Helm option: time in seconds to wait for any individual " + "Kubernetes operation (like Jobs for hooks, etc). Defaults to 300 " + "seconds." ) @@ -89,14 +89,14 @@ def main(): deploy_parser.add_argument( "--atomic", action="store_true", - help="Helm option: if set, upgrade process rolls back changes made " + + help="Helm option: if set, upgrade process rolls back changes made " + "in case of failed upgrade. The --wait flag will be set automatically " + "if --atomic is used." ) deploy_parser.add_argument( "--cleanup-on-fail", action="store_true", - help="Helm option: allow deletion of new resources created in this " + + help="Helm option: allow deletion of new resources created in this " + "upgrade when upgrade fails." ) deploy_parser.add_argument( From 1c2f459031d02bf36eb2da6c2732f0908115f25f Mon Sep 17 00:00:00 2001 From: shane knapp Date: Mon, 1 Jul 2024 15:11:03 -0700 Subject: [PATCH 20/40] add help text for --force --- hubploy/__main__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index d4fd15b..61efa3e 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -84,7 +84,8 @@ def main(): ) deploy_parser.add_argument( "--force", - action="store_true" + action="store_true", + help="Helm option: force resource updates through a replacement strategy." 
) deploy_parser.add_argument( "--atomic", From cc534ccfcdb3d330534854fd445c81fcf79c3206 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Tue, 2 Jul 2024 14:27:07 -0700 Subject: [PATCH 21/40] bump version, and update package metadata --- setup.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/setup.py b/setup.py index 7098e7a..ad2c81b 100644 --- a/setup.py +++ b/setup.py @@ -2,13 +2,11 @@ setuptools.setup( name='hubploy', - version='0.3', - url="https://github.com/yuvipanda/hubploy", - author="Yuvi Panda", + version='0.4', + url="https://github.com/berkeley-dsep-infra/hubploy", + author="Shane Knapp", packages=setuptools.find_packages(), install_requires=[ - 'docker', - 'jupyter-repo2docker>=0.11', 'kubernetes', 'boto3' ], From fa7122f142081593b72cd87c5a66f776ce8fcd5c Mon Sep 17 00:00:00 2001 From: shane knapp Date: Wed, 3 Jul 2024 11:19:11 -0700 Subject: [PATCH 22/40] remove all tests and circleci workflows --- .circleci/config.yml | 55 ------------ .codecov.yml | 2 - Dockerfile | 56 ------------ orb/orb.yml | 102 ---------------------- tests/test_imagebuilder.py | 169 ------------------------------------- 5 files changed, 384 deletions(-) delete mode 100644 .circleci/config.yml delete mode 100644 .codecov.yml delete mode 100644 Dockerfile delete mode 100644 orb/orb.yml delete mode 100644 tests/test_imagebuilder.py diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index cdfd272..0000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,55 +0,0 @@ -version: 2.1 -orbs: - orb-tools: circleci/orb-tools@8.27.5 -jobs: - build: - machine: - image: ubuntu-2204:2023.07.2 - - working_directory: ~/repo - - steps: - # We are explictly not caching any of the dependencies / venv - # pyenv + venv + caching don't seem to get along well. 
- # See https://circleci.com/gh/yuvipanda/hubploy/154?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link - # for an example, causing a setuptools version mismatch - - checkout - - - run: - name: setup dependencies - command: | - sudo apt-get install tk-dev - pyenv install 3.11.0 - pyenv global 3.11.0 - python3 -m venv venv - source venv/bin/activate - pip install --upgrade pip setuptools - pip install -r dev-requirements.txt - pip install -e . - git config --global user.email "ci@circleci" - git config --global user.name "ci" - - - run: - name: run tests - command: | - source venv/bin/activate - mkdir -p test-reports/unit - py.test --cov=hubploy --junitxml=test-reports/unit/results.xml tests/ || true - - - run: - name: upload coverage info to codecov - command: | - source venv/bin/activate - codecov || true - - - store_test_results: - path: test-reports - -workflows: - test-code: - jobs: - - build - test-orb: - jobs: - - orb-tools/lint: - lint-dir: orb/ diff --git a/.codecov.yml b/.codecov.yml deleted file mode 100644 index 462dc90..0000000 --- a/.codecov.yml +++ /dev/null @@ -1,2 +0,0 @@ -# show coverage in CI status, not as a comment. 
-comment: off diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index b9ae13a..0000000 --- a/Dockerfile +++ /dev/null @@ -1,56 +0,0 @@ -FROM python:3.11-slim-buster - -# Software in Dockerfile to manually bump versions on: -# - gcloud: https://cloud.google.com/sdk/docs/downloads-versioned-archives -# - helm: https://github.com/helm/helm/releases -# - sops: https://github.com/mozilla/sops/releases - -RUN apt-get update \ - && apt-get install --yes --no-install-recommends \ - amazon-ecr-credential-helper \ - curl \ - file \ - git \ - git-crypt \ - unzip \ - && rm -rf /var/lib/apt/lists/* - -# Install gcloud CLI -# Force gcloud to run on python3 ugh -ENV CLOUDSDK_PYTHON python3 -ENV PATH=/opt/google-cloud-sdk/bin:${PATH} -RUN curl -sSL https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-298.0.0-linux-x86_64.tar.gz | tar -xzf - -C /opt/ - -# Install aws CLI -ENV PATH=/opt/awscli/bin:${PATH} -RUN cd /tmp && \ - curl -sSL https://d1vvhvl2y92vvt.cloudfront.net/awscli-exe-linux-x86_64.zip -o awscliv2.zip && \ - unzip -qq awscliv2.zip && \ - ./aws/install -i /opt/awscli -b /opt/awscli/bin - -# Install SOPS -RUN cd /tmp && \ - curl -sSL https://github.com/mozilla/sops/releases/download/v3.6.1/sops_3.6.1_amd64.deb -o sops.deb && \ - dpkg -i ./sops.deb && \ - rm sops.deb - -# Download helm v2/v3 to helm2/helm3 and symlink helm2 to helm. Make hubploy use -# a specific binary with HELM_EXECUTABLE environment variable. 
-RUN cd /tmp && mkdir helm && \ - curl -sSL https://get.helm.sh/helm-v2.17.0-linux-amd64.tar.gz | tar -xzf - -C helm && \ - mv helm/linux-amd64/helm /usr/local/bin/helm2 && \ - curl -sSL https://get.helm.sh/helm-v3.4.1-linux-amd64.tar.gz | tar -xzf - -C helm && \ - mv helm/linux-amd64/helm /usr/local/bin/helm3 && \ - rm -rf helm && \ - ln -s /usr/local/bin/helm2 /usr/local/bin/helm - -# Setup a virtual environment -ENV VENV_PATH=/opt/venv -ENV PATH=${VENV_PATH}:${PATH} -RUN python3 -m venv ${VENV_PATH} - -# Install hubploy -COPY . /srv/repo -RUN python3 -m pip install --no-cache-dir /srv/repo - -ENTRYPOINT ["hubploy"] diff --git a/orb/orb.yml b/orb/orb.yml deleted file mode 100644 index 8395997..0000000 --- a/orb/orb.yml +++ /dev/null @@ -1,102 +0,0 @@ -version: 2.1 -description: "Hubploy orb" - -jobs: - build-image: - description: "Build an image via hubploy" - parameters: - deployment: - type: string - push: - type: boolean - default: false - - docker: - - image: python:3.11.9-slim-bullseye - - working_directory: ~/repo - - steps: - - checkout - - run: - name: Install git, git-crypt & curl - command: | - apt-get update && apt-get install --yes --no-install-recommends git curl git-crypt - - - restore_cache: - keys: - - v3.11-dependencies-{{ checksum "requirements.txt" }} - # fallback to using the latest cache if no exact match is found - - v3.11-dependencies- - - - run: - name: install dependencies - command: | - python3 -m venv venv - source venv/bin/activate - pip install --upgrade -r requirements.txt - echo 'export PATH="${HOME}/repo/venv/bin:$PATH"' >> ${BASH_ENV} - - - unless: - condition: << parameters.push >> - steps: - - run: - name: Determine range of commits we are building - command: | - # CircleCI doesn't have equivalent to Travis' COMMIT_RANGE - COMMIT_RANGE=$(./.circleci/get-commit-range.py) - echo ${COMMIT_RANGE} - echo "export COMMIT_RANGE='${COMMIT_RANGE}'" >> ${BASH_ENV} - - - when: - condition: << parameters.push >> - steps: - - run: - name: 
Install google cloud sdk - command: | - curl -sSL https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-265.0.0-linux-x86_64.tar.gz | tar -xzf - - # Be careful with quote ordering here. ${PATH} must not be expanded - # Don't use ~ here - bash can interpret PATHs containing ~, but most other things can't. - # Always use full PATHs in PATH! - echo 'export PATH="${HOME}/repo/google-cloud-sdk/bin:${PATH}"' >> ${BASH_ENV} - # Try to tell cloud sdk to use python3 - echo 'export CLOUDSDK_PYTHON=python3' >> ${BASH_ENV} - - run: - name: Install AWS CLI - command: | - cd /tmp - curl "https://d1vvhvl2y92vvt.cloudfront.net/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" - unzip awscliv2.zip - ./aws/install -i ${HOME}/awscli -b ${HOME}/awscli/bin - # Be careful with quote ordering here. ${PATH} must not be expanded - # Don't use ~ here - bash can interpret PATHs containing ~, but most other things can't. - # Always use full PATHs in PATH! - echo 'export PATH="${HOME}/awscli/bin:${PATH}"' >> ${BASH_ENV} - - - - setup_remote_docker - - save_cache: - paths: - - ./venv - key: v3.7-dependencies-{{ checksum "requirements.txt" }} - - - when: - condition: << parameters.push >> - steps: - - run: - name: Unlock our secrets - command: | - echo "${GIT_CRYPT_KEY}" | base64 -d > ~/repo/key - git crypt unlock ~/repo/key - rm ~/repo/key - - - run: - name: Build image if needed - command: | - if [ "<< parameters.push >>" == "true" ]; then - HUBPLOY_ARGS="--check-registry --push" - else - HUBPLOY_ARGS="--commit-range ${COMMIT_RANGE}" - fi - hubploy build << parameters.deployment >> ${HUBPLOY_ARGS} - no_output_timeout: 60m diff --git a/tests/test_imagebuilder.py b/tests/test_imagebuilder.py deleted file mode 100644 index 218a90e..0000000 --- a/tests/test_imagebuilder.py +++ /dev/null @@ -1,169 +0,0 @@ -import socket -import tempfile -import contextlib -import pathlib -import os -import pytest -import subprocess -import docker -import time -import docker.errors - -from hubploy 
import config, utils - - -@pytest.fixture -def git_repo(): - """ - Fixture to create a git repo - """ - with tempfile.TemporaryDirectory() as d: - subprocess.check_output(['git', 'init'], cwd=d) - yield pathlib.Path(d) - - -def git(repo_dir, *cmd): - with cwd(repo_dir): - subprocess.check_call(['git'] + list(cmd)) - - -@pytest.fixture -def open_port(): - """ - Fixture providing an open port on the host system - """ - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - s.bind(("",0)) - return s.getsockname()[1] - finally: - s.close() - - -@pytest.fixture -def local_registry(open_port): - """ - Fixture to create a local docker registry - """ - if 'DOCKER_REGISTRY' in os.environ: - # We are running in CI, where we already have a local registry - yield os.environ['DOCKER_REGISTRY'] - return - client = docker.from_env() - container = client.containers.run( - 'registry:2', - detach=True, - ports={'5000/tcp': open_port} - ) - time.sleep(2) - try: - yield f'localhost:{open_port}' - finally: - container.stop() - container.remove() - - -@contextlib.contextmanager -def cwd(new_dir): - curdir = os.getcwd() - try: - os.chdir(new_dir) - yield - finally: - os.chdir(curdir) - - -def commit_file(repo_dir, path, contents): - full_path = repo_dir / path - os.makedirs(os.path.dirname(full_path), exist_ok=True) - - with open(full_path, 'w') as f: - f.write(contents) - - git(repo_dir, 'add', path) - git(repo_dir, 'commit', '-m', f'Added {path}') - - -#def test_tag_generation(git_repo): -# """ -# Tag should be last commit of modified image dir -# """ -# commit_file(git_repo, 'image/Dockerfile', 'FROM busybox') -# commit_file(git_repo, 'unrelated/file', 'unrelated') - -# with cwd(git_repo): -# image = config.LocalImage('test-image', 'image') -# assert image.tag == utils.last_modified_commit('image') -# # Make sure tag isn't influenced by changes outside of iamge dir -# assert image.tag != utils.last_modified_commit('unrelated') - -# # Change the Dockerfile and see that the tag 
changes -# commit_file(git_repo, 'image/Dockerfile', 'FROM busybox:latest') -# new_image = config.LocalImage('test-image', 'image') -# assert new_image.tag == utils.last_modified_commit('image') -# assert new_image.tag != image.tag - - -#def test_build_image(git_repo, local_registry): -# """ -# Test building a small image, pushing it and testing it exists -# """ -# commit_file(git_repo, 'image/Dockerfile', 'FROM busybox') - -# with cwd(git_repo): -# image = config.LocalImage(f'{local_registry}/test-build-image', 'image') -# image.build() - -# assert not image.exists_in_registry() - -# image.push() - -# assert image.exists_in_registry() - - -#def test_parent_image_fetching(git_repo, local_registry): -# """ -# Previous tags of images should be fetched before building new one -# """ -# image_name = f'{local_registry}/parent-image-fetching' - -# with cwd(git_repo): - # Create an image directory with a simple dockerfile -# commit_file(git_repo, 'image/Dockerfile', -# """ -# FROM busybox -# RUN echo 1 > /number -# """) -# first_image = config.LocalImage(image_name, 'image') -# first_image.build() - -# # Image shouldn't exist in registry until we push it -# assert not first_image.exists_in_registry() -# first_image.push() - -# assert first_image.exists_in_registry() - -# client = docker.from_env() - - # Remove it locally after pushing it, and make sure it is removed - # This lets us test if the pulling actually worked -# client.images.remove(first_image.image_spec) - -# with pytest.raises(docker.errors.ImageNotFound): -# client.images.get(first_image.image_spec) - - # Update the image directory -# commit_file(git_repo, 'image/Dockerfile', -# """ -# FROM busybox -# RUN echo 2 > /number -# """) - -# second_image = config.LocalImage(image_name, 'image') - -# # We must be able to tell that the first image tag is a possible parent of the second -# assert first_image.tag in second_image.get_possible_parent_tags() - -# # Fetching the parents of the second image should bring the 
first docker image locally -# second_image.fetch_parent_image() -# assert client.images.get(first_image.image_spec) \ No newline at end of file From c3e5b43ce6de210b6adf6319469f6b929416eeca Mon Sep 17 00:00:00 2001 From: shane knapp Date: Wed, 7 Aug 2024 09:53:30 -0700 Subject: [PATCH 23/40] better handling of no image tags --- hubploy/config.py | 35 +++++++++++++++++------------------ hubploy/helm.py | 32 ++++++++++++++++++++++---------- 2 files changed, 39 insertions(+), 28 deletions(-) diff --git a/hubploy/config.py b/hubploy/config.py index 74cfe38..e651262 100644 --- a/hubploy/config.py +++ b/hubploy/config.py @@ -26,14 +26,14 @@ class RemoteImage: """ def __init__(self, name, - tag, + tag=None, helm_substitution_path="jupyterhub.singleuser.image" ): """ Define an Image from the hubploy config name: Fully qualified name of image - tag: Tag of image (latest or github hash) + tag: Tag of image (github hash) helm_substitution_path: Dot separated path in a helm file that should be populated with this image spec """ @@ -47,7 +47,11 @@ def __init__(self, self.name = name self.tag = tag self.helm_substitution_path = helm_substitution_path - self.image_spec = f"{self.name}:{self.tag}" + + if self.tag is None: + self.image_spec = f"{self.name}" + else: + self.image_spec = f"{self.name}:{self.tag}" def get_config(deployment, debug=False, verbose=False): """ @@ -77,13 +81,12 @@ def get_config(deployment, debug=False, verbose=False): if "image_name" in images_config: if ":" in images_config["image_name"]: image_name, tag = images_config["image_name"].split(":") + images = [{ + "name": image_name, + "tag": tag + }] else: - image_name = images_config["image_name"] - tag = "latest" - images = [{ - "name": image_name, - "tag": tag - }] + images = [{"name": images_config["image_name"]}] else: # Multiple images are being deployed @@ -95,16 +98,12 @@ def get_config(deployment, debug=False, verbose=False): logger.info( f"Tag for {image_name}: {tag}" ) + images.append({ + "name": 
image_name, + "tag": tag, + }) else: - image_name = i["name"] - tag = "latest" - logger.info( - f"No tag specified for {image_name}. Using 'latest'" - ) - images.append({ - "name": image_name, - "tag": tag, - }) + images.append({"name": i["name"]}) config["images"]["images"] = [RemoteImage(**i) for i in images] diff --git a/hubploy/helm.py b/hubploy/helm.py index a5d1274..dabbed2 100644 --- a/hubploy/helm.py +++ b/hubploy/helm.py @@ -235,16 +235,28 @@ def deploy( ) image.name = override_image image.tag = override_tag - logger.info( - f"Using image {image.name}:{image.tag} for " + - f"{image.helm_substitution_path}" - ) - helm_config_overrides_string.append( - f"{image.helm_substitution_path}.tag={image.tag}" - ) - helm_config_overrides_string.append( - f"{image.helm_substitution_path}.name={image.name}" - ) + + if image.tag is not None: + logger.info( + f"Using image {image.name}:{image.tag} for " + + f"{image.helm_substitution_path}" + ) + helm_config_overrides_string.append( + f"{image.helm_substitution_path}.tag={image.tag}" + ) + helm_config_overrides_string.append( + f"{image.helm_substitution_path}.name={image.name}" + ) + else: + logger.info( + f"Using image {image.name} for " + + f"{image.helm_substitution_path}" + ) + helm_config_overrides_string.append( + f"{image.helm_substitution_path}.name={image.name}" + ) + + count+=1 with ExitStack() as stack: From 63b33f98ac36e93747b4fbc61c1f16c993c1d4d9 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Fri, 16 Aug 2024 15:15:30 -0700 Subject: [PATCH 24/40] remove container registry auth code --- hubploy/auth.py | 205 ---------------------------------------------- hubploy/config.py | 13 --- 2 files changed, 218 deletions(-) diff --git a/hubploy/auth.py b/hubploy/auth.py index 499fa8a..6967310 100644 --- a/hubploy/auth.py +++ b/hubploy/auth.py @@ -20,211 +20,6 @@ logger = logging.getLogger(__name__) yaml = YAML(typ="rt") -@contextmanager -def registry_auth(deployment, push, check_registry): - """ - Do appropriate 
registry authentication for given deployment - """ - - if push or check_registry: - - config = get_config(deployment) - - if "images" in config and "registry" in config["images"]: - registry = config["images"]["registry"] - provider = registry.get("provider") - if provider == "gcloud": - yield from registry_auth_gcloud( - deployment, **registry["gcloud"] - ) - elif provider == "aws": - yield from registry_auth_aws( - deployment, **registry["aws"] - ) - elif provider == "azure": - yield from registry_auth_azure( - deployment, **registry["azure"] - ) - elif provider == "dockerconfig": - yield from registry_auth_dockercfg( - deployment, **registry["dockerconfig"] - ) - else: - raise ValueError( - f"Unknown provider {provider} found in hubploy.yaml" - ) - else: - # We actually don't need to auth, but we are yielding anyway - # contextlib.contextmanager does not like it when you don't yield - yield - -def registry_auth_dockercfg(deployment, filename): - encrypted_file_path = os.path.join( - "deployments", deployment, "secrets", filename - ) - - # DOCKER_CONFIG actually points to a *directory*, not a file. - # It should contain a `.config.json` file with our auth config - # We decrypt our docker config file, symlink it inside a new - # temporary directory that we'll set DOCKER_CONFIG to - # Our temporary directory (with symlink) and the decrypted - # file will be deleted via the contextmanagers. - orig_dockercfg = os.environ.get("DOCKER_CONFIG", None) - with tempfile.TemporaryDirectory() as d: - with decrypt_file(encrypted_file_path) as auth_file_path: - try: - dst_path = os.path.join(d, "config.json") - os.symlink(auth_file_path, dst_path) - os.environ["DOCKER_CONFIG"] = d - yield - finally: - unset_env_var("DOCKER_CONFIG", orig_dockercfg) - -def registry_auth_gcloud(deployment, project, service_key): - """ - Setup GCR authentication with a service_key - - This changes *global machine state* on where docker can push to! 
- """ - encrypted_service_key_path = os.path.join( - "deployments", deployment, "secrets", service_key - ) - with decrypt_file(encrypted_service_key_path) as decrypted_service_key_path: - subprocess.check_call([ - "gcloud", "auth", - "activate-service-account", - "--key-file", os.path.abspath(decrypted_service_key_path) - ]) - - subprocess.check_call([ - "gcloud", "auth", "configure-docker" - ]) - - yield - - -@contextmanager -def _auth_aws(deployment, service_key=None, role_arn=None, role_session_name=None): - """ - This helper contextmanager will update AWS_SHARED_CREDENTIALS_FILE if - service_key is provided and AWS_SESSION_TOKEN if role_arn is provided. - """ - # validate arguments - if bool(service_key) == bool(role_arn): - raise Exception("AWS authentication require either service_key or role_arn, but not both.") - if role_arn: - assert role_session_name, "always pass role_session_name along with role_arn" - - try: - if service_key: - original_credential_file_loc = os.environ.get("AWS_SHARED_CREDENTIALS_FILE", None) - - # Get path to service_key and validate its around - service_key_path = os.path.join( - "deployments", deployment, "secrets", service_key - ) - if not os.path.isfile(service_key_path): - raise FileNotFoundError( - f"The service_key file {service_key_path} does not exist") - - os.environ["AWS_SHARED_CREDENTIALS_FILE"] = service_key_path - - elif role_arn: - original_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID", None) - original_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", None) - original_session_token = os.environ.get("AWS_SESSION_TOKEN", None) - - sts_client = boto3.client("sts") - assumed_role_object = sts_client.assume_role( - RoleArn=role_arn, - RoleSessionName=role_session_name - ) - - creds = assumed_role_object["Credentials"] - os.environ["AWS_ACCESS_KEY_ID"] = creds["AccessKeyId"] - os.environ["AWS_SECRET_ACCESS_KEY"] = creds["SecretAccessKey"] - os.environ["AWS_SESSION_TOKEN"] = creds["SessionToken"] - - # return 
until context exits - yield - - finally: - if service_key: - unset_env_var("AWS_SHARED_CREDENTIALS_FILE", original_credential_file_loc) - elif role_arn: - unset_env_var("AWS_ACCESS_KEY_ID", original_access_key_id) - unset_env_var("AWS_SECRET_ACCESS_KEY", original_secret_access_key) - unset_env_var("AWS_SESSION_TOKEN", original_session_token) - - -def registry_auth_aws(deployment, account_id, region, service_key=None, role_arn=None): - """ - Setup AWS authentication to ECR container registry - - This changes *global machine state* on where docker can push to! - """ - with _auth_aws(deployment, service_key=service_key, role_arn=role_arn, role_session_name="hubploy-registry-auth"): - # FIXME: Use a temporary docker config - # Requires amazon-ecr-credential-helper to already be installed - # this adds necessary line to authenticate docker with ecr - docker_config_dir = os.path.expanduser("~/.docker") - os.makedirs(docker_config_dir, exist_ok=True) - docker_config = os.path.join(docker_config_dir, "config.json") - if os.path.exists(docker_config): - with open(docker_config, "r") as f: - config = json.load(f) - else: - config = {} - - registry = f"{account_id}.dkr.ecr.{region}.amazonaws.com" - config.setdefault("credHelpers", {})[registry] = "ecr-login" - with open(docker_config, "w") as f: - json.dump(config, f) - - yield - - -def registry_auth_azure(deployment, resource_group, registry, auth_file): - """ - Azure authentication for ACR - - In hubploy.yaml include: - - registry: - provider: azure - azure: - resource_group: resource_group_name - registry: registry_name - auth_file: azure_auth_file.yaml - - The azure_service_principal.json file should have the following - keys: appId, tenant, password. This is the format produced - by the az command when creating a service principal. 
- See https://docs.microsoft.com/en-us/azure/aks/kubernetes-service-principal - """ - - # parse Azure auth file - auth_file_path = os.path.join("deployments", deployment, "secrets", auth_file) - with open(auth_file_path) as f: - auth = yaml.load(f) - - # log in - subprocess.check_call([ - "az", "login", "--service-principal", - "--user", auth["appId"], - "--tenant", auth["tenant"], - "--password", auth["password"] - ]) - - # log in to ACR - subprocess.check_call([ - "az", "acr", "login", - "--name", registry - ]) - - yield - - @contextmanager def cluster_auth(deployment, debug=False, verbose=False): """ diff --git a/hubploy/config.py b/hubploy/config.py index e651262..e65a7f8 100644 --- a/hubploy/config.py +++ b/hubploy/config.py @@ -107,19 +107,6 @@ def get_config(deployment, debug=False, verbose=False): config["images"]["images"] = [RemoteImage(**i) for i in images] - # Backwards compatibility checker for images block - if config["images"]["registry"]["provider"] == "aws" and \ - "project" in config["images"]["registry"]["aws"]: - config["images"]["registry"]["aws"]["account_id"] = \ - config["images"]["registry"]["aws"]["project"] - del config["images"]["registry"]["aws"]["project"] - - if config["images"]["registry"]["provider"] == "aws" \ - and "zone" in config["images"]["registry"]["aws"]: - config["images"]["registry"]["aws"]["region"] = \ - config["images"]["registry"]["aws"]["zone"] - del config["images"]["registry"]["aws"]["zone"] - # Backwards compatibility checker for cluster block if config["cluster"]["provider"] == "aws" and \ "project" in config["cluster"]["aws"]: From c05f093bc27092acb203c506c12f2145e30b885c Mon Sep 17 00:00:00 2001 From: shane knapp Date: Fri, 23 Aug 2024 15:06:49 -0700 Subject: [PATCH 25/40] exit if CI environment is detected with helm debug or dry run specified --- hubploy/__main__.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index 61efa3e..3a23bf8 100644 --- 
a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -1,6 +1,7 @@ import argparse import hubploy import logging +import os import sys import textwrap @@ -128,6 +129,13 @@ def main(): logger.setLevel(logging.DEBUG) logger.info(args) + is_on_ci = os.environ.get("CI", False) + if is_on_ci: + if args.helm_debug or args.dry_run: + print("--helm-debug and --dry-run are not allowed to be used in a CI environment.") + print("Exiting...") + sys.exit(1) + # Attempt to load the config early, fail if it doesn't exist or is invalid try: config = hubploy.config.get_config( From fca6412d825e33edf7bd677f01d2ecbd0389f808 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Fri, 23 Aug 2024 15:10:53 -0700 Subject: [PATCH 26/40] explain CI limitations in help strings --- hubploy/__main__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index 3a23bf8..af4ca8e 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -25,7 +25,9 @@ def main(): "-D", "--helm-debug", action="store_true", - help="Enable Helm debug output." + help="Enable Helm debug output. This is not allowed to be used in a " + + "CI environment due to secrets being displated in plain text, and " + + "the script will exit." ) argparser.add_argument( "-v", @@ -105,7 +107,9 @@ def main(): "--dry-run", action="store_true", help="Dry run the helm upgrade command. This also renders the " + - "chart to STDOUT." + "chart to STDOUT. This is not allowed to be used in a " + + "CI environment due to secrets being displated in plain text, and " + + "the script will exit." 
) deploy_parser.add_argument( "--image-overrides", From 9352e222d0ec71c25479d23e601e8d66c81d78d4 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Fri, 23 Aug 2024 15:19:20 -0700 Subject: [PATCH 27/40] add env var requirement for helm debug and dry-run args --- hubploy/__main__.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index af4ca8e..a4b9c82 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -27,7 +27,8 @@ def main(): action="store_true", help="Enable Helm debug output. This is not allowed to be used in a " + "CI environment due to secrets being displated in plain text, and " + - "the script will exit." + "the script will exit. To enable this option, set a local environment " + + "varible HUBPLOY_LOCAL_DEBUG=true" ) argparser.add_argument( "-v", @@ -109,7 +110,8 @@ def main(): help="Dry run the helm upgrade command. This also renders the " + "chart to STDOUT. This is not allowed to be used in a " + "CI environment due to secrets being displated in plain text, and " + - "the script will exit." + "the script will exit. To enable this option, set a local environment " + + "varible HUBPLOY_LOCAL_DEBUG=true" ) deploy_parser.add_argument( "--image-overrides", @@ -139,6 +141,14 @@ def main(): print("--helm-debug and --dry-run are not allowed to be used in a CI environment.") print("Exiting...") sys.exit(1) + else: + if args.helm_debug or args.dry_run: + if os.environ.get("HUBPLOY_LOCAL_DEBUG", False): + print("Local debug mode enabled. 
Proceeding with --helm-debug and --dry-run.") + else: + print("To enable local debug mode, set a local environment variable HUBPLOY_LOCAL_DEBUG=true") + print("Exiting...") + sys.exit(1) # Attempt to load the config early, fail if it doesn't exist or is invalid try: From bd9902f282e8573e5070ee7a477d10284b2c37e3 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Sat, 31 Aug 2024 15:44:08 -0700 Subject: [PATCH 28/40] add comprehensive debugging/verbose output for auth module --- hubploy/auth.py | 41 +++++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/hubploy/auth.py b/hubploy/auth.py index 6967310..071312e 100644 --- a/hubploy/auth.py +++ b/hubploy/auth.py @@ -44,6 +44,10 @@ def cluster_auth(deployment, debug=False, verbose=False): f"Attempting to authenticate to {cluster} with " + "existing kubeconfig." ) + logger.debug( + f"Using kubeconfig file " + + "deploylemts/{deployment}/secrets/{cluster['kubeconfig']['filename']}" + ) encrypted_kubeconfig_path = os.path.join( "deployments", deployment, @@ -98,18 +102,24 @@ def cluster_auth_gcloud( "deployments", deployment, "secrets", service_key ) with decrypt_file(encrypted_service_key_path) as decrypted_service_key_path: - subprocess.check_call([ + gcloud_auth_command = [ "gcloud", "auth", "activate-service-account", "--key-file", os.path.abspath(decrypted_service_key_path) - ]) + ] + logger.info(f"Activating service account for {project}") + logger.debug(f"Running gcloud command: {gcloud_auth_command}") + subprocess.check_call(gcloud_auth_command) - subprocess.check_call([ + gcloud_cluster_credential_command = [ "gcloud", "container", "clusters", f"--zone={zone}", f"--project={project}", "get-credentials", cluster - ]) + ] + logger.info(f"Getting credentials for {cluster} in {zone}") + logger.debug(f"Running gcloud command: {gcloud_cluster_credential_command}") + subprocess.check_call(gcloud_cluster_credential_command) yield @@ -215,6 +225,7 @@ def 
decrypt_file(encrypted_path): # We must first determine if the file is using sops # sops files are JSON/YAML with a `sops` key. So we first check # if the file is valid JSON/YAML, and then if it has a `sops` key + logger.info(f"Decrypting {encrypted_path}") with open(encrypted_path) as f: _, ext = os.path.splitext(encrypted_path) # Support the (clearly wrong) people who use .yml instead of .yaml @@ -232,14 +243,24 @@ def decrypt_file(encrypted_path): return if "sops" not in encrypted_data: + logger.info("File is not sops encrypted, returning path") yield encrypted_path return - # If file has a `sops` key, we assume it's sops encrypted - with tempfile.NamedTemporaryFile() as f: - subprocess.check_call([ + else: + # If file has a `sops` key, we assume it's sops encrypted + sops_command = [ "sops", - "--output", f.name, "--decrypt", encrypted_path - ]) - yield f.name + ] + + logger.info("File is sops encrypted, decrypting...") + logger.debug(f"Executing: {sops_command} plus output to a temporary file") + with tempfile.NamedTemporaryFile() as f: + sops_command += ["--output", f.name] + subprocess.check_call([ + "sops", + "--output", f.name, + "--decrypt", encrypted_path + ]) + yield f.name From 3bed611d360dada911c8b775e7c958f30c4d3b29 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Sat, 31 Aug 2024 15:52:34 -0700 Subject: [PATCH 29/40] remove this line --- hubploy/auth.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hubploy/auth.py b/hubploy/auth.py index 071312e..15748ca 100644 --- a/hubploy/auth.py +++ b/hubploy/auth.py @@ -257,7 +257,6 @@ def decrypt_file(encrypted_path): logger.info("File is sops encrypted, decrypting...") logger.debug(f"Executing: {sops_command} plus output to a temporary file") with tempfile.NamedTemporaryFile() as f: - sops_command += ["--output", f.name] subprocess.check_call([ "sops", "--output", f.name, From 683fc68d2c05780887031f414e7eb22908313e1a Mon Sep 17 00:00:00 2001 From: shane knapp Date: Sat, 31 Aug 2024 16:26:57 -0700 Subject: 
[PATCH 30/40] final bits of debugging/info output --- hubploy/auth.py | 22 ++++++++++++++-------- hubploy/config.py | 1 + hubploy/helm.py | 5 +++-- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/hubploy/auth.py b/hubploy/auth.py index 15748ca..4f6a256 100644 --- a/hubploy/auth.py +++ b/hubploy/auth.py @@ -108,7 +108,8 @@ def cluster_auth_gcloud( "--key-file", os.path.abspath(decrypted_service_key_path) ] logger.info(f"Activating service account for {project}") - logger.debug(f"Running gcloud command: {gcloud_auth_command}") + logger.debug(f"Running gcloud command: " + + " ".join(x for x in gcloud_auth_command)) subprocess.check_call(gcloud_auth_command) gcloud_cluster_credential_command = [ @@ -118,7 +119,8 @@ def cluster_auth_gcloud( "get-credentials", cluster ] logger.info(f"Getting credentials for {cluster} in {zone}") - logger.debug(f"Running gcloud command: {gcloud_cluster_credential_command}") + logger.debug(f"Running gcloud command: " + + " ".join(x for x in gcloud_cluster_credential_command)) subprocess.check_call(gcloud_cluster_credential_command) yield @@ -255,11 +257,15 @@ def decrypt_file(encrypted_path): ] logger.info("File is sops encrypted, decrypting...") - logger.debug(f"Executing: {sops_command} plus output to a temporary file") + logger.debug("Executing: " + + " ".join(sops_command) + + "(with output to a temporary file)") with tempfile.NamedTemporaryFile() as f: - subprocess.check_call([ - "sops", - "--output", f.name, - "--decrypt", encrypted_path - ]) + subprocess.check_call( + [ + "sops", + "--output", f.name, + "--decrypt", encrypted_path + ] + ) yield f.name diff --git a/hubploy/config.py b/hubploy/config.py index e65a7f8..e417449 100644 --- a/hubploy/config.py +++ b/hubploy/config.py @@ -69,6 +69,7 @@ def get_config(deployment, debug=False, verbose=False): raise DeploymentNotFoundError(deployment, deployment_path) config_path = os.path.join(deployment_path, "hubploy.yaml") + logger.info(f"Loading hubploy config from 
{config_path}") with open(config_path) as f: # If config_path isn't found, this will raise a FileNotFoundError with # useful info diff --git a/hubploy/helm.py b/hubploy/helm.py index dabbed2..0be742f 100644 --- a/hubploy/helm.py +++ b/hubploy/helm.py @@ -121,7 +121,8 @@ def helm_upgrade( cmd += itertools.chain(*[["--set", v] for v in config_overrides_implicit]) cmd += itertools.chain(*[["--set-string", v] for v in config_overrides_string]) - logger.debug(f"Running helm upgrade with command: {cmd}") + logger.info(f"Running helm upgrade on {name}.") + logger.debug("Helm upgrade command: " + " ".join(x for x in cmd)) subprocess.check_call(cmd) def deploy( @@ -266,7 +267,7 @@ def deploy( # Just in time for k8s access, activate the cluster credentials logger.debug("Activating cluster credentials for deployment " + - f"{deployment}" + f"{deployment} and performing deployment upgrade." ) stack.enter_context(cluster_auth(deployment, debug, verbose)) helm_upgrade( From 19802d4dd16a6cdf135174b310ab1fdda3a96f6d Mon Sep 17 00:00:00 2001 From: shane knapp Date: Tue, 3 Sep 2024 11:13:20 -0700 Subject: [PATCH 31/40] fixing typo in help text --- hubploy/__main__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index a4b9c82..6d9d27a 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -26,7 +26,7 @@ def main(): "--helm-debug", action="store_true", help="Enable Helm debug output. This is not allowed to be used in a " + - "CI environment due to secrets being displated in plain text, and " + + "CI environment due to secrets being displayed in plain text, and " + "the script will exit. 
To enable this option, set a local environment " + "varible HUBPLOY_LOCAL_DEBUG=true" ) From beb63432566d423bbbd9caf28c6b95bf887021be Mon Sep 17 00:00:00 2001 From: shane knapp Date: Wed, 25 Sep 2024 14:13:24 -0700 Subject: [PATCH 32/40] fixing typo in help text --- hubploy/__main__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index 6d9d27a..c3c6bd0 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -109,7 +109,7 @@ def main(): action="store_true", help="Dry run the helm upgrade command. This also renders the " + "chart to STDOUT. This is not allowed to be used in a " + - "CI environment due to secrets being displated in plain text, and " + + "CI environment due to secrets being displayed in plain text, and " + "the script will exit. To enable this option, set a local environment " + "varible HUBPLOY_LOCAL_DEBUG=true" ) From 3cdb8f79d932476325b218282d4414f784d6f2f1 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Mon, 30 Sep 2024 14:33:17 -0700 Subject: [PATCH 33/40] re-add aws auth contextmanager, coz who knows if we'll ever need it --- hubploy/auth.py | 60 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 4 deletions(-) diff --git a/hubploy/auth.py b/hubploy/auth.py index 4f6a256..9004544 100644 --- a/hubploy/auth.py +++ b/hubploy/auth.py @@ -45,8 +45,8 @@ def cluster_auth(deployment, debug=False, verbose=False): "existing kubeconfig." 
) logger.debug( - f"Using kubeconfig file " + - "deploylemts/{deployment}/secrets/{cluster['kubeconfig']['filename']}" + "Using kubeconfig file " + + f"deploylemts/{deployment}/secrets/{cluster['kubeconfig']['filename']}" ) encrypted_kubeconfig_path = os.path.join( "deployments", @@ -108,7 +108,7 @@ def cluster_auth_gcloud( "--key-file", os.path.abspath(decrypted_service_key_path) ] logger.info(f"Activating service account for {project}") - logger.debug(f"Running gcloud command: " + + logger.debug("Running gcloud command: " + " ".join(x for x in gcloud_auth_command)) subprocess.check_call(gcloud_auth_command) @@ -119,12 +119,64 @@ def cluster_auth_gcloud( "get-credentials", cluster ] logger.info(f"Getting credentials for {cluster} in {zone}") - logger.debug(f"Running gcloud command: " + + logger.debug("Running gcloud command: " + " ".join(x for x in gcloud_cluster_credential_command)) subprocess.check_call(gcloud_cluster_credential_command) yield +@contextmanager +def _auth_aws(deployment, service_key=None, role_arn=None, role_session_name=None): + """ + This helper contextmanager will update AWS_SHARED_CREDENTIALS_FILE if + service_key is provided and AWS_SESSION_TOKEN if role_arn is provided. 
+ """ + # validate arguments + if bool(service_key) == bool(role_arn): + raise Exception("AWS authentication require either service_key or role_arn, but not both.") + if role_arn: + assert role_session_name, "always pass role_session_name along with role_arn" + + try: + if service_key: + original_credential_file_loc = os.environ.get("AWS_SHARED_CREDENTIALS_FILE", None) + + # Get path to service_key and validate its around + service_key_path = os.path.join( + 'deployments', deployment, 'secrets', service_key + ) + if not os.path.isfile(service_key_path): + raise FileNotFoundError( + f'The service_key file {service_key_path} does not exist') + + os.environ["AWS_SHARED_CREDENTIALS_FILE"] = service_key_path + + elif role_arn: + original_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID", None) + original_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", None) + original_session_token = os.environ.get("AWS_SESSION_TOKEN", None) + + sts_client = boto3.client('sts') + assumed_role_object = sts_client.assume_role( + RoleArn=role_arn, + RoleSessionName=role_session_name + ) + + creds = assumed_role_object['Credentials'] + os.environ['AWS_ACCESS_KEY_ID'] = creds['AccessKeyId'] + os.environ['AWS_SECRET_ACCESS_KEY'] = creds['SecretAccessKey'] + os.environ['AWS_SESSION_TOKEN'] = creds['SessionToken'] + + # return until context exits + yield + + finally: + if service_key: + unset_env_var("AWS_SHARED_CREDENTIALS_FILE", original_credential_file_loc) + elif role_arn: + unset_env_var('AWS_ACCESS_KEY_ID', original_access_key_id) + unset_env_var('AWS_SECRET_ACCESS_KEY', original_secret_access_key) + unset_env_var('AWS_SESSION_TOKEN', original_session_token) def cluster_auth_aws( deployment, From d2677689ac86167d984fec8ae41166b7567d4569 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Mon, 30 Sep 2024 14:33:56 -0700 Subject: [PATCH 34/40] fixing newlines at EOF --- hubploy/__main__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py 
index c3c6bd0..3127247 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -186,4 +186,3 @@ def main(): if __name__ == "__main__": main() - From d510ecf1e7954b8763bd3ee9cea81188fe7ac045 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Tue, 8 Oct 2024 14:32:54 -0700 Subject: [PATCH 35/40] formatting from ruff --- hubploy/__main__.py | 102 ++++++++++++------------ hubploy/auth.py | 184 ++++++++++++++++++++++---------------------- hubploy/config.py | 51 ++++++------ hubploy/helm.py | 99 ++++++++++++------------ setup.py | 14 ++-- 5 files changed, 217 insertions(+), 233 deletions(-) diff --git a/hubploy/__main__.py b/hubploy/__main__.py index 3127247..477b6a2 100644 --- a/hubploy/__main__.py +++ b/hubploy/__main__.py @@ -11,6 +11,7 @@ logging.basicConfig(stream=sys.stdout, level=logging.WARNING) logger = logging.getLogger(__name__) + def main(): argparser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) subparsers = argparser.add_subparsers(dest="command") @@ -19,99 +20,89 @@ def main(): "-d", "--debug", action="store_true", - help="Enable tool debug output (not including helm debug)." + help="Enable tool debug output (not including helm debug).", ) argparser.add_argument( "-D", "--helm-debug", action="store_true", - help="Enable Helm debug output. This is not allowed to be used in a " + - "CI environment due to secrets being displayed in plain text, and " + - "the script will exit. To enable this option, set a local environment " + - "varible HUBPLOY_LOCAL_DEBUG=true" + help="Enable Helm debug output. This is not allowed to be used in a " + + "CI environment due to secrets being displayed in plain text, and " + + "the script will exit. To enable this option, set a local environment " + + "varible HUBPLOY_LOCAL_DEBUG=true", ) argparser.add_argument( - "-v", - "--verbose", - action="store_true", - help="Enable verbose output." + "-v", "--verbose", action="store_true", help="Enable verbose output." 
) deploy_parser = subparsers.add_parser( - "deploy", - help="Deploy a chart to the given environment." + "deploy", help="Deploy a chart to the given environment." ) - deploy_parser.add_argument( - "deployment", - help="The name of the hub to deploy." - ) - deploy_parser.add_argument( - "chart", - help="The path to the main hub chart." - ) + deploy_parser.add_argument("deployment", help="The name of the hub to deploy.") + deploy_parser.add_argument("chart", help="The path to the main hub chart.") deploy_parser.add_argument( "environment", choices=["develop", "staging", "prod"], - help="The environment to deploy to." + help="The environment to deploy to.", ) deploy_parser.add_argument( "--namespace", default=None, - help="Helm option: the namespace to deploy to. If not specified, " + - "the namespace will be derived from the environment argument." + help="Helm option: the namespace to deploy to. If not specified, " + + "the namespace will be derived from the environment argument.", ) deploy_parser.add_argument( "--set", action="append", - help="Helm option: set values on the command line (can specify " + - "multiple or separate values with commas: key1=val1,key2=val2)" + help="Helm option: set values on the command line (can specify " + + "multiple or separate values with commas: key1=val1,key2=val2)", ) deploy_parser.add_argument( "--set-string", action="append", - help="Helm option: set STRING values on the command line (can " + - "specify multiple or separate values with commas: key1=val1,key2=val2)" + help="Helm option: set STRING values on the command line (can " + + "specify multiple or separate values with commas: key1=val1,key2=val2)", ) deploy_parser.add_argument( "--version", - help="Helm option: specify a version constraint for the chart " + - "version to use. This constraint can be a specific tag (e.g. 1.1.1) " + - "or it may reference a valid range (e.g. ^2.0.0). If this is not " + - "specified, the latest version is used." 
+ help="Helm option: specify a version constraint for the chart " + + "version to use. This constraint can be a specific tag (e.g. 1.1.1) " + + "or it may reference a valid range (e.g. ^2.0.0). If this is not " + + "specified, the latest version is used.", ) deploy_parser.add_argument( "--timeout", - help="Helm option: time in seconds to wait for any individual " + - "Kubernetes operation (like Jobs for hooks, etc). Defaults to 300 " + - "seconds." + help="Helm option: time in seconds to wait for any individual " + + "Kubernetes operation (like Jobs for hooks, etc). Defaults to 300 " + + "seconds.", ) deploy_parser.add_argument( "--force", action="store_true", - help="Helm option: force resource updates through a replacement strategy." + help="Helm option: force resource updates through a replacement strategy.", ) deploy_parser.add_argument( "--atomic", action="store_true", - help="Helm option: if set, upgrade process rolls back changes made " + - "in case of failed upgrade. The --wait flag will be set automatically " + - "if --atomic is used." + help="Helm option: if set, upgrade process rolls back changes made " + + "in case of failed upgrade. The --wait flag will be set automatically " + + "if --atomic is used.", ) deploy_parser.add_argument( "--cleanup-on-fail", action="store_true", - help="Helm option: allow deletion of new resources created in this " + - "upgrade when upgrade fails." + help="Helm option: allow deletion of new resources created in this " + + "upgrade when upgrade fails.", ) deploy_parser.add_argument( "--dry-run", action="store_true", - help="Dry run the helm upgrade command. This also renders the " + - "chart to STDOUT. This is not allowed to be used in a " + - "CI environment due to secrets being displayed in plain text, and " + - "the script will exit. To enable this option, set a local environment " + - "varible HUBPLOY_LOCAL_DEBUG=true" + help="Dry run the helm upgrade command. This also renders the " + + "chart to STDOUT. 
This is not allowed to be used in a " + + "CI environment due to secrets being displayed in plain text, and " + + "the script will exit. To enable this option, set a local environment " + + "varible HUBPLOY_LOCAL_DEBUG=true", ) deploy_parser.add_argument( "--image-overrides", @@ -124,7 +115,7 @@ def main(): they appear in hubploy.yaml and separated by spaces without quotes. You must always specify a tag when overriding images. """ - ) + ), ) args = argparser.parse_args() @@ -138,25 +129,27 @@ def main(): is_on_ci = os.environ.get("CI", False) if is_on_ci: if args.helm_debug or args.dry_run: - print("--helm-debug and --dry-run are not allowed to be used in a CI environment.") + print( + "--helm-debug and --dry-run are not allowed to be used in a CI environment." + ) print("Exiting...") sys.exit(1) else: if args.helm_debug or args.dry_run: if os.environ.get("HUBPLOY_LOCAL_DEBUG", False): - print("Local debug mode enabled. Proceeding with --helm-debug and --dry-run.") + print( + "Local debug mode enabled. Proceeding with --helm-debug and --dry-run." 
+ ) else: - print("To enable local debug mode, set a local environment variable HUBPLOY_LOCAL_DEBUG=true") + print( + "To enable local debug mode, set a local environment variable HUBPLOY_LOCAL_DEBUG=true" + ) print("Exiting...") sys.exit(1) # Attempt to load the config early, fail if it doesn't exist or is invalid try: - config = hubploy.config.get_config( - args.deployment, - debug=False, - verbose=False - ) + config = hubploy.config.get_config(args.deployment, debug=False, verbose=False) if not config: raise hubploy.config.DeploymentNotFoundError( "Deployment '{}' not found in hubploy.yaml".format(args.deployment) @@ -181,8 +174,9 @@ def main(): args.verbose, args.helm_debug, args.dry_run, - args.image_overrides + args.image_overrides, ) + if __name__ == "__main__": main() diff --git a/hubploy/auth.py b/hubploy/auth.py index 9004544..b0b51f6 100644 --- a/hubploy/auth.py +++ b/hubploy/auth.py @@ -5,6 +5,7 @@ Current cloud providers supported: gcloud, aws, and azure. """ + import boto3 import json import logging @@ -20,6 +21,7 @@ logger = logging.getLogger(__name__) yaml = YAML(typ="rt") + @contextmanager def cluster_auth(deployment, debug=False, verbose=False): """ @@ -41,18 +43,18 @@ def cluster_auth(deployment, debug=False, verbose=False): try: if provider == "kubeconfig": logger.info( - f"Attempting to authenticate to {cluster} with " + - "existing kubeconfig." + f"Attempting to authenticate to {cluster} with " + + "existing kubeconfig." 
) logger.debug( - "Using kubeconfig file " + - f"deploylemts/{deployment}/secrets/{cluster['kubeconfig']['filename']}" + "Using kubeconfig file " + + f"deploylemts/{deployment}/secrets/{cluster['kubeconfig']['filename']}" ) encrypted_kubeconfig_path = os.path.join( "deployments", deployment, "secrets", - cluster["kubeconfig"]["filename"] + cluster["kubeconfig"]["filename"], ) with decrypt_file(encrypted_kubeconfig_path) as kubeconfig_path: os.environ["KUBECONFIG"] = kubeconfig_path @@ -61,38 +63,23 @@ def cluster_auth(deployment, debug=False, verbose=False): # Temporarily kubeconfig file with tempfile.NamedTemporaryFile() as temp_kubeconfig: os.environ["KUBECONFIG"] = temp_kubeconfig.name - logger.info( - f"Attempting to authenticate with {provider}..." - ) + logger.info(f"Attempting to authenticate with {provider}...") if provider == "gcloud": - yield from cluster_auth_gcloud( - deployment, **cluster["gcloud"] - ) + yield from cluster_auth_gcloud(deployment, **cluster["gcloud"]) elif provider == "aws": - yield from cluster_auth_aws( - deployment, **cluster["aws"] - ) + yield from cluster_auth_aws(deployment, **cluster["aws"]) elif provider == "azure": - yield from cluster_auth_azure( - deployment, **cluster["azure"] - ) + yield from cluster_auth_azure(deployment, **cluster["azure"]) else: raise ValueError( - f"Unknown provider {provider} found in " + - "hubploy.yaml" + f"Unknown provider {provider} found in " + "hubploy.yaml" ) finally: unset_env_var("KUBECONFIG", orig_kubeconfig) -def cluster_auth_gcloud( - deployment, - project, - cluster, - zone, - service_key - ): +def cluster_auth_gcloud(deployment, project, cluster, zone, service_key): """ Setup GKE authentication with service_key @@ -103,28 +90,37 @@ def cluster_auth_gcloud( ) with decrypt_file(encrypted_service_key_path) as decrypted_service_key_path: gcloud_auth_command = [ - "gcloud", "auth", + "gcloud", + "auth", "activate-service-account", - "--key-file", os.path.abspath(decrypted_service_key_path) + 
"--key-file", + os.path.abspath(decrypted_service_key_path), ] logger.info(f"Activating service account for {project}") - logger.debug("Running gcloud command: " + - " ".join(x for x in gcloud_auth_command)) + logger.debug( + "Running gcloud command: " + " ".join(x for x in gcloud_auth_command) + ) subprocess.check_call(gcloud_auth_command) gcloud_cluster_credential_command = [ - "gcloud", "container", "clusters", + "gcloud", + "container", + "clusters", f"--zone={zone}", f"--project={project}", - "get-credentials", cluster + "get-credentials", + cluster, ] logger.info(f"Getting credentials for {cluster} in {zone}") - logger.debug("Running gcloud command: " + - " ".join(x for x in gcloud_cluster_credential_command)) + logger.debug( + "Running gcloud command: " + + " ".join(x for x in gcloud_cluster_credential_command) + ) subprocess.check_call(gcloud_cluster_credential_command) yield + @contextmanager def _auth_aws(deployment, service_key=None, role_arn=None, role_session_name=None): """ @@ -133,21 +129,26 @@ def _auth_aws(deployment, service_key=None, role_arn=None, role_session_name=Non """ # validate arguments if bool(service_key) == bool(role_arn): - raise Exception("AWS authentication require either service_key or role_arn, but not both.") + raise Exception( + "AWS authentication require either service_key or role_arn, but not both." 
+ ) if role_arn: assert role_session_name, "always pass role_session_name along with role_arn" try: if service_key: - original_credential_file_loc = os.environ.get("AWS_SHARED_CREDENTIALS_FILE", None) + original_credential_file_loc = os.environ.get( + "AWS_SHARED_CREDENTIALS_FILE", None + ) # Get path to service_key and validate its around service_key_path = os.path.join( - 'deployments', deployment, 'secrets', service_key + "deployments", deployment, "secrets", service_key ) if not os.path.isfile(service_key_path): raise FileNotFoundError( - f'The service_key file {service_key_path} does not exist') + f"The service_key file {service_key_path} does not exist" + ) os.environ["AWS_SHARED_CREDENTIALS_FILE"] = service_key_path @@ -156,16 +157,15 @@ def _auth_aws(deployment, service_key=None, role_arn=None, role_session_name=Non original_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", None) original_session_token = os.environ.get("AWS_SESSION_TOKEN", None) - sts_client = boto3.client('sts') + sts_client = boto3.client("sts") assumed_role_object = sts_client.assume_role( - RoleArn=role_arn, - RoleSessionName=role_session_name + RoleArn=role_arn, RoleSessionName=role_session_name ) - creds = assumed_role_object['Credentials'] - os.environ['AWS_ACCESS_KEY_ID'] = creds['AccessKeyId'] - os.environ['AWS_SECRET_ACCESS_KEY'] = creds['SecretAccessKey'] - os.environ['AWS_SESSION_TOKEN'] = creds['SessionToken'] + creds = assumed_role_object["Credentials"] + os.environ["AWS_ACCESS_KEY_ID"] = creds["AccessKeyId"] + os.environ["AWS_SECRET_ACCESS_KEY"] = creds["SecretAccessKey"] + os.environ["AWS_SESSION_TOKEN"] = creds["SessionToken"] # return until context exits yield @@ -174,18 +174,14 @@ def _auth_aws(deployment, service_key=None, role_arn=None, role_session_name=Non if service_key: unset_env_var("AWS_SHARED_CREDENTIALS_FILE", original_credential_file_loc) elif role_arn: - unset_env_var('AWS_ACCESS_KEY_ID', original_access_key_id) - 
unset_env_var('AWS_SECRET_ACCESS_KEY', original_secret_access_key) - unset_env_var('AWS_SESSION_TOKEN', original_session_token) + unset_env_var("AWS_ACCESS_KEY_ID", original_access_key_id) + unset_env_var("AWS_SECRET_ACCESS_KEY", original_secret_access_key) + unset_env_var("AWS_SESSION_TOKEN", original_session_token) + def cluster_auth_aws( - deployment, - account_id, - cluster, - region, - service_key=None, - role_arn=None - ): + deployment, account_id, cluster, region, service_key=None, role_arn=None +): """ Setup AWS authentication with service_key or with a role @@ -195,21 +191,15 @@ def cluster_auth_aws( deployment, service_key=service_key, role_arn=role_arn, - role_session_name="hubploy-cluster-auth" + role_session_name="hubploy-cluster-auth", ): - subprocess.check_call([ - "aws", "eks", "update-kubeconfig", - "--name", cluster, "--region", region - ]) + subprocess.check_call( + ["aws", "eks", "update-kubeconfig", "--name", cluster, "--region", region] + ) yield -def cluster_auth_azure( - deployment, - resource_group, - cluster, - auth_file - ): +def cluster_auth_azure(deployment, resource_group, cluster, auth_file): """ Azure authentication for AKS @@ -225,35 +215,47 @@ def cluster_auth_azure( The azure_service_principal.json file should have the following keys: appId, tenant, password. - + This is the format produced by the az command when creating a service principal. 
""" # parse Azure auth file - auth_file_path = os.path.join( - "deployments", deployment, "secrets", auth_file - ) + auth_file_path = os.path.join("deployments", deployment, "secrets", auth_file) with open(auth_file_path) as f: auth = yaml.load(f) # log in - subprocess.check_call([ - "az", "login", "--service-principal", - "--user", auth["appId"], - "--tenant", auth["tenant"], - "--password", auth["password"] - ]) + subprocess.check_call( + [ + "az", + "login", + "--service-principal", + "--user", + auth["appId"], + "--tenant", + auth["tenant"], + "--password", + auth["password"], + ] + ) # get cluster credentials - subprocess.check_call([ - "az", "aks", "get-credentials", - "--name", cluster, - "--resource-group", resource_group - ]) + subprocess.check_call( + [ + "az", + "aks", + "get-credentials", + "--name", + cluster, + "--resource-group", + resource_group, + ] + ) yield + def unset_env_var(env_var, old_env_var_value): """ If the old environment variable's value exists, replace the current one @@ -265,9 +267,10 @@ def unset_env_var(env_var, old_env_var_value): if env_var in os.environ: del os.environ[env_var] - if (old_env_var_value is not None): + if old_env_var_value is not None: os.environ[env_var] = old_env_var_value + @contextmanager def decrypt_file(encrypted_path): """ @@ -303,21 +306,14 @@ def decrypt_file(encrypted_path): else: # If file has a `sops` key, we assume it's sops encrypted - sops_command = [ - "sops", - "--decrypt", encrypted_path - ] + sops_command = ["sops", "--decrypt", encrypted_path] logger.info("File is sops encrypted, decrypting...") - logger.debug("Executing: " + - " ".join(sops_command) + - "(with output to a temporary file)") + logger.debug( + "Executing: " + " ".join(sops_command) + " (with output to a temporary file)" + ) with tempfile.NamedTemporaryFile() as f: subprocess.check_call( - [ - "sops", - "--output", f.name, - "--decrypt", encrypted_path - ] + ["sops", "--output", f.name, "--decrypt", encrypted_path] ) yield f.name 
diff --git a/hubploy/config.py b/hubploy/config.py index e417449..b4953d6 100644 --- a/hubploy/config.py +++ b/hubploy/config.py @@ -3,6 +3,7 @@ returns it embedded with a set of LocalImage objects with filesystem paths made absolute. """ + import logging import os from ruamel.yaml import YAML @@ -10,6 +11,7 @@ logger = logging.getLogger(__name__) yaml = YAML(typ="safe") + class DeploymentNotFoundError(Exception): def __init__(self, deployment, path, *args, **kwargs): super().__init__(*args, **kwargs) @@ -24,11 +26,10 @@ class RemoteImage: """ A simple class to represent a remote image """ - def __init__(self, - name, - tag=None, - helm_substitution_path="jupyterhub.singleuser.image" - ): + + def __init__( + self, name, tag=None, helm_substitution_path="jupyterhub.singleuser.image" + ): """ Define an Image from the hubploy config @@ -41,8 +42,8 @@ def __init__(self, # FIXME: Validate name to conform to docker image name guidelines if not name or name.strip() == "": raise ValueError( - "Name of image to be built is not specified. Check " + - "hubploy.yaml of your deployment" + "Name of image to be built is not specified. 
Check " + + "hubploy.yaml of your deployment" ) self.name = name self.tag = tag @@ -53,6 +54,7 @@ def __init__(self, else: self.image_spec = f"{self.name}:{self.tag}" + def get_config(deployment, debug=False, verbose=False): """ Returns hubploy.yaml configuration as a Python dictionary if it exists for @@ -82,10 +84,7 @@ def get_config(deployment, debug=False, verbose=False): if "image_name" in images_config: if ":" in images_config["image_name"]: image_name, tag = images_config["image_name"].split(":") - images = [{ - "name": image_name, - "tag": tag - }] + images = [{"name": image_name, "tag": tag}] else: images = [{"name": images_config["image_name"]}] @@ -96,29 +95,31 @@ def get_config(deployment, debug=False, verbose=False): for i in image_list: if ":" in i["name"]: image_name, tag = i["name"].split(":") - logger.info( - f"Tag for {image_name}: {tag}" + logger.info(f"Tag for {image_name}: {tag}") + images.append( + { + "name": image_name, + "tag": tag, + } ) - images.append({ - "name": image_name, - "tag": tag, - }) else: images.append({"name": i["name"]}) config["images"]["images"] = [RemoteImage(**i) for i in images] # Backwards compatibility checker for cluster block - if config["cluster"]["provider"] == "aws" and \ - "project" in config["cluster"]["aws"]: - config["cluster"]["aws"]["account_id"] = \ - config["cluster"]["aws"]["project"] + if ( + config["cluster"]["provider"] == "aws" + and "project" in config["cluster"]["aws"] + ): + config["cluster"]["aws"]["account_id"] = config["cluster"]["aws"]["project"] del config["cluster"]["aws"]["project"] - if config["cluster"]["provider"] == "aws" and \ - "zone" in config["cluster"]["aws"]: - config["cluster"]["aws"]["region"] = \ - config["cluster"]["aws"]["zone"] + if ( + config["cluster"]["provider"] == "aws" + and "zone" in config["cluster"]["aws"] + ): + config["cluster"]["aws"]["region"] = config["cluster"]["aws"]["zone"] del config["cluster"]["aws"]["zone"] logger.debug(f"Config loaded and parsed: 
{config}") diff --git a/hubploy/helm.py b/hubploy/helm.py index 0be742f..e2188ee 100644 --- a/hubploy/helm.py +++ b/hubploy/helm.py @@ -17,6 +17,7 @@ Util to deploy a Helm chart (deploy) given hubploy configuration and Helm chart configuration located in accordance to hubploy conventions. """ + import itertools import kubernetes.config import logging @@ -49,8 +50,8 @@ def helm_upgrade( debug, verbose, helm_debug, - dry_run - ): + dry_run, +): if verbose: logger.setLevel(logging.INFO) elif debug: @@ -58,9 +59,7 @@ def helm_upgrade( logger.info(f"Deploying {name} in namespace {namespace}") logger.debug(f"Running helm dep up in subdirectory '{chart}'") - subprocess.check_call([ - HELM_EXECUTABLE, "dep", "up" - ], cwd=chart) + subprocess.check_call([HELM_EXECUTABLE, "dep", "up"], cwd=chart) # Create namespace explicitly, since helm3 removes support for it # See https://github.com/helm/helm/issues/6794 @@ -71,13 +70,13 @@ def helm_upgrade( kubernetes.config.load_kube_config(config_file=kubeconfig) logger.info(f"Loaded kubeconfig: {kubeconfig}") except Exception as e: - logger.info(f"Failed to load kubeconfig {kubeconfig} with " + - f"exception:\n{e}\nTrying in-cluster config...") + logger.info( + f"Failed to load kubeconfig {kubeconfig} with " + + f"exception:\n{e}\nTrying in-cluster config..." 
+ ) kubernetes.config.load_incluster_config() logger.info("Loaded in-cluster kubeconfig") - logger.debug( - f"Checking for namespace {namespace} and creating if it doesn't exist" - ) + logger.debug(f"Checking for namespace {namespace} and creating if it doesn't exist") api = CoreV1Api() try: api.read_namespace(namespace) @@ -85,11 +84,7 @@ def helm_upgrade( if e.status == 404: # Create namespace print(f"Namespace {namespace} does not exist, creating it...") - api.create_namespace( - V1Namespace( - metadata=V1ObjectMeta(name=namespace) - ) - ) + api.create_namespace(V1Namespace(metadata=V1ObjectMeta(name=namespace))) else: raise @@ -125,6 +120,7 @@ def helm_upgrade( logger.debug("Helm upgrade command: " + " ".join(x for x in cmd)) subprocess.check_call(cmd) + def deploy( deployment, chart, @@ -141,8 +137,8 @@ def deploy( verbose=False, helm_debug=False, dry_run=False, - image_overrides=None - ): + image_overrides=None, +): """ Deploy a JupyterHub. @@ -181,26 +177,28 @@ def deploy( if namespace is None: namespace = name - helm_config_files = [f for f in [ - os.path.join( - "deployments", deployment, "config", "common.yaml" - ), - os.path.join( - "deployments", deployment, "config", f"{environment}.yaml" - ), - ] if os.path.exists(f)] + helm_config_files = [ + f + for f in [ + os.path.join("deployments", deployment, "config", "common.yaml"), + os.path.join("deployments", deployment, "config", f"{environment}.yaml"), + ] + if os.path.exists(f) + ] logger.debug(f"Using helm config files: {helm_config_files}") - helm_secret_files = [f for f in [ - # Support for secrets in same repo - os.path.join( - "deployments", deployment, "secrets", f"{environment}.yaml" - ), - # Support for secrets in a submodule repo - os.path.join( - "secrets", "deployments", deployment, "secrets", f"{environment}.yaml" - ), - ] if os.path.exists(f)] + helm_secret_files = [ + f + for f in [ + # Support for secrets in same repo + os.path.join("deployments", deployment, "secrets", 
f"{environment}.yaml"), + # Support for secrets in a submodule repo + os.path.join( + "secrets", "deployments", deployment, "secrets", f"{environment}.yaml" + ), + ] + if os.path.exists(f) + ] logger.debug(f"Using helm secret files: {helm_secret_files}") if config.get("images"): @@ -211,15 +209,15 @@ def deploy( if num_images != num_overrides: raise ValueError( - f"Number of image overrides ({num_overrides}) must match " + - "number of images found in " + - f"deployments/{deployment}/hubploy.yaml ({num_images})" + f"Number of image overrides ({num_overrides}) must match " + + "number of images found in " + + f"deployments/{deployment}/hubploy.yaml ({num_images})" ) for override in image_overrides: if ":" not in override: raise ValueError( - "Image override must be in the format " + - f":. Got: {override}" + "Image override must be in the format " + + f":. Got: {override}" ) count = 0 @@ -231,16 +229,16 @@ def deploy( override = image_overrides[count] override_image, override_tag = override.split(":") print( - f"Overriding image {image.name}:{image.tag} to " + - f"{override_image}:{override_tag}" + f"Overriding image {image.name}:{image.tag} to " + + f"{override_image}:{override_tag}" ) image.name = override_image image.tag = override_tag if image.tag is not None: logger.info( - f"Using image {image.name}:{image.tag} for " + - f"{image.helm_substitution_path}" + f"Using image {image.name}:{image.tag} for " + + f"{image.helm_substitution_path}" ) helm_config_overrides_string.append( f"{image.helm_substitution_path}.tag={image.tag}" @@ -250,15 +248,13 @@ def deploy( ) else: logger.info( - f"Using image {image.name} for " + - f"{image.helm_substitution_path}" + f"Using image {image.name} for " + f"{image.helm_substitution_path}" ) helm_config_overrides_string.append( f"{image.helm_substitution_path}.name={image.name}" ) - - count+=1 + count += 1 with ExitStack() as stack: decrypted_secret_files = [ @@ -266,8 +262,9 @@ def deploy( ] # Just in time for k8s access, 
activate the cluster credentials - logger.debug("Activating cluster credentials for deployment " + - f"{deployment} and performing deployment upgrade." + logger.debug( + "Activating cluster credentials for deployment " + + f"{deployment} and performing deployment upgrade." ) stack.enter_context(cluster_auth(deployment, debug, verbose)) helm_upgrade( @@ -285,5 +282,5 @@ def deploy( debug, verbose, helm_debug, - dry_run + dry_run, ) diff --git a/setup.py b/setup.py index ad2c81b..8357149 100644 --- a/setup.py +++ b/setup.py @@ -1,19 +1,15 @@ import setuptools setuptools.setup( - name='hubploy', - version='0.4', + name="hubploy", + version="0.4", url="https://github.com/berkeley-dsep-infra/hubploy", author="Shane Knapp", packages=setuptools.find_packages(), - install_requires=[ - 'kubernetes', - 'boto3' - ], + install_requires=["kubernetes", "boto3"], entry_points={ - 'console_scripts': [ - 'hubploy = hubploy.__main__:main', + "console_scripts": [ + "hubploy = hubploy.__main__:main", ], }, - ) From 4489c5a60483d840a3eeaea9677452d657e94833 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Thu, 10 Oct 2024 13:15:50 -0700 Subject: [PATCH 36/40] adding min python version req for hubploy --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 8357149..d070637 100644 --- a/setup.py +++ b/setup.py @@ -7,6 +7,7 @@ author="Shane Knapp", packages=setuptools.find_packages(), install_requires=["kubernetes", "boto3"], + python_requires=">=3.6", entry_points={ "console_scripts": [ "hubploy = hubploy.__main__:main", From 7c1398bd3106d23cbf3b8d7a57e1fe1154a6e0b1 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Thu, 10 Oct 2024 13:18:52 -0700 Subject: [PATCH 37/40] adding python linter --- .github/workflows/python-lint.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .github/workflows/python-lint.yaml diff --git a/.github/workflows/python-lint.yaml b/.github/workflows/python-lint.yaml new file mode 100644 index 
0000000..f3e939f --- /dev/null +++ b/.github/workflows/python-lint.yaml @@ -0,0 +1,15 @@ +name: "python lint" +on: + - pull_request + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install ruff + run: pip install ruff==0.6.9 + + - name: Lint python files + run: ruff check . From ca5ec6185a16bb7c7bc3fd842e94886adabc8391 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Thu, 10 Oct 2024 13:20:18 -0700 Subject: [PATCH 38/40] ruff format --- hubploy/auth.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hubploy/auth.py b/hubploy/auth.py index b0b51f6..73f0d38 100644 --- a/hubploy/auth.py +++ b/hubploy/auth.py @@ -310,7 +310,9 @@ def decrypt_file(encrypted_path): logger.info("File is sops encrypted, decrypting...") logger.debug( - "Executing: " + " ".join(sops_command) + " (with output to a temporary file)" + "Executing: " + + " ".join(sops_command) + + " (with output to a temporary file)" ) with tempfile.NamedTemporaryFile() as f: subprocess.check_call( From cea3849d18d5542d681f49201698d197c25cf67f Mon Sep 17 00:00:00 2001 From: shane knapp Date: Thu, 10 Oct 2024 13:27:39 -0700 Subject: [PATCH 39/40] updated readme --- README.md | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ae6289d..c3f59dd 100644 --- a/README.md +++ b/README.md @@ -2,5 +2,59 @@ Toolkit to deploy many z2jh based JupyterHubs -[![CircleCI](https://circleci.com/gh/yuvipanda/hubploy.svg?style=svg)](https://circleci.com/gh/pangeo-data/pangeo-cloud-federation) -[![Documentation Status](https://readthedocs.org/projects/hubploy/badge/?version=latest)](https://hubploy.readthedocs.io/en/latest/?badge=latest) +Usage: + +``` +hubploy deploy +``` + +Help text: +``` +$ hubploy --help +usage: hubploy [-h] [-d] [-D] [-v] {deploy} ... + +positional arguments: + {deploy} + deploy Deploy a chart to the given environment. 
+ +options: + -h, --help show this help message and exit + -d, --debug Enable tool debug output (not including helm debug). + -D, --helm-debug Enable Helm debug output. This is not allowed to be used in a CI environment due to secrets being displayed in plain text, and the script will exit. To enable this option, set a local environment varible HUBPLOY_LOCAL_DEBUG=true + -v, --verbose Enable verbose output. +``` + +Deploy help: +``` +hubploy deploy --help +usage: hubploy deploy [-h] [--namespace NAMESPACE] [--set SET] [--set-string SET_STRING] [--version VERSION] [--timeout TIMEOUT] [--force] [--atomic] + [--cleanup-on-fail] [--dry-run] [--image-overrides IMAGE_OVERRIDES [IMAGE_OVERRIDES ...]] + deployment chart {develop,staging,prod} + +positional arguments: + deployment The name of the hub to deploy. + chart The path to the main hub chart. + {develop,staging,prod} + The environment to deploy to. + +options: + -h, --help show this help message and exit + --namespace NAMESPACE + Helm option: the namespace to deploy to. If not specified, the namespace will be derived from the environment argument. + --set SET Helm option: set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --set-string SET_STRING + Helm option: set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + --version VERSION Helm option: specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a + valid range (e.g. ^2.0.0). If this is not specified, the latest version is used. + --timeout TIMEOUT Helm option: time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks, etc). Defaults to 300 seconds. + --force Helm option: force resource updates through a replacement strategy. + --atomic Helm option: if set, upgrade process rolls back changes made in case of failed upgrade. 
The --wait flag will be set automatically if --atomic is + used. + --cleanup-on-fail Helm option: allow deletion of new resources created in this upgrade when upgrade fails. + --dry-run Dry run the helm upgrade command. This also renders the chart to STDOUT. This is not allowed to be used in a CI environment due to secrets being + displayed in plain text, and the script will exit. To enable this option, set a local environment varible HUBPLOY_LOCAL_DEBUG=true + --image-overrides IMAGE_OVERRIDES [IMAGE_OVERRIDES ...] + Override one or more images and tags to deploy. Format is: : : ... IMPORTANT: + The order of images passed in must match the order in which they appear in hubploy.yaml and separated by spaces without quotes. You must always + specify a tag when overriding images. +``` \ No newline at end of file From f76a89781b239077d30e33d07a177c32629c1f99 Mon Sep 17 00:00:00 2001 From: shane knapp Date: Thu, 10 Oct 2024 13:37:54 -0700 Subject: [PATCH 40/40] adding some whitespace to readme --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index c3f59dd..db6db25 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,7 @@ hubploy deploy ``` Help text: + ``` $ hubploy --help usage: hubploy [-h] [-d] [-D] [-v] {deploy} ... @@ -25,6 +26,7 @@ options: ``` Deploy help: + ``` hubploy deploy --help usage: hubploy deploy [-h] [--namespace NAMESPACE] [--set SET] [--set-string SET_STRING] [--version VERSION] [--timeout TIMEOUT] [--force] [--atomic]