From 18c9e0eedcc5f43795a704e1d926ce2e356eab99 Mon Sep 17 00:00:00 2001
From: Tullio Sebastiani
Date: Mon, 9 Jan 2023 15:16:57 +0100
Subject: [PATCH] resources rename

resources rename

container image on quay

new line on Version file
---
 Dockerfile                                  | 27 ++++++++-------
 README.md                                   | 32 +++++++++---------
 VERSION                                     |  1 +
 .../{openspot_ng_logo.png => crc-cloud.png} | Bin
 openspot-ng.sh => crc-cloud.sh              |  8 ++---
 5 files changed, 36 insertions(+), 32 deletions(-)
 create mode 100644 VERSION
 rename assets/{openspot_ng_logo.png => crc-cloud.png} (100%)
 rename openspot-ng.sh => crc-cloud.sh (98%)

diff --git a/Dockerfile b/Dockerfile
index d66ef8cd..d89d840b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,17 +1,20 @@
-FROM alpine:3.17.0
+FROM quay.io/centos/centos:stream9
 ENV CONTAINER true
-RUN apk add --no-cache aws-cli && \
-    apk add --no-cache netcat-openbsd && \
-    apk add --no-cache jq && \
-    apk add --no-cache netcat-openbsd && \
-    apk add --no-cache bash && \
-    apk add --no-cache sed && \
-    apk add --no-cache curl && \
-    apk add --no-cache figlet && \
-    apk add --no-cache openssh-client-default
+ENV PATH $PATH:/root/.awscliv2/binaries
+RUN rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm &&\
+    dnf install -y which &&\
+    dnf install -y jq &&\
+    dnf install -y nc &&\
+    dnf install -y pip &&\
+    dnf install -y figlet &&\
+    dnf install -y openssh-clients &&\
+    dnf install -y less &&\
+    pip install awscliv2 &&\
+    awscliv2 -i
+
 WORKDIR /app
 COPY . .
-RUN chmod +x openspot-ng.sh
+RUN chmod +x crc-cloud.sh
 
-ENTRYPOINT [ "/app/openspot-ng.sh" ]
+ENTRYPOINT [ "/app/crc-cloud.sh" ]
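The new base image drops Alpine for CentOS Stream 9 and installs the AWS CLI v2 through the `awscliv2` pip wrapper, with its binaries exposed on `PATH`. A quick smoke test can confirm the rebuilt image resolves the CLI and the other runtime tools; this is a sketch — the `crc-cloud:test` tag and the `podman` invocations are illustrative, not part of this patch:

```
# Build the image from the repository root (the tag is arbitrary).
podman build -t crc-cloud:test .

# The Dockerfile appends /root/.awscliv2/binaries to PATH, so the `aws`
# binary installed by `awscliv2 -i` should resolve by name.
podman run --rm --entrypoint aws crc-cloud:test --version

# Check the other tools the entrypoint script relies on.
podman run --rm --entrypoint which crc-cloud:test jq nc figlet ssh
```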
diff --git a/README.md b/README.md
index 8643ea01..a14f148f 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
-# OpenSpot-NG
-### Disposable OpenShift instances on AWS in minutes
+# CRC-Cloud
+### Disposable OpenShift instances in the cloud in minutes
 
-| ![space-1.jpg](assets/openspot_ng_logo.png) |
+| ![space-1.jpg](assets/crc-cloud.png) |
 |:--:|
 | kindly created by OpenAI DALL-E (https://openai.com/dall-e-2) |
 
@@ -25,7 +25,7 @@ For the moment only AWS is supported. Others will be added soon.
 
 #### Prerequisites
 
-The basic requirements to run a single-node OpenShift cluster with **OpenSpot-NG** are:
+The basic requirements to run a single-node OpenShift cluster with **CRC-Cloud** are:
 - register a Red Hat account and get a pull secret from https://console.redhat.com/openshift/create/local
 - create an access key for your AWS account and grab the *ACCESS_KEY_ID* and the *SECRET_ACCESS_KEY* (instructions can be found [here](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html))
@@ -41,7 +41,7 @@ Increasing or decreasing the resources will affect the deployment time together
 
 ### Containers (the easy way)
 
-Running **OpenSpot-NG** from a container (podman/docker) is strongly recommended for the following reasons:
+Running **CRC-Cloud** from a container (podman/docker) is strongly recommended for the following reasons:
 - Compatible with any platform (Linux/macOS/Windows)
 - No need to satisfy any software dependency in your OS, since everything is packed into the container
 - In CI/CD systems (e.g. Jenkins) there is no need to propagate dependencies to the agents (only podman/docker is needed)
@@ -49,10 +49,10 @@ Running **OpenSpot-NG** from a container (podman/docker) is strongly recommended
 
 #### Working directory
 
-In the working directory that will be mounted into the container, **OpenSpot-NG** will store all the cluster metadata including those needed to teardown the cluster once you'll be done.
-Per each run **OpenSpot-NG** will create a folder named with the run timestamp, this folder name will be referred as *TEARDOWN_RUN_ID* and will be used to cleanup the cluster in teardown mode and to store all the logs and the infos related to the cluster deployment.
+In the working directory that will be mounted into the container, **CRC-Cloud** stores all the cluster metadata, including the files needed to tear down the cluster once you are done.
+For each run **CRC-Cloud** creates a folder named with the run timestamp; this folder name is referred to as *TEARDOWN_RUN_ID* and is used to clean up the cluster in teardown mode and to store all the logs and information related to the cluster deployment.
 
-Please **be careful** on deleting the working directory content because without the metadata **OpenSpot-NG** won't be able to teardown the cluster and associated resources from AWS.
+Please **be careful** when deleting the working directory content, because without the metadata **CRC-Cloud** won't be able to tear down the cluster and the associated resources from AWS.
 
 **NOTE (podman only):** In order to make the mounted workdir read-write accessible from the container, you need to change the SELinux security context of the folder with the following command ```chcon -Rt svirt_sandbox_file_t <WORKDIR>```
@@ -65,7 +65,7 @@ Please **be careful** on deleting the working directory content because without
     -e AWS_ACCESS_KEY_ID=<ACCESS_KEY_ID>\
     -e AWS_SECRET_ACCESS_KEY=<SECRET_ACCESS_KEY>\
     -e AWS_DEFAULT_REGION=<AWS_REGION>\
-    -ti quay.io/tsebastiani/openspot-ng
+    -ti quay.io/crcont/crc-cloud
 ```
 
 #### Single node cluster teardown
@@ -76,7 +76,7 @@ Please **be careful** on deleting the working directory content because without
     -e AWS_ACCESS_KEY_ID=<ACCESS_KEY_ID>\
     -e AWS_SECRET_ACCESS_KEY=<SECRET_ACCESS_KEY>\
     -e AWS_DEFAULT_REGION=us-west-2\
-    -ti quay.io/tsebastiani/openspot-ng
+    -ti quay.io/crcont/crc-cloud
 ```
 
 (check [here](#workdir) for **TEARDOWN_RUN_ID** info and **WORKDIR** setup instructions)
@@ -119,7 +119,7 @@ Environment variables will be passed to the container from the command line invo
 
 ### Linux Bash (the hard path)
 #### Dependencies
-To run **OpenSpot-NG** from your command line you must be on Linux, be sure to have installed and configured the following programs in your box
+To run **CRC-Cloud** from your command line you must be on Linux; be sure to have the following programs installed and configured on your machine
 
 - bash (>=v4)
 - AWS CLI
@@ -136,19 +136,19 @@ To run **OpenSpot-NG** from your command line you must be on h
 #### Single node cluster creation
 Once you have downloaded the pull secret and copied it somewhere in your filesystem, you'll be able to run the cluster with
 
-```./openspot-ng.sh -C -p <pull_secret_path>```
+```./crc-cloud.sh -C -p <pull_secret_path>```
 
 A folder with the run id will be created under ```/workspace``` containing all the logs, the keypair needed to log in to the VM, and the VM metadata. The last run will also be linked automatically to ```/latest```

-**WARNING:** if you delete the working directory **OpenSpot-NG** won't be able to teardown the cluster so be **extremely careful** with the workspace folder content.
+**WARNING:** if you delete the working directory, **CRC-Cloud** won't be able to tear down the cluster, so be **extremely careful** with the workspace folder content.
 
 At the end of the process the script will print the public address of the console. Below you'll find all the available options
 
 ```
-./openspot-ng.sh -C -p pull secret path [-d developer user password] [-k kubeadmin user password] [-r redhat user password] [-a AMI ID] [-t Instance type]
+./crc-cloud.sh -C -p pull secret path [-d developer user password] [-k kubeadmin user password] [-r redhat user password] [-a AMI ID] [-t Instance type]
 where:
 -C  Cluster Creation mode
 -p  pull secret file path (download from https://console.redhat.com/openshift/create/local)
 ```
 #### Single node cluster teardown
 To tear down the single node cluster, the basic command is
-```./openspot-ng.sh -T```
+```./crc-cloud.sh -T```
 This will refer to the *latest* run found in ```/workspace```; if you have several run folders in your workspace, you can specify the one you want to tear down with the parameter ```-v <run_id>```, where ```<run_id>``` corresponds to the numeric folder name containing the metadata of the cluster that will be deleted
 ```
-./openspot-ng.sh -T [-v run id]
+./crc-cloud.sh -T [-v run id]
 -T  Cluster Teardown mode
 -v  the id of the run that is going to be destroyed; corresponds to the numeric name of the folders created in workdir (optional, default: latest)
 -h  show this help text
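The working-directory layout and the teardown options above lend themselves to a scripted cleanup. A minimal sketch, assuming the `/workspace` mount target and the `WORKING_MODE`/`TEARDOWN_RUN_ID` environment variables inferred from the script's `$WORKING_MODE` check and the README's *TEARDOWN_RUN_ID* naming — those names are assumptions, not lines from this patch:

```
# Mounted working directory holding the run folders (see the
# working directory section above).
WORKDIR="$PWD/workdir"

# Pick the most recent run: CRC-Cloud names run folders with the
# numeric run timestamp, so the highest number is the latest run.
TEARDOWN_RUN_ID=$(ls -1 "$WORKDIR" | grep -E '^[0-9]+$' | sort -n | tail -1)

# Assumed env-var wiring for teardown mode (WORKING_MODE=T).
podman run -v "$WORKDIR":/workspace \
    -e WORKING_MODE=T \
    -e TEARDOWN_RUN_ID="$TEARDOWN_RUN_ID" \
    -e AWS_ACCESS_KEY_ID=<ACCESS_KEY_ID> \
    -e AWS_SECRET_ACCESS_KEY=<SECRET_ACCESS_KEY> \
    -e AWS_DEFAULT_REGION=us-west-2 \
    -ti quay.io/crcont/crc-cloud
```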

diff --git a/VERSION b/VERSION
new file mode 100644
index 00000000..6b3126ce
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+v1.0
diff --git a/assets/openspot_ng_logo.png b/assets/crc-cloud.png
similarity index 100%
rename from assets/openspot_ng_logo.png
rename to assets/crc-cloud.png
diff --git a/openspot-ng.sh b/crc-cloud.sh
similarity index 98%
rename from openspot-ng.sh
rename to crc-cloud.sh
index 5b8ac41d..57fd56a8 100755
--- a/openspot-ng.sh
+++ b/crc-cloud.sh
@@ -50,8 +50,8 @@ prepare_cluster_setup() {
 create_ec2_resources() {
     pr_info "creating EC2 resources"
-    RESOURCES_NAME="openspot-ng-$RANDOM_SUFFIX"
-    GROUPID=`aws ec2 create-security-group --group-name $RESOURCES_NAME --description "openspot-ng security group run timestamp: $RUN_TIMESTAMP" --no-paginate | $JQ -r '.GroupId'`
+    RESOURCES_NAME="crc-cloud-$RANDOM_SUFFIX"
+    GROUPID=`aws ec2 create-security-group --group-name $RESOURCES_NAME --description "crc-cloud security group run timestamp: $RUN_TIMESTAMP" --no-paginate | $JQ -r '.GroupId'`
     stop_if_failed $? "failed to create EC2 security group"
     #KEYPAIR (Created just because mandatory, will be swapped manually for the core user later on)
     $AWS ec2 create-key-pair --key-name $RESOURCES_NAME --no-paginate
@@ -218,7 +218,7 @@ set_workdir_dependent_variables() {
 
 usage() {
     echo ""
-    echo "*********** OpenSpot-NG ***********"
+    echo "*********** crc-cloud ***********"
     usage="
 Cluster Creation :
@@ -355,7 +355,7 @@ fi
 
 ##ENTRYPOINT: if everything is ok, run the script.
-[[ $CONTAINER ]] && figlet -f smslant -c "OpenSpot-NG" && echo -e "\n\n"
+[[ $CONTAINER ]] && figlet -f slant -c "CRC-Cloud `cat VERSION`" && echo -e "\n\n"
 if [[ $WORKING_MODE == "C" ]]
 then
     create
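A rename that touches the image, the entrypoint script, the AWS resource prefix, and the logo is easy to leave half-finished; a final check from the repository root catches stale references. A sketch (the command is illustrative, not part of this patch):

```
# Any surviving mention of the old name, ignoring git internals.
grep -rni --exclude-dir=.git -e 'openspot' . \
    && echo "stale references found" \
    || echo "rename complete"
```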