add ocp install log, delete cluster #10703

Merged
Changes from 4 commits
17 changes: 2 additions & 15 deletions ocs_ci/deployment/assisted_installer.py
@@ -4,7 +4,6 @@
"""

from copy import deepcopy
from datetime import datetime
import json
import logging
import os
@@ -15,6 +14,7 @@
SameNameClusterAlreadyExistsException,
)
from ocs_ci.utility import assisted_installer as ai
from ocs_ci.utility.deployment import create_openshift_install_log_file
from ocs_ci.utility.utils import download_file, TimeoutSampler
from ocs_ci.utility.retry import retry

@@ -435,21 +435,8 @@ def create_openshift_install_log_file(self):
Create .openshift_install.log file containing URL to OpenShift console.
It is used by our CI jobs to show the console URL in build description.
"""
# Create metadata file to store the cluster name
installer_log_file = os.path.join(self.cluster_path, ".openshift_install.log")
formatted_time = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
cluster_address = self.api.get_cluster_admin_credentials(self.id)["console_url"]
logger.info(f"Cluster URL: {cluster_address}")
with open(installer_log_file, "a") as fd:
fd.writelines(
[
"W/A for our CI to get URL to the cluster in jenkins job. "
"Cluster is deployed via Assisted Installer API!\n"
f'time="{formatted_time}" level=info msg="Access the OpenShift web-console here: '
f"{cluster_address}\"\n'",
]
)
logger.info("Created .openshift_install.log file")
create_openshift_install_log_file(self.cluster_path, cluster_address)

def create_kubeconfig_file(self):
"""
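With the duplicated body removed, the create_openshift_install_log_file method above reduces to roughly the following sketch (based only on the lines kept in this hunk; the surrounding class is not shown in the diff):

def create_openshift_install_log_file(self):
    """
    Create .openshift_install.log file containing URL to OpenShift console.
    It is used by our CI jobs to show the console URL in build description.
    """
    # Resolve the console URL from the Assisted Installer API and delegate
    # to the shared helper now living in ocs_ci/utility/deployment.py.
    cluster_address = self.api.get_cluster_admin_credentials(self.id)["console_url"]
    create_openshift_install_log_file(self.cluster_path, cluster_address)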
13 changes: 13 additions & 0 deletions ocs_ci/deployment/helpers/rosa_cluster_helpers.py
@@ -38,6 +38,9 @@ def __init__(self, cluster, env_prefix):
self.kubeadmin_password_path = os.path.join(
config.ENV_DATA["cluster_path"], config.RUN["password_location"]
)
self.username_path = os.path.join(
config.ENV_DATA["cluster_path"], "auth", "admin-user"
)

# Create "auth" folder if it doesn't exist.
abs_path = os.path.expanduser(self.kubeconfig_path)
@@ -130,6 +133,16 @@ def create_admin_and_login(self):
logger.info("It may take up to a minute for the account to become active")
time.sleep(10)
self.wait_for_cluster_admin_login_successful()
self.create_username_file()

def create_username_file(self):
"""
Creates a file with the username for the cluster.

"""

with open(self.username_path, "w+") as fd:
fd.write(config.ENV_DATA[self.username_key])

def get_admin_password(self):
"""
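The new auth/admin-user file complements the existing kubeadmin-password file, so later CI steps can recover the admin username for this cluster. A minimal read-back sketch (the cluster path value is illustrative):

import os

cluster_path = "/home/jenkins/cluster"  # illustrative value
with open(os.path.join(cluster_path, "auth", "admin-user")) as fd:
    admin_username = fd.read().strip()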
43 changes: 36 additions & 7 deletions ocs_ci/deployment/rosa.py
@@ -16,9 +16,16 @@
)
from ocs_ci.deployment.ocp import OCPDeployment as BaseOCPDeployment
from ocs_ci.framework import config
from ocs_ci.framework.logger_helper import log_step
from ocs_ci.ocs.resources.pod import get_operator_pods
from ocs_ci.utility import openshift_dedicated as ocm, rosa
from ocs_ci.utility.aws import AWS as AWSUtil
from ocs_ci.utility.aws import AWS as AWSUtil, delete_sts_iam_roles
from ocs_ci.utility.deployment import create_openshift_install_log_file
from ocs_ci.utility.rosa import (
get_associated_oidc_config_id,
delete_account_roles,
wait_console_url,
)
from ocs_ci.utility.utils import (
ceph_health_check,
get_ocp_version,
Expand Down Expand Up @@ -95,15 +102,18 @@ def deploy(self, log_level=""):

logger.info("generate kubeconfig and kubeadmin-password files")
if config.ENV_DATA["ms_env_type"] == "staging":
cluster_path = config.ENV_DATA["cluster_path"]
kubeconfig_path = os.path.join(
config.ENV_DATA["cluster_path"], config.RUN["kubeconfig_location"]
cluster_path, config.RUN["kubeconfig_location"]
)
ocm.get_kubeconfig(self.cluster_name, kubeconfig_path)
# this default admin password from secret doesn't work for ROSA HCP staging in the management-console
# but kubeconfig works for CLI operations, creating kubeadmin-password file for CLI operations via rosa cli
rosa_stage_cluster = ROSAStageEnvCluster(self.cluster_name)
rosa_stage_cluster.create_admin_and_login()
rosa_stage_cluster.generate_kubeadmin_password_file()
console_url = wait_console_url(self.cluster_name)
create_openshift_install_log_file(cluster_path, console_url)
if config.ENV_DATA["ms_env_type"] == "production":
if config.ENV_DATA.get("appliance_mode"):
logger.info(
Expand All @@ -128,12 +138,15 @@ def destroy(self, log_level="DEBUG"):

"""
try:
cluster_details = ocm.get_cluster_details(self.cluster_name)
cluster_id = cluster_details.get("id")
rosa_hcp = config.ENV_DATA.get("platform") == constants.ROSA_HCP_PLATFORM
oidc_config_id = (
get_associated_oidc_config_id(self.cluster_name) if rosa_hcp else None
)
log_step(f"Destroying ROSA cluster. Hosted CP: {rosa_hcp}")
delete_status = rosa.destroy_appliance_mode_cluster(self.cluster_name)
if not delete_status:
ocm.destroy_cluster(self.cluster_name)
logger.info("Waiting for ROSA cluster to be uninstalled")
log_step("Waiting for ROSA cluster to be uninstalled")
sample = TimeoutSampler(
timeout=14400,
sleep=300,
@@ -144,8 +157,24 @@
err_msg = f"Failed to delete {self.cluster_name}"
logger.error(err_msg)
raise TimeoutExpiredError(err_msg)
rosa.delete_operator_roles(cluster_id)
rosa.delete_oidc_provider(cluster_id)
log_step("Deleting ROSA/aws associated resources")
oproles_prefix = (
f"{constants.OPERATOR_ROLE_PREFIX_ROSA_HCP}-{self.cluster_name}"
)
rosa.delete_operator_roles(prefix=oproles_prefix)
if rosa_hcp:
if oidc_config_id:
rosa.delete_oidc_config(oidc_config_id)
# STS IAM roles are mandatory for ROSA HCP, so always clean them up
delete_sts_iam_roles()
rosa.delete_oidc_provider(self.cluster_name)
account_roles_prefix = (
f"{constants.ACCOUNT_ROLE_PREFIX_ROSA_HCP}-{self.cluster_name}"
)
delete_account_roles(account_roles_prefix)
logger.info(
f"Cluster {self.cluster_name} and associated resources deleted successfully"
)
except CommandFailed as err:
if "There are no subscriptions or clusters with identifier or name" in str(
err
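For reference, the ROSA HCP resource cleanup added to destroy() above could also be expressed as a standalone helper. The following is only a sketch: the wrapper name cleanup_rosa_hcp_resources is illustrative, while the called functions are the ones imported in this PR, and the OIDC config id is assumed to have been captured (via get_associated_oidc_config_id) before the cluster was destroyed:

from ocs_ci.ocs import constants
from ocs_ci.utility import rosa
from ocs_ci.utility.aws import delete_sts_iam_roles
from ocs_ci.utility.rosa import delete_account_roles


def cleanup_rosa_hcp_resources(cluster_name, oidc_config_id=None):
    # Operator roles and account roles are keyed by the new prefixes from constants.py.
    rosa.delete_operator_roles(
        prefix=f"{constants.OPERATOR_ROLE_PREFIX_ROSA_HCP}-{cluster_name}"
    )
    if oidc_config_id:
        rosa.delete_oidc_config(oidc_config_id)
    # STS IAM roles are mandatory for ROSA HCP, so they always need cleanup.
    delete_sts_iam_roles()
    rosa.delete_oidc_provider(cluster_name)
    delete_account_roles(f"{constants.ACCOUNT_ROLE_PREFIX_ROSA_HCP}-{cluster_name}")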
4 changes: 4 additions & 0 deletions ocs_ci/ocs/constants.py
@@ -2291,6 +2291,10 @@
# aws tags
AWS_CLOUDFORMATION_TAG = "aws:cloudformation:stack-name"

# aws prefixes
ACCOUNT_ROLE_PREFIX_ROSA_HCP = "accroleshcp"
OPERATOR_ROLE_PREFIX_ROSA_HCP = "oproleshcp"

# aws volume constants
AWS_VOL_PVC_NAME_TAG = "kubernetes.io/created-for/pvc/name"
AWS_VOL_PV_NAME_TAG = "kubernetes.io/created-for/pv/name"
28 changes: 28 additions & 0 deletions ocs_ci/utility/deployment.py
@@ -6,6 +6,8 @@
import os
import re
import tempfile
from datetime import datetime

import yaml

import requests
@@ -228,3 +230,29 @@ def workaround_mark_disks_as_ssd():
logger.info("Workaround already applied.")
else:
raise err


def create_openshift_install_log_file(cluster_path, console_url):
"""
Workaround.
Create .openshift_install.log file containing URL to OpenShift console.
It is used by our CI jobs to show the console URL in build description.

Args:
cluster_path (str): The path to the cluster directory.
console_url (str): The address of the OpenShift cluster management-console
"""
installer_log_file = os.path.join(cluster_path, ".openshift_install.log")
formatted_time = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

logger.info(f"Cluster URL: {console_url}")
with open(installer_log_file, "a") as fd:
fd.writelines(
[
"W/A for our CI to get URL to the cluster in jenkins job. "
"Cluster is deployed via Assisted Installer API!\n"
Contributor:
It might be worth updating this line based on the deployment type (or making it more generic), as it is not true in all cases.

Contributor (Author):
I did not change this line; I only moved the create_openshift_install_log_file function to the common space from ocs_ci/deployment/assisted_installer.py. As far as I understand, it is a mock-up for our log parser in the Jenkins pipelines.

TBH, correcting this code (a mocked-up log message) for our custom-built pipelines, which do not validate this log message beyond retrieving the console URL, should have very low priority, unless it becomes important for something else.

Contributor:
For the Jenkins job, the only important line is the last one: ... Access the OpenShift web-console here ....

The thing is that initially this was used only for the Assisted Installer deployment, where the message makes sense, but since you are moving it to the common space and reusing it for the ROSA deployment as well, it would be worth updating the message to avoid confusion. I think it is not necessary to make it dynamic based on the actual deployment type, just more generic. What about something like this?

"Cluster is deployed via some kind of managed deployment (Assisted Installer API or ROSA). OpenShift Installer (IPI or UPI deployment) were not used!\n"

f'time="{formatted_time}" level=info msg="Access the OpenShift web-console here: '
f"{console_url}\"\n'",
]
)
logger.info("Created '.openshift_install.log' file")