Merge branch 'master' into deploy_hosted_cluster
dahorak authored May 27, 2024
2 parents f30ee1c + c5cb2ba commit 88023e0
Showing 63 changed files with 1,466 additions and 309 deletions.
2 changes: 1 addition & 1 deletion conf/examples/acm_hub_unreleased_image.yaml
@@ -1,3 +1,3 @@
---
ENV_DATA:
acm_unreleased_image: "2.10.0-DOWNSTREAM-2024-02-28-06-06-55"
acm_unreleased_image: "2.11.0-DOWNSTREAM-2024-05-09-04-23-50"
2 changes: 1 addition & 1 deletion conf/examples/submariner_unreleased_image.yaml
@@ -1,3 +1,3 @@
---
ENV_DATA:
submariner_unreleased_image: "680159"
submariner_unreleased_image: "722673"
14 changes: 14 additions & 0 deletions conf/ocsci/dr_workload.yaml
@@ -36,4 +36,18 @@ ENV_DATA:
{ name: "busybox-2", workload_dir: "rdr/busybox/cephfs/app-busybox-2", pod_count: 20, pvc_count: 4 },
]

dr_cnv_workload_appset: [
{ name: "vm-appset-1", workload_dir: "rdr/cnv-workload/appset/vm-appset-1",
dr_workload_app_placement_name: "vm-appset-1-placement", vm_name: "vm-workload-1",
vm_secret: "vm-secret-1", vm_username: "cirros",
dr_workload_app_pvc_selector: { 'appname': 'kubevirt' }, pod_count: 1, pvc_count: 1
},
]
dr_cnv_workload_sub: [
{ name: "vm-sub-1", workload_dir: "rdr/cnv-workload/subscription/vm-sub-1",
dr_workload_app_placement_name: "vm-sub-1-placement", vm_name: "vm-workload-1",
vm_secret: "vm-secret-1", vm_username: "cirros",
dr_workload_app_pvc_selector: { 'appname': 'kubevirt' }, pod_count: 1, pvc_count: 1
},
]
# dr_policy_name: PLACEHOLDER
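The new CNV entries reuse the list-of-dicts shape of the existing busybox workloads. A minimal, illustrative sketch of reading them from a loaded run config (the helper below is hypothetical and not part of this change; it assumes conf/ocsci/dr_workload.yaml has been loaded into ENV_DATA):

from ocs_ci.framework import config

def list_cnv_dr_workloads():
    # Hypothetical helper: collect the CNV DR workload definitions from ENV_DATA.
    workloads = config.ENV_DATA.get("dr_cnv_workload_sub", []) + config.ENV_DATA.get(
        "dr_cnv_workload_appset", []
    )
    for wl in workloads:
        print(wl["name"], wl["workload_dir"], wl["pvc_count"])
    return workloads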
1 change: 1 addition & 0 deletions conf/ocsci/submariner_downstream.yaml
@@ -1,2 +1,3 @@
ENV_DATA:
submariner_source: "downstream"
subctl_version: "subctl-rhel9:v0.17"
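The subctl_version value is an image-name:tag pair; the downstream download code added to ocs_ci/deployment/acm.py below splits it on ":" to derive the version suffix used to locate the extracted tarball. A small, illustrative snippet:

# Illustrative only, mirroring download_downstream_binary() in acm.py below.
subctl_ver = "subctl-rhel9:v0.17"
version_str = subctl_ver.split(":")[1]
print(version_str)  # -> "v0.17"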
50 changes: 50 additions & 0 deletions ocs_ci/deployment/acm.py
@@ -7,14 +7,17 @@
import tempfile
import shutil
import requests
import subprocess

import semantic_version
import platform

from ocs_ci.framework import config
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import (
CommandFailed,
DRPrimaryNotFoundException,
UnsupportedPlatformError,
)
from ocs_ci.utility import templating
from ocs_ci.ocs.utils import get_non_acm_cluster_config
@@ -157,6 +160,53 @@ def download_binary(self):
os.path.expanduser("~/.local/bin/subctl"),
os.path.join(config.RUN["bin_dir"], "subctl"),
)
elif self.source == "downstream":
self.download_downstream_binary()

def download_downstream_binary(self):
"""
Download downstream subctl binary
Raises:
UnsupportedPlatformError: If the current platform has no supported subctl binary
"""

subctl_ver = config.ENV_DATA["subctl_version"]
version_str = subctl_ver.split(":")[1]
pull_secret_path = os.path.join(constants.DATA_DIR, "pull-secret")
processor = platform.processor()
arch = platform.machine()
if arch == "x86_64" and processor == "x86_64":
binary_pltfrm = "amd64"
elif arch == "arm64" and processor == "arm":
binary_pltfrm = "arm64"
else:
raise UnsupportedPlatformError(
"Not a supported architecture for subctl binary"
)
cmd = (
f"oc image extract --filter-by-os linux/{binary_pltfrm} --registry-config "
f"{pull_secret_path} {constants.SUBCTL_DOWNSTREAM_URL}{subctl_ver} "
f'--path="/dist/subctl-{version_str}*-linux-{binary_pltfrm}.tar.xz":/tmp --confirm'
)
run_cmd(cmd)
decompress = (
f"tar -C /tmp/ -xf /tmp/subctl-{version_str}*-linux-{binary_pltfrm}.tar.xz"
)
p = subprocess.run(decompress, stdout=subprocess.PIPE, shell=True)
if p.returncode:
logger.error("Failed to untar subctl")
raise CommandFailed
else:
logger.info(p.stdout)
target_dir = config.RUN["bin_dir"]
install_cmd = (
f"install -m744 /tmp/subctl-{version_str}*/subctl-{version_str}*-linux-{binary_pltfrm} "
f"{target_dir} "
)
run_cmd(install_cmd, shell=True)
run_cmd(f"mv {target_dir}/subctl-* {target_dir}/subctl", shell=True)
os.environ["PATH"] = os.environ["PATH"] + ":" + os.path.abspath(f"{target_dir}")

def submariner_configure_upstream(self):
"""
5 changes: 3 additions & 2 deletions ocs_ci/deployment/assisted_installer.py
@@ -145,12 +145,13 @@ def load_existing_cluster_configuration(self):
self.cpu_architecture = cl_config["cpu_architecture"]
self.high_availability_mode = cl_config["high_availability_mode"]
self.image_type = infra_config["type"]
self.openshift_cluster_id = cl_config["openshift_cluster_id"]
self.openshift_cluster_id = cl_config.get("openshift_cluster_id")
# load records with: 'kind': 'AddHostsCluster'
self.add_hosts_clusters = [
cl["id"]
for cl in self.api.get_clusters()
if cl["openshift_cluster_id"] == self.openshift_cluster_id
if self.openshift_cluster_id
and cl.get("openshift_cluster_id") == self.openshift_cluster_id
and cl["kind"] == "AddHostsCluster"
]
logger.debug(f"AddHostsClusters: {', '.join(self.add_hosts_clusters)}")
20 changes: 14 additions & 6 deletions ocs_ci/deployment/aws.py
@@ -343,7 +343,7 @@ def deploy_prereq(self):
"OCP_INSTALL_DIR": os.path.join(self.upi_script_path, "install-dir"),
"DISABLE_MASTER_MACHINESET": "yes",
"DISABLE_WORKER_MACHINESET": "yes",
"INSTALLER_BIN": "openshift-install",
"INSTALLER_BIN": f"{self.installer_filename}",
"num_workers_additional": str(
config.ENV_DATA["num_workers_additional"]
),
@@ -377,16 +377,22 @@ def deploy_prereq(self):
# script dir
bindir = os.path.abspath(os.path.expanduser(config.RUN["bin_dir"]))
shutil.copy2(
os.path.join(bindir, "openshift-install"),
self.upi_script_path,
os.path.join(bindir, f"{self.installer_filename}"),
f"{self.upi_script_path}/openshift-install",
)
shutil.copy2(
os.path.join(bindir, f"{self.installer_filename}"),
f"{self.upi_script_path}/{self.installer_filename}",
)
shutil.copy2(os.path.join(bindir, "oc"), self.upi_script_path)
# and another UGLY WORKAROUND: copy openshift-install also to the
# absolute_cluster_path (for more details, see
# https://github.com/red-hat-storage/ocs-ci/pull/4650)
shutil.copy2(
os.path.join(bindir, "openshift-install"),
os.path.abspath(os.path.join(self.cluster_path, "..")),
os.path.join(bindir, f"{self.installer_filename}"),
os.path.abspath(
os.path.join(self.cluster_path, f"../{self.installer_filename}")
),
)

def deploy(self, log_cli_level="DEBUG"):
@@ -453,7 +459,9 @@ def deploy(self, log_cli_level="DEBUG"):
# Delete openshift-install copied to cluster_dir (see WORKAROUND at
# the end of deploy_prereq method of this class)
delete_file(
os.path.abspath(os.path.join(self.cluster_path, "../openshift-install"))
os.path.abspath(
os.path.join(self.cluster_path, f"../{self.installer_filename}")
)
)

def deploy_ocp(self, log_cli_level="DEBUG"):
10 changes: 3 additions & 7 deletions ocs_ci/deployment/deployment.py
@@ -235,6 +235,7 @@ def deploy_gitops_operator(self, switch_ctx=None):
switch_ctx (int): The cluster index by the cluster name
"""
config.switch_ctx(switch_ctx) if switch_ctx else config.switch_acm_ctx()

logger.info("Creating GitOps Operator Subscription")
gitops_subscription_yaml_data = templating.load_yaml(
@@ -284,16 +285,11 @@ def do_gitops_deploy(self):
if config.multicluster:
# Gitops operator is needed on all clusters for appset type workload deployment using pull model
for cluster_index in range(config.nclusters):
config.switch_ctx(cluster_index)
self.deploy_gitops_operator()
self.deploy_gitops_operator(switch_ctx=cluster_index)

# Switching back context to ACM as below configs are specific to hub cluster
config.switch_acm_ctx()

acm_indexes = get_all_acm_indexes()
for acm_ctx in acm_indexes:
self.deploy_gitops_operator(switch_ctx=acm_ctx)
config.switch_ctx(get_active_acm_index())

logger.info("Creating GitOps CLuster Resource")
run_cmd(f"oc create -f {constants.GITOPS_CLUSTER_YAML}")

9 changes: 8 additions & 1 deletion ocs_ci/deployment/ocp.py
@@ -12,7 +12,7 @@
from ocs_ci.framework import config
from ocs_ci.ocs import constants
from ocs_ci.ocs.openshift_ops import OCP
from ocs_ci.utility import utils, templating, system
from ocs_ci.utility import utils, templating, system, version
from ocs_ci.utility.deployment import get_ocp_release_image
from ocs_ci.deployment.disconnected import mirror_ocp_release_images
from ocs_ci.utility.utils import create_directory_path, exec_cmd
@@ -69,6 +69,13 @@ def __init__(self):
self.installer = self.download_installer()
self.cluster_path = config.ENV_DATA["cluster_path"]
self.cluster_name = config.ENV_DATA["cluster_name"]
if (
config.ENV_DATA.get("fips")
and version.get_semantic_ocp_version_from_config() >= version.VERSION_4_16
):
self.installer_filename = "openshift-install-fips"
else:
self.installer_filename = "openshift-install"

def download_installer(self):
"""
6 changes: 3 additions & 3 deletions ocs_ci/framework/conf/ocp_version/ocp-4.16-config.yaml
@@ -11,6 +11,6 @@ DEPLOYMENT:
ENV_DATA:
# TODO: replace with 4.16 once template is available
vm_template: "rhcos-416.94.202403071059-0-vmware.x86_64"
acm_hub_channel: release-2.10
acm_version: "2.10"
submariner_version: "0.17.0"
acm_hub_channel: release-2.11
acm_version: "2.11"
submariner_version: "0.18.0"
16 changes: 16 additions & 0 deletions ocs_ci/framework/conf/ocp_version/ocp-4.17-config.yaml
@@ -0,0 +1,16 @@
---
# Config file for nightly OCP 4.17
RUN:
client_version: "4.17.0-0.nightly"
DEPLOYMENT:
installer_version: "4.17.0-0.nightly"
terraform_version: "1.0.11"
# ignition_version can be found here
# https://docs.openshift.com/container-platform/4.7/post_installation_configuration/machine-configuration-tasks.html#machine-config-overview-post-install-machine-configuration-tasks
ignition_version: "3.2.0"
ENV_DATA:
# TODO: replace with 4.17 once template is available
vm_template: "rhcos-416.94.202403071059-0-vmware.x86_64"
acm_hub_channel: release-2.11
acm_version: "2.11"
submariner_version: "0.17.0"
6 changes: 6 additions & 0 deletions ocs_ci/framework/conf/ocp_version/ocp-4.17-upgrade.yaml
@@ -0,0 +1,6 @@
---
# Use this config for upgrading of OCP 4.16 to 4.17 cluster
UPGRADE:
ocp_upgrade_version: "4.17.0-0.nightly"
ocp_upgrade_path: "registry.ci.openshift.org/ocp/release"
ocp_channel: "stable-4.17"
11 changes: 0 additions & 11 deletions ocs_ci/framework/conf/ocs_version/ocs-4.10.yaml

This file was deleted.

11 changes: 0 additions & 11 deletions ocs_ci/framework/conf/ocs_version/ocs-4.11.yaml

This file was deleted.

12 changes: 12 additions & 0 deletions ocs_ci/framework/conf/ocs_version/ocs-4.17.yaml
@@ -0,0 +1,12 @@
---
DEPLOYMENT:
default_ocs_registry_image: "quay.io/rhceph-dev/ocs-registry:latest-4.17"
# TODO: Change to latest-stable-4.17 once we have a stable build
default_latest_tag: 'latest-4.17'
ocs_csv_channel: "stable-4.17"
default_ocp_version: '4.17'
ENV_DATA:
ocs_version: '4.17'
REPORTING:
default_ocs_must_gather_latest_tag: 'latest-4.17'
default_ocs_must_gather_image: "quay.io/rhceph-dev/ocs-must-gather"
3 changes: 1 addition & 2 deletions ocs_ci/framework/main.py
@@ -129,13 +129,12 @@ def process_ocsci_conf(arguments):
action="store",
choices=[
"4.99",
"4.10",
"4.11",
"4.12",
"4.13",
"4.14",
"4.15",
"4.16",
"4.17",
],
)
parser.add_argument("--ocs-registry-image")
18 changes: 18 additions & 0 deletions ocs_ci/framework/pytest_customization/marks.py
@@ -31,11 +31,13 @@
HCI_CLIENT,
MS_CONSUMER_TYPE,
HCI_PROVIDER,
BAREMETAL_PLATFORMS,
)
from ocs_ci.utility import version
from ocs_ci.utility.aws import update_config_from_s3
from ocs_ci.utility.utils import load_auth_config


# tier marks

tier1 = pytest.mark.tier1(value=1)
@@ -524,6 +526,10 @@
reason="This test doesn't work correctly on OCP cluster deployed via Flexy",
)

skipif_noobaa_external_pgsql = pytest.mark.skipif(
config.ENV_DATA.get("noobaa_external_pgsql") is True,
reason="This test will not run correctly in external DB deployed cluster.",
)
metrics_for_external_mode_required = pytest.mark.skipif(
version.get_semantic_ocs_version_from_config() < version.VERSION_4_6
and config.DEPLOYMENT.get("external_mode") is True,
@@ -621,3 +627,15 @@ def get_current_test_marks():
"""
return current_test_marks


baremetal_deployment_required = pytest.mark.skipif(
(config.ENV_DATA["platform"].lower() not in BAREMETAL_PLATFORMS)
or (not vsphere_platform_required),
reason="Test required baremetal or vsphere deployment.",
)

ui_deployment_required = pytest.mark.skipif(
not config.DEPLOYMENT.get("ui_deployment"),
reason="UI Deployment required to run the test.",
)
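A minimal usage sketch for the new markers (the test below is hypothetical and only illustrates how the skipif markers are applied to a test function):

from ocs_ci.framework.pytest_customization.marks import (
    skipif_noobaa_external_pgsql,
    ui_deployment_required,
)


@skipif_noobaa_external_pgsql
@ui_deployment_required
def test_example_ui_flow():
    # Hypothetical test body: skipped when NooBaa uses an external PostgreSQL
    # DB, or when the cluster was not deployed through the UI.
    assert True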
30 changes: 30 additions & 0 deletions ocs_ci/helpers/helpers.py
@@ -4725,3 +4725,33 @@ def flatten_multilevel_dict(d):
else:
leaves_list.append(value)
return leaves_list


def is_rbd_default_storage_class(custom_sc=None):
"""
Check if RDB is a default storageclass for the cluster
Args:
custom_sc: custom storageclass name.
Returns:
bool: True if RBD is set as the default storage class for the cluster, False otherwise.
"""
default_rbd_sc = (
constants.DEFAULT_STORAGECLASS_RBD if custom_sc is None else custom_sc
)
cmd = (
f"oc get storageclass {default_rbd_sc} -o=jsonpath='{{.metadata.annotations}}' "
)
try:
check_annotations = json.loads(run_cmd(cmd))
except json.decoder.JSONDecodeError:
logger.error("Error to get annotation value from storageclass.")
return False

if check_annotations.get("storageclass.kubernetes.io/is-default-class") == "true":
logger.info(f"Storageclass {default_rbd_sc} is a default RBD StorageClass.")
return True

logger.error("Storageclass {default_rbd_sc} is not a default RBD StorageClass.")
return False
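A short, hypothetical caller of the new helper (assuming the run config and cluster context are already set up; the custom storageclass name below is illustrative only):

import logging

from ocs_ci.helpers.helpers import is_rbd_default_storage_class

log = logging.getLogger(__name__)

# Check the default RBD storageclass, then a custom one; both calls only read
# the storageclass annotations via 'oc get'.
if not is_rbd_default_storage_class():
    log.info("RBD is not the default storageclass on this cluster")
if is_rbd_default_storage_class(custom_sc="my-custom-rbd-sc"):
    log.info("Custom RBD storageclass is the cluster default")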