diff --git a/.github/workflows/acceptance_tests_common.yml b/.github/workflows/acceptance_tests_common.yml index f9d129c37046..f63b5c9d1621 100644 --- a/.github/workflows/acceptance_tests_common.yml +++ b/.github/workflows/acceptance_tests_common.yml @@ -11,7 +11,10 @@ on: env: UYUNI_PROJECT: uyuni-project UYUNI_VERSION: master + NO_AUTH_REGISTRY: no_auth_registry CUCUMBER_PUBLISH_TOKEN: ${{ secrets.CUCUMBER_PUBLISH_TOKEN }} + AUTH_REGISTRY: "auth_registry" + AUTH_REGISTRY_CREDENTIALS: "cucutest|cucutest" jobs: paths-filter-1: runs-on: ubuntu-latest @@ -67,8 +70,8 @@ jobs: run: ./testsuite/podman_runner/01_setup_tmp_dirs.sh - name: create-podman-network run: ./testsuite/podman_runner/02_setup_network.sh - - name: start_controller - run: ./testsuite/podman_runner/03_run_controller.sh + - name: start_controller_and_registry_and_buildhost + run: ./testsuite/podman_runner/03_run_controller_and_registry_and_buildhost.sh - name: create_ssh_conf run: ./testsuite/podman_runner/04_setup_ssh_controller.sh - name: install_gems_in_controller diff --git a/.github/workflows/acceptance_tests_secondary.yml b/.github/workflows/acceptance_tests_secondary.yml index 21c6b6dbc3a5..4ab01b82562d 100644 --- a/.github/workflows/acceptance_tests_secondary.yml +++ b/.github/workflows/acceptance_tests_secondary.yml @@ -12,6 +12,8 @@ concurrency: cancel-in-progress: ${{ github.ref != 'refs/heads/master' }} jobs: acceptance-tests-secondary: + # Skip running scheduled jobs on forks + if: (github.repository == 'uyuni-project/uyuni' || github.event_name != 'schedule') uses: ./.github/workflows/acceptance_tests_common.yml with: secondary_tests: "18_run_secondary_tests.sh" diff --git a/.github/workflows/acceptance_tests_secondary_parallel.yml b/.github/workflows/acceptance_tests_secondary_parallel.yml index 6e5d003ca3a4..8657f72e56ad 100644 --- a/.github/workflows/acceptance_tests_secondary_parallel.yml +++ b/.github/workflows/acceptance_tests_secondary_parallel.yml @@ -12,6 +12,8 @@ concurrency: cancel-in-progress: ${{ github.ref != 'refs/heads/master' }} jobs: acceptance-tests-secondary-parallel: + # Skip running scheduled jobs on forks + if: (github.repository == 'uyuni-project/uyuni' || github.event_name != 'schedule') uses: ./.github/workflows/acceptance_tests_common.yml strategy: fail-fast: false diff --git a/testsuite/features/github_validation/init_clients/buildhost_bootstrap.feature b/testsuite/features/github_validation/init_clients/buildhost_bootstrap.feature new file mode 100644 index 000000000000..2416a0ba0d7d --- /dev/null +++ b/testsuite/features/github_validation/init_clients/buildhost_bootstrap.feature @@ -0,0 +1,31 @@ +# Copyright (c) 2016-2023 SUSE LLC +# Licensed under the terms of the MIT license. + +@buildhost +Feature: Bootstrap a build host via the GUI + + Scenario: Log in as admin user + Given I am authorized for the "Admin" section + + Scenario: Check the new bootstrapped build host in System Overview page + When I follow the left menu "Salt > Keys" + And I accept "build_host" key in the Salt master + And I wait until I do not see "Loading..." 
text + Then I should see a "accepted" text + When I follow the left menu "Systems > System List > All" + # the build host entitlement adds some extra minutes to apply the salt high-state + And I wait at most 500 seconds until I see the name of "build_host", refreshing the page + Then the Salt master can reach "build_host" + + Scenario: Enable "Container Build Host" system type + Given I am on the Systems overview page of this "build_host" + When I follow "Properties" in the content area + And I check "Container Build Host" + And I check "OS Image Build Host" + And I click on "Update Properties" + + Scenario: Check that the build host is a build host + Given I am on the Systems overview page of this "build_host" + Then I should see a "[Container Build Host]" text + Then I should see a "[OS Image Build Host]" text + diff --git a/testsuite/features/secondary/buildhost_docker_auth_registry.feature b/testsuite/features/secondary/buildhost_docker_auth_registry.feature index 889c6f69b8aa..2e008815541a 100644 --- a/testsuite/features/secondary/buildhost_docker_auth_registry.feature +++ b/testsuite/features/secondary/buildhost_docker_auth_registry.feature @@ -20,7 +20,8 @@ Feature: Build image with authenticated registry And I enter URI, username and password for registry And I click on "create-btn" Then I wait until I see "registry" text - + + @scc_credentials Scenario: Create a profile for the authenticated image store as Docker admin When I follow the left menu "Images > Profiles" And I follow "Create" @@ -31,6 +32,7 @@ Feature: Build image with authenticated registry And I click on "create-btn" Then I wait until I see "auth_registry_profile" text + @scc_credentials Scenario: Build an image in the authenticated image store When I follow the left menu "Images > Build" And I select "auth_registry_profile" from "profileId" @@ -46,12 +48,14 @@ Feature: Build image with authenticated registry Then table row for "auth_registry_profile" should contain "1" And the list of packages of image "auth_registry_profile" with version "latest" is not empty + @scc_credentials Scenario: Cleanup: remove Docker profile for the authenticated image store When I follow the left menu "Images > Profiles" And I check the row with the "auth_registry_profile" text And I click on "Delete" And I click on the red confirmation button And I should see a "Image profile has been deleted." 
text + And I wait until no Salt job is running on "build_host" Scenario: Cleanup: remove authenticated image store When I follow the left menu "Images > Stores" @@ -63,5 +67,3 @@ Feature: Build image with authenticated registry Scenario: Cleanup: delete registry image When I delete the image "auth_registry_profile" with version "latest" via API calls - Scenario: Cleanup: Make sure no job is left running on buildhost - When I wait until no Salt job is running on "build_host" diff --git a/testsuite/features/secondary/buildhost_docker_build_image.feature b/testsuite/features/secondary/buildhost_docker_build_image.feature index b3f837370895..14d7d9545b01 100644 --- a/testsuite/features/secondary/buildhost_docker_build_image.feature +++ b/testsuite/features/secondary/buildhost_docker_build_image.feature @@ -17,6 +17,7 @@ @buildhost @scope_building_container_images @no_auth_registry +@skip_if_github_validation Feature: Build container images Scenario: Log in as org admin user @@ -38,6 +39,7 @@ Feature: Build container images And I enter "Docker/serverhost" relative to profiles as "path" And I click on "create-btn" +@scc_credentials Scenario: Create an image profile with activation key When I follow the left menu "Images > Profiles" And I follow "Create" @@ -47,6 +49,7 @@ Feature: Build container images And I enter "Docker" relative to profiles as "path" And I click on "create-btn" +@scc_credentials Scenario: Create a simple real image profile with activation key When I follow the left menu "Images > Profiles" And I follow "Create" @@ -56,6 +59,7 @@ Feature: Build container images And I enter "Docker/serverhost" relative to profiles as "path" And I click on "create-btn" +@scc_credentials Scenario: Build the suse_key image with and without activation key Given I am on the Systems overview page of this "build_host" When I schedule the build of image "suse_key" via API calls @@ -75,6 +79,7 @@ Feature: Build container images And I wait at most 300 seconds until image "suse_simple" with version "latest" is inspected successfully via API Then the list of packages of image "suse_simple" with version "latest" is not empty +@scc_credentials Scenario: Build the suse_real_key image with and without activation key Given I am on the Systems overview page of this "build_host" When I schedule the build of image "suse_real_key" via API calls @@ -85,6 +90,7 @@ Feature: Build container images Then the list of packages of image "suse_real_key" with version "latest" is not empty When I wait until no Salt job is running on "build_host" +@scc_credentials Scenario: Build suse_key images with different versions When I schedule the build of image "suse_key" with version "Latest_key-activation1" via API calls And I wait at most 600 seconds until image "suse_key" with version "Latest_key-activation1" is built successfully via API @@ -99,11 +105,15 @@ Feature: Build container images Then the list of packages of image "suse_simple" with version "Latest_simple" is not empty When I wait until no Salt job is running on "build_host" - Scenario: Delete image via API calls +@scc_credentials + Scenario: Delete image via API calls with key When I delete the image "suse_key" with version "Latest_key-activation1" via API calls - And I delete the image "suse_simple" with version "Latest_simple" via API calls Then the image "suse_simple" with version "Latest_key-activation1" doesn't exist via API calls - And the image "suse_simple" with version "Latest_simple" doesn't exist via API calls + When I wait until no Salt job is running on 
"build_host" + + Scenario: Delete image via API calls without key + When I delete the image "suse_simple" with version "Latest_simple" via API calls + Then the image "suse_simple" with version "Latest_simple" doesn't exist via API calls When I wait until no Salt job is running on "build_host" Scenario: Rebuild suse_simple image @@ -113,6 +123,7 @@ Feature: Build container images Then the list of packages of image "suse_simple" with version "Latest_simple" is not empty When I wait until no Salt job is running on "build_host" +@scc_credentials Scenario: Rebuild suse_key image When I schedule the build of image "suse_key" with version "Latest_key-activation1" via API calls And I wait at most 600 seconds until image "suse_key" with version "Latest_key-activation1" is built successfully via API @@ -120,6 +131,7 @@ Feature: Build container images Then the list of packages of image "suse_key" with version "Latest_key-activation1" is not empty When I wait until no Salt job is running on "build_host" +@scc_credentials Scenario: Build an image via the GUI When I follow the left menu "Images > Build" And I select "suse_real_key" from "profileId" @@ -131,6 +143,7 @@ Feature: Build container images And I wait at most 600 seconds until image "suse_real_key" with version "GUI_BUILT_IMAGE" is built successfully via API And I wait at most 300 seconds until image "suse_real_key" with version "GUI_BUILT_IMAGE" is inspected successfully via API +@scc_credentials Scenario: Login as Docker image administrator and build an image Given I am authorized as "docker" with password "docker" When I follow the left menu "Images > Build" @@ -143,20 +156,23 @@ Feature: Build container images And I wait at most 600 seconds until image "suse_real_key" with version "GUI_DOCKERADMIN" is built successfully via API And I wait at most 300 seconds until image "suse_real_key" with version "GUI_DOCKERADMIN" is inspected successfully via API - Scenario: Cleanup: delete all images +@scc_credentials + Scenario: Cleanup: delete all images with key Given I am authorized as "admin" with password "admin" When I delete the image "suse_key" with version "latest" via API calls And I delete the image "suse_key" with version "Latest_key-activation1" via API calls - And I delete the image "suse_simple" with version "latest" via API calls - And I delete the image "suse_simple" with version "Latest_simple" via API calls And I delete the image "suse_real_key" with version "latest" via API calls And I delete the image "suse_real_key" with version "GUI_BUILT_IMAGE" via API calls And I delete the image "suse_real_key" with version "GUI_DOCKERADMIN" via API calls - Scenario: Cleanup: delete all profiles + Scenario: Cleanup: delete images without key + Given I am authorized as "admin" with password "admin" + When I delete the image "suse_simple" with version "latest" via API calls + And I delete the image "suse_simple" with version "Latest_simple" via API calls + +@scc_credentials + Scenario: Cleanup: delete all profiles with key When I follow the left menu "Images > Profiles" - And I check "suse_simple" in the list - And I check "suse_real_simple" in the list And I check "suse_key" in the list And I check "suse_real_key" in the list And I click on "Delete" @@ -164,5 +180,14 @@ Feature: Build container images And I click on the red confirmation button And I wait until I see "Image profiles have been deleted" text + Scenario: Cleanup: delete all profiles without key + When I follow the left menu "Images > Profiles" + And I check "suse_simple" in the 
list + And I check "suse_real_simple" in the list + And I click on "Delete" + And I should see a "Are you sure you want to delete selected profiles?" text + And I click on the red confirmation button + And I wait until I see "Image profiles have been deleted" text + Scenario: Cleanup: Make sure no job is left running on buildhost When I wait until no Salt job is running on "build_host" diff --git a/testsuite/features/secondary/buildhost_osimage_build_image.feature b/testsuite/features/secondary/buildhost_osimage_build_image.feature index 162d65ac1493..ccfba30c14f6 100644 --- a/testsuite/features/secondary/buildhost_osimage_build_image.feature +++ b/testsuite/features/secondary/buildhost_osimage_build_image.feature @@ -15,11 +15,11 @@ # - features/secondary/srv_docker_cve_audit.feature # If the image is not created, the message shown is "There are no entries to show." -@skip_if_github_validation @skip_if_cloud @buildhost @scope_retail @scope_building_container_images +@scc_credentials Feature: Build OS images Scenario: Log in as org admin user diff --git a/testsuite/features/secondary/min_ansible_control_node.feature b/testsuite/features/secondary/min_ansible_control_node.feature index 4c764a56cb4f..acdcff600749 100644 --- a/testsuite/features/secondary/min_ansible_control_node.feature +++ b/testsuite/features/secondary/min_ansible_control_node.feature @@ -1,7 +1,6 @@ # Copyright (c) 2021-2024 SUSE LLC # Licensed under the terms of the MIT license. -@skip_if_github_validation @scope_ansible Feature: Operate an Ansible control node in a normal minion @@ -10,13 +9,14 @@ Feature: Operate an Ansible control node in a normal minion Scenario: Pre-requisite: Deploy test playbooks and inventory file When I deploy testing playbooks and inventory files to "sle_minion" - +@skip_if_github_validation @susemanager Scenario: Pre-requisite: Enable client tools repositories When I enable the repositories "tools_update_repo tools_pool_repo" on this "sle_minion" And I refresh the metadata for "sle_minion" -# TODO: Check why tools_update_repo is not available on the openSUSE minion + # TODO: Check why tools_update_repo is not available on the openSUSE minion +@skip_if_github_validation @uyuni Scenario: Pre-requisite: Enable client tools repositories When I enable the repositories "tools_pool_repo os_pool_repo" on this "sle_minion" @@ -90,12 +90,14 @@ Feature: Operate an Ansible control node in a normal minion And I remove package "orion-dummy" from this "sle_minion" without error control And I remove "/tmp/file.txt" from "sle_minion" +@skip_if_github_validation @susemanager Scenario: Cleanup: Disable client tools repositories Given I am on the Systems overview page of this "sle_minion" When I disable the repositories "tools_update_repo tools_pool_repo" on this "sle_minion" And I refresh the metadata for "sle_minion" +@skip_if_github_validation @uyuni Scenario: Cleanup: Disable client tools repositories Given I am on the Systems overview page of this "sle_minion" diff --git a/testsuite/features/secondary/min_deblike_monitoring.feature b/testsuite/features/secondary/min_deblike_monitoring.feature index fde588686c73..41f56c056c3f 100644 --- a/testsuite/features/secondary/min_deblike_monitoring.feature +++ b/testsuite/features/secondary/min_deblike_monitoring.feature @@ -4,7 +4,6 @@ # - features/secondary/srv_monitoring.feature: as this feature disables/re-enables monitoring capabilities # - sumaform: as it is configuring monitoring to be enabled after deployment -@skip_if_github_validation @scope_monitoring 
 @scope_res
 @deblike_minion
@@ -13,6 +12,7 @@ Feature: Monitor SUMA environment with Prometheus on a Debian-like Salt minion
   As an authorized user
   I want to enable Prometheus exporters
 
+@skip_if_github_validation
   Scenario: Pre-requisite: enable Prometheus exporters repository on the Debian-like minion
     When I enable the necessary repositories before installing Prometheus exporters on this "deblike_minion"
 
@@ -39,18 +39,22 @@ Feature: Monitor SUMA environment with Prometheus on a Debian-like Salt minion
     And I click on "Save"
     Then I should see a "Formula saved" text
 
+@skip_if_github_validation
   Scenario: Apply highstate for Prometheus exporters on the Debian-like minion
     When I follow "States" in the content area
     And I click on "Apply Highstate"
     Then I should see a "Applying the highstate has been scheduled." text
     And I wait until event "Apply highstate scheduled" is completed
 
+@skip_if_github_validation
+  Scenario: Wait for services
+    When I wait until "node" exporter service is active on "deblike_minion"
+    And I wait until "apache" exporter service is active on "deblike_minion"
+    And I wait until "postgres" exporter service is active on "deblike_minion"
+
   Scenario: Visit monitoring endpoints on the Debian-like minion
-    When I wait until "node" exporter service is active on "deblike_minion"
-    And I visit "Prometheus node exporter" endpoint of this "deblike_minion"
-    And I wait until "apache" exporter service is active on "deblike_minion"
+    When I visit "Prometheus node exporter" endpoint of this "deblike_minion"
     And I visit "Prometheus apache exporter" endpoint of this "deblike_minion"
-    And I wait until "postgres" exporter service is active on "deblike_minion"
     And I visit "Prometheus postgres exporter" endpoint of this "deblike_minion"
 
   Scenario: Cleanup: undo Prometheus exporter formulas on the Debian-like minion
@@ -59,11 +63,13 @@ Feature: Monitor SUMA environment with Prometheus on a Debian-like Salt minion
     And I click on "Save"
     Then I wait until I see "Formula saved" text
 
+@skip_if_github_validation
   Scenario: Cleanup: apply highstate after test monitoring on the Debian-like minion
     When I follow "States" in the content area
     And I click on "Apply Highstate"
     Then I should see a "Applying the highstate has been scheduled."
text And I wait until event "Apply highstate scheduled" is completed +@skip_if_github_validation Scenario: Cleanup: disable Prometheus exporters repository on the Debian-like minion When I disable the necessary repositories before installing Prometheus exporters on this "deblike_minion" without error control diff --git a/testsuite/features/secondary/min_docker_api.feature b/testsuite/features/secondary/min_docker_api.feature index 01405072e044..e0f2ac7fe048 100644 --- a/testsuite/features/secondary/min_docker_api.feature +++ b/testsuite/features/secondary/min_docker_api.feature @@ -13,9 +13,9 @@ # - features/secondary/buildhost_docker_build_image.feature # - features/secondary/buildhost_docker_auth_registry.feature -@skip_if_github_validation @skip_if_cloud @scope_building_container_images +@no_auth_registry Feature: API "image" namespace for containers and sub-namespaces Scenario: Test "image.store" namespace @@ -23,12 +23,14 @@ Feature: API "image" namespace for containers and sub-namespaces And I list image store types and image stores via API And I set and get details of image store via API + @scc_credentials Scenario: Test "image.profiles" namespace When I create and delete profiles via API And I create and delete profile custom values via API And I list image profiles via API And I set and get profile details via API + @scc_credentials Scenario: Cleanup: remove custom system info Given I am authorized When I follow the left menu "Systems > Custom System Info" diff --git a/testsuite/features/secondary/min_monitoring.feature b/testsuite/features/secondary/min_monitoring.feature index c62cad461eba..e4c7514a45d6 100644 --- a/testsuite/features/secondary/min_monitoring.feature +++ b/testsuite/features/secondary/min_monitoring.feature @@ -4,7 +4,6 @@ # - features/secondary/srv_monitoring.feature : As this feature disable/re-enable monitoring capabilities # - sumaform : As it is configuring monitoring to be enabled after deployment -@skip_if_github_validation @sle_minion @scope_monitoring Feature: Monitor SUMA environment with Prometheus on a SLE Salt minion @@ -12,6 +11,7 @@ Feature: Monitor SUMA environment with Prometheus on a SLE Salt minion As an authorized user I want to enable Prometheus exporters +@skip_if_github_validation Scenario: Pre-requisite: enable Prometheus exporters repository on the minion When I enable the necessary repositories before installing Prometheus exporters on this "sle_minion" And I refresh the metadata for "sle_minion" @@ -49,20 +49,24 @@ Feature: Monitor SUMA environment with Prometheus on a SLE Salt minion And I click on "Save" Then I should see a "Formula saved" text +@skip_if_github_validation Scenario: Apply highstate for Prometheus exporters When I follow "States" in the content area And I click on "Apply Highstate" Then I should see a "Applying the highstate has been scheduled." 
text And I wait until event "Apply highstate scheduled" is completed - Scenario: Visit monitoring endpoints on the minion +@skip_if_github_validation + Scenario: Wait for services When I wait until "prometheus" service is active on "sle_minion" - And I visit "Prometheus" endpoint of this "sle_minion" And I wait until "node" exporter service is active on "sle_minion" - And I visit "Prometheus node exporter" endpoint of this "sle_minion" And I wait until "apache" exporter service is active on "sle_minion" - And I visit "Prometheus apache exporter" endpoint of this "sle_minion" And I wait until "postgres" exporter service is active on "sle_minion" + + Scenario: Visit monitoring endpoints on the minion + When I visit "Prometheus" endpoint of this "sle_minion" + And I visit "Prometheus node exporter" endpoint of this "sle_minion" + And I visit "Prometheus apache exporter" endpoint of this "sle_minion" And I visit "Prometheus postgres exporter" endpoint of this "sle_minion" Scenario: Cleanup: undo Prometheus and Prometheus exporter formulas @@ -72,11 +76,13 @@ Feature: Monitor SUMA environment with Prometheus on a SLE Salt minion And I click on "Save" Then I wait until I see "Formula saved" text +@skip_if_github_validation Scenario: Cleanup: apply highstate after test monitoring And I follow "States" in the content area And I click on "Apply Highstate" Then I should see a "Applying the highstate has been scheduled." text And I wait until event "Apply highstate scheduled" is completed +@skip_if_github_validation Scenario: Cleanup: disable Prometheus exporters repository When I disable the necessary repositories before installing Prometheus exporters on this "sle_minion" without error control diff --git a/testsuite/features/secondary/min_rhlike_monitoring.feature b/testsuite/features/secondary/min_rhlike_monitoring.feature index c0271942c243..a61a6ebd6f88 100644 --- a/testsuite/features/secondary/min_rhlike_monitoring.feature +++ b/testsuite/features/secondary/min_rhlike_monitoring.feature @@ -4,7 +4,6 @@ # - features/secondary/srv_monitoring.feature: as this feature disables/re-enables monitoring capabilities # - sumaform: as it is configuring monitoring to be enabled after deployment -@skip_if_github_validation @scope_monitoring @scope_res @rhlike_minion @@ -13,6 +12,7 @@ Feature: Monitor SUMA environment with Prometheus on a Red Hat-like Salt minion As an authorized user I want to enable Prometheus exporters +@skip_if_github_validation Scenario: Pre-requisite: enable Prometheus exporters repository on the Red Hat-like minion When I enable the necessary repositories before installing Prometheus exporters on this "rhlike_minion" @@ -39,18 +39,22 @@ Feature: Monitor SUMA environment with Prometheus on a Red Hat-like Salt minion And I click on "Save" Then I should see a "Formula saved" text +@skip_if_github_validation Scenario: Apply highstate for Prometheus exporters on the Red Hat-like minion When I follow "States" in the content area And I click on "Apply Highstate" Then I should see a "Applying the highstate has been scheduled." 
text
     And I wait until event "Apply highstate scheduled" is completed
 
-  Scenario: Visit monitoring endpoints on the Red Hat-like minion
+@skip_if_github_validation
+  Scenario: Wait for services
     When I wait until "node" exporter service is active on "rhlike_minion"
-    And I visit "Prometheus node exporter" endpoint of this "rhlike_minion"
     And I wait until "apache" exporter service is active on "rhlike_minion"
-    And I visit "Prometheus apache exporter" endpoint of this "rhlike_minion"
     And I wait until "postgres" exporter service is active on "rhlike_minion"
+
+  Scenario: Visit monitoring endpoints on the Red Hat-like minion
+    When I visit "Prometheus node exporter" endpoint of this "rhlike_minion"
+    And I visit "Prometheus apache exporter" endpoint of this "rhlike_minion"
     And I visit "Prometheus postgres exporter" endpoint of this "rhlike_minion"
 
   Scenario: Cleanup: undo Prometheus exporter formulas on the Red Hat-like minion
@@ -59,11 +63,13 @@ Feature: Monitor SUMA environment with Prometheus on a Red Hat-like Salt minion
     And I click on "Save"
     Then I wait until I see "Formula saved" text
 
+@skip_if_github_validation
   Scenario: Cleanup: apply highstate after test monitoring on the Red Hat-like minion
     When I follow "States" in the content area
     And I click on "Apply Highstate"
     Then I should see a "Applying the highstate has been scheduled." text
     And I wait until event "Apply highstate scheduled" is completed
 
+@skip_if_github_validation
   Scenario: Cleanup: disable Prometheus exporters repository on the Red Hat-like minion
     When I disable the necessary repositories before installing Prometheus exporters on this "rhlike_minion" without error control
diff --git a/testsuite/features/secondary/srv_dist_channel_mapping.feature b/testsuite/features/secondary/srv_dist_channel_mapping.feature
index 5b1698081d14..b3eaa3d9a34d 100644
--- a/testsuite/features/secondary/srv_dist_channel_mapping.feature
+++ b/testsuite/features/secondary/srv_dist_channel_mapping.feature
@@ -1,7 +1,6 @@
 # Copyright (c) 2022-2024 SUSE LLC
 # Licensed under the terms of the MIT license.
-@skip_if_github_validation Feature: Distribution Channel Mapping Scenario: Log in as org admin user @@ -30,6 +29,7 @@ Feature: Distribution Channel Mapping And I click on "Create Mapping" Then I should see a "SUSE Linux Enterprise Server 15 SP 4" link in the content area +@scc_credentials @uyuni Scenario: Create new map for x86_64 openSUSE clients When I follow the left menu "Software > Distribution Channel Mapping" @@ -42,6 +42,7 @@ Feature: Distribution Channel Mapping And I click on "Create Mapping" Then I should see a "openSUSE Leap 15.5" link in the content area +@deblike_minion Scenario: Create new map for x86_64 Ubuntu clients with test base channel When I follow the left menu "Software > Distribution Channel Mapping" And I follow "Create Distribution Channel Mapping" @@ -80,6 +81,7 @@ Feature: Distribution Channel Mapping Then I should see the text "SUSE Linux Enterprise Server 15 SP 4 modified" in the Operating System field And I should see the text "sle-product-sles15-sp4-pool-x86_64" in the Channel Label field +@scc_credentials @uyuni Scenario: Update map for x86_64 openSUSE clients using test-x86_64 channel When I follow the left menu "Software > Distribution Channel Mapping" @@ -94,6 +96,7 @@ Feature: Distribution Channel Mapping Then I should see the text "openSUSE Leap 15.5 modified" in the Operating System field And I should see the text "opensuse_leap15_5-x86_64" in the Channel Label field +@deblike_minion Scenario: Update map for x86_64 Ubuntu clients using test base channel When I follow the left menu "Software > Distribution Channel Mapping" Then I should see the text "Ubuntu 22.04.01 LTS" in the Operating System field @@ -133,6 +136,7 @@ Feature: Distribution Channel Mapping When I click on "Delete Mapping" Then I should not see a "SUSE Linux Enterprise Server 15 SP 4 modified" link +@scc_credentials @uyuni Scenario: Cleanup: delete the map created for x68_64 openSUSE clients When I follow the left menu "Software > Distribution Channel Mapping" @@ -146,6 +150,7 @@ Feature: Distribution Channel Mapping When I click on "Delete Mapping" Then I should not see a "openSUSE Leap 15.5 modified" link +@deblike_minion Scenario: Cleanup: delete the map created for x68_64 Ubuntu clients When I follow the left menu "Software > Distribution Channel Mapping" Then I should see the text "Ubuntu 22.04.01 LTS modified" in the Operating System field diff --git a/testsuite/features/secondary/srv_docker_advanced_content_management.feature b/testsuite/features/secondary/srv_docker_advanced_content_management.feature index 0def316998d9..1160b0cabd36 100644 --- a/testsuite/features/secondary/srv_docker_advanced_content_management.feature +++ b/testsuite/features/secondary/srv_docker_advanced_content_management.feature @@ -14,7 +14,7 @@ Feature: Advanced content management And I enter "docker_admin" as "label" And I enter the URI of the registry as "uri" And I click on "create-btn" - + @scc_credentials Scenario: Create a profile as Docker admin When I follow the left menu "Images > Profiles" And I follow "Create" @@ -43,6 +43,7 @@ Feature: Advanced content management Scenario: Log in as docker user Given I am authorized as "docker" with password "docker" + @scc_credentials Scenario: Cleanup: remove Docker profile Given I am authorized as "docker" with password "docker" When I follow the left menu "Images > Profiles" diff --git a/testsuite/features/secondary/srv_docker_cve_audit.feature b/testsuite/features/secondary/srv_docker_cve_audit.feature index 1909d0e01a44..8749a8a03846 100644 --- 
a/testsuite/features/secondary/srv_docker_cve_audit.feature +++ b/testsuite/features/secondary/srv_docker_cve_audit.feature @@ -20,6 +20,7 @@ Feature: CVE audit for content management Then I should see a "bunch was scheduled" text And I wait until the table contains "FINISHED" or "SKIPPED" followed by "FINISHED" in its first rows + @scc_credentials Scenario: Audit images, searching for a known CVE number When I follow the left menu "Audit > CVE Audit" And I select "1999" from "cveIdentifierYear" diff --git a/testsuite/features/secondary/srv_manage_activationkey.feature b/testsuite/features/secondary/srv_manage_activationkey.feature index bce5aabf6825..fa0ed999877c 100644 --- a/testsuite/features/secondary/srv_manage_activationkey.feature +++ b/testsuite/features/secondary/srv_manage_activationkey.feature @@ -1,7 +1,6 @@ # Copyright (c) 2010-2024 SUSE LLC # Licensed under the terms of the MIT license. -@skip_if_github_validation Feature: Manipulate activation keys In order to register systems to the spacewalk server As the testing user @@ -82,6 +81,7 @@ Feature: Manipulate activation keys And I click on "Update Activation Key" Then I should see a "Activation key SUSE Test PKG Key x86_64 has been modified." text +@scc_credentials @uyuni Scenario: Create an activation key with a channel and a package list for x86_64 When I follow the left menu "Systems > Activation Keys" diff --git a/testsuite/features/secondary/srv_monitoring.feature b/testsuite/features/secondary/srv_monitoring.feature index feb710af5d33..a85729e09e64 100644 --- a/testsuite/features/secondary/srv_monitoring.feature +++ b/testsuite/features/secondary/srv_monitoring.feature @@ -20,8 +20,8 @@ # If this feature fails, # it could let the monitoring feature disabled for the Debian-like minion -@skip_if_github_validation @skip_if_containerized_server +@skip_if_github_validation @scope_monitoring Feature: Disable and re-enable monitoring of the server diff --git a/testsuite/features/secondary/srv_notifications.feature b/testsuite/features/secondary/srv_notifications.feature index 879df653ab8a..40e8fcace8be 100644 --- a/testsuite/features/secondary/srv_notifications.feature +++ b/testsuite/features/secondary/srv_notifications.feature @@ -2,7 +2,6 @@ # Licensed under the terms of the MIT license. @scope_visualization -@skip_if_github_validation Feature: Test the notification/notification-messages feature Scenario: Log in as org admin user diff --git a/testsuite/podman_runner/03_run_controller.sh b/testsuite/podman_runner/03_run_controller.sh deleted file mode 100755 index f470904e15b5..000000000000 --- a/testsuite/podman_runner/03_run_controller.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -ex -src_dir=$(cd $(dirname "$0")/../.. && pwd -P) - -sudo -i podman run --rm -d --network network -v /tmp/testing:/tmp --name controller -h controller -v ${src_dir}/testsuite:/testsuite ghcr.io/$UYUNI_PROJECT/uyuni/ci-test-controller-dev:qe-develop - -sudo podman ps diff --git a/testsuite/podman_runner/03_run_controller_and_registry_and_buildhost.sh b/testsuite/podman_runner/03_run_controller_and_registry_and_buildhost.sh new file mode 100755 index 000000000000..581c556eb2e9 --- /dev/null +++ b/testsuite/podman_runner/03_run_controller_and_registry_and_buildhost.sh @@ -0,0 +1,13 @@ +#!/bin/bash +set -ex +src_dir=$(cd $(dirname "$0")/../.. 
&& pwd -P)
+
+echo buildhostproductuuid > /tmp/buildhost_product_uuid
+
+AUTH_REGISTRY_USER=$(echo "$AUTH_REGISTRY_CREDENTIALS"| cut -d\| -f1)
+AUTH_REGISTRY_PASSWD=$(echo "$AUTH_REGISTRY_CREDENTIALS" | cut -d\| -f2)
+sudo -i podman run --rm -d --network network -v /tmp/testing:/tmp --name controller -h controller -v ${src_dir}/testsuite:/testsuite ghcr.io/$UYUNI_PROJECT/uyuni/ci-test-controller-dev:$UYUNI_VERSION
+sudo -i podman run --rm -d --network network --name $AUTH_REGISTRY -h $AUTH_REGISTRY -e AUTH_REGISTRY=${AUTH_REGISTRY} -e AUTH_REGISTRY_USER=${AUTH_REGISTRY_USER} -e AUTH_REGISTRY_PASSWD=${AUTH_REGISTRY_PASSWD} ghcr.io/$UYUNI_PROJECT/uyuni/ci-container-registry-auth:$UYUNI_VERSION
+sudo -i podman run --privileged --rm -d --network network -v ${src_dir}/testsuite:/testsuite -v /tmp/buildhost_product_uuid:/sys/class/dmi/id/product_uuid -v /tmp/testing:/tmp -v ${src_dir}/testsuite/podman_runner/salt-minion-entry-point.sh:/salt-minion-entry-point.sh --volume /run/dbus/system_bus_socket:/run/dbus/system_bus_socket:ro -v /var/run/docker.sock:/var/run/docker.sock --name buildhost -h buildhost ghcr.io/$UYUNI_PROJECT/uyuni/ci-buildhost:$UYUNI_VERSION bash -c "/salt-minion-entry-point.sh server 1-SUSE-KEY-x86_64"
+sudo -i podman exec -d buildhost dockerd
+sudo podman ps
diff --git a/testsuite/podman_runner/07_start_server.sh b/testsuite/podman_runner/07_start_server.sh
index 3273d83f8e1c..648ebcd410c9 100755
--- a/testsuite/podman_runner/07_start_server.sh
+++ b/testsuite/podman_runner/07_start_server.sh
@@ -24,9 +24,11 @@ sudo -i podman run --cap-add AUDIT_CONTROL --rm \
     -h server \
     -p 8443:443 \
     -p 8080:80 \
+    -p 9090:9090 \
     -p 4505:4505 \
     -p 4506:4506 \
     -d --name=server \
     --network network \
     ghcr.io/$UYUNI_PROJECT/uyuni/ci-test-server-all-in-one-dev:$UYUNI_VERSION
 
+sudo -i podman exec -d server prometheus
diff --git a/testsuite/podman_runner/11_setup_sshd.sh b/testsuite/podman_runner/11_setup_sshd.sh
index c730a0afd7d8..cbda85c61f7d 100755
--- a/testsuite/podman_runner/11_setup_sshd.sh
+++ b/testsuite/podman_runner/11_setup_sshd.sh
@@ -6,4 +6,6 @@ sudo -i podman exec server bash -c "if [ ! -d /root/.ssh ];then mkdir /root/.ssh
 sudo -i podman exec opensusessh bash -c "echo 'root:linux' | chpasswd && echo 123456789 > /etc/machine-id"
 sudo -i podman exec opensusessh bash -c "if [ ! -d /root/.ssh ];then mkdir /root/.ssh/;chmod 700 /root/.ssh;fi;cp /tmp/authorized_keys /root/.ssh/"
 sudo -i podman exec controller bash -c "if [ ! -d /root/.ssh ];then mkdir /root/.ssh/;chmod 700 /root/.ssh;fi;cp /tmp/authorized_keys /root/.ssh/"
+sudo -i podman exec buildhost bash -c "ssh-keygen -A && /usr/sbin/sshd -e"
+sudo -i podman exec buildhost bash -c "if [ !
-d /root/.ssh ];then mkdir /root/.ssh/;chmod 700 /root/.ssh;fi;cp /tmp/authorized_keys /root/.ssh/" diff --git a/testsuite/podman_runner/12_run_core_tests.sh b/testsuite/podman_runner/12_run_core_tests.sh index 4d169e30318b..196c7bb1c97c 100755 --- a/testsuite/podman_runner/12_run_core_tests.sh +++ b/testsuite/podman_runner/12_run_core_tests.sh @@ -1,3 +1,3 @@ #!/bin/bash set -xe -sudo -i podman exec controller bash -c "export CUCUMBER_PUBLISH_TOKEN=${CUCUMBER_PUBLISH_TOKEN} && export PROVIDER=podman && export SERVER=server && export HOSTNAME=controller && export SSH_MINION=opensusessh && export MINION=sle_minion && export RHLIKE_MINION=rhlike_minion && cd /testsuite && rake cucumber:github_validation_core" +sudo -i podman exec controller bash -c "export BUILD_HOST=buildhost && export AUTH_REGISTRY=${AUTH_REGISTRY} && export AUTH_REGISTRY_CREDENTIALS=\"${AUTH_REGISTRY_CREDENTIALS}\" && export NO_AUTH_REGISTRY=${NO_AUTH_REGISTRY} && export CUCUMBER_PUBLISH_TOKEN=${CUCUMBER_PUBLISH_TOKEN} && export PROVIDER=podman && export SERVER=server && export HOSTNAME=controller && export SSH_MINION=opensusessh && export MINION=sle_minion && export RHLIKE_MINION=rhlike_minion && export DEBLIKE_MINION=deblike_minion && cd /testsuite && rake cucumber:github_validation_core" diff --git a/testsuite/podman_runner/13_run_salt_sle_minion.sh b/testsuite/podman_runner/13_run_salt_sle_minion.sh index 2b4e348b3f02..8d3db4a3241d 100755 --- a/testsuite/podman_runner/13_run_salt_sle_minion.sh +++ b/testsuite/podman_runner/13_run_salt_sle_minion.sh @@ -4,6 +4,12 @@ src_dir=$(cd $(dirname "$0")/../.. && pwd -P) echo opensuseminionproductuuid > /tmp/opensuse_product_uuid -sudo -i podman run --privileged --rm -d --network network -v /tmp/opensuse_product_uuid:/sys/class/dmi/id/product_uuid -v /tmp/testing:/tmp -v ${src_dir}/testsuite/podman_runner/salt-minion-entry-point.sh:/salt-minion-entry-point.sh --volume /run/dbus/system_bus_socket:/run/dbus/system_bus_socket:ro --name sle_minion -h sle_minion ghcr.io/$UYUNI_PROJECT/uyuni/ci-test-opensuse-minion:$UYUNI_VERSION bash -c "/salt-minion-entry-point.sh server 1-SUSE-KEY-x86_64" +sudo -i podman run --privileged --rm -d --network network -v /tmp/opensuse_product_uuid:/sys/class/dmi/id/product_uuid -v /tmp/testing:/tmp -v ${src_dir}/testsuite/podman_runner/salt-minion-entry-point.sh:/salt-minion-entry-point.sh --volume /run/dbus/system_bus_socket:/run/dbus/system_bus_socket:ro -p 9091:9090 --name sle_minion -h sle_minion ghcr.io/$UYUNI_PROJECT/uyuni/ci-test-opensuse-minion:$UYUNI_VERSION bash -c "/salt-minion-entry-point.sh server 1-SUSE-KEY-x86_64" sudo -i podman exec sle_minion bash -c "ssh-keygen -A && /usr/sbin/sshd -e" sudo -i podman exec sle_minion bash -c "if [ ! 
-d /root/.ssh ];then mkdir /root/.ssh/;chmod 700 /root/.ssh;fi;cp /tmp/authorized_keys /root/.ssh/" +sudo -i podman exec -d sle_minion prometheus +sudo -i podman exec -d sle_minion node_exporter +sudo -i podman exec -d sle_minion prometheus-apache_exporter +sudo -i podman exec -d -e DATA_SOURCE_NAME="postgresql://user:passwd@localhost:5432/database?sslmode=disable" sle_minion prometheus-postgres_exporter +sudo -i podman exec -d sle_minion bash -c "exporter_exporter -config.file /etc/exporter_exporter.yaml -config.dirs /etc/exporter_exporter.d" + diff --git a/testsuite/podman_runner/14_run_salt_rhlike_minion.sh b/testsuite/podman_runner/14_run_salt_rhlike_minion.sh index afc56be7c267..1d0a4e87a5ed 100755 --- a/testsuite/podman_runner/14_run_salt_rhlike_minion.sh +++ b/testsuite/podman_runner/14_run_salt_rhlike_minion.sh @@ -4,8 +4,12 @@ src_dir=$(cd $(dirname "$0")/../.. && pwd -P) echo rhminionproductuuid > /tmp/rh_product_uuid -sudo -i podman run --privileged --rm -d --network network -v /tmp/rh_product_uuid:/sys/class/dmi/id/product_uuid -v /tmp/testing:/tmp -v ${src_dir}/testsuite/podman_runner/salt-minion-entry-point.sh:/salt-minion-entry-point.sh --name rhlike_minion -h rhlike_minion ghcr.io/$UYUNI_PROJECT/uyuni/ci-test-rocky-minion:$UYUNI_VERSION bash -c "/salt-minion-entry-point.sh server 1-RH-LIKE-KEY" +sudo -i podman run --privileged --rm -d --network network -p 9092:9090 -v /tmp/rh_product_uuid:/sys/class/dmi/id/product_uuid -v /tmp/testing:/tmp -v ${src_dir}/testsuite/podman_runner/salt-minion-entry-point.sh:/salt-minion-entry-point.sh --name rhlike_minion -h rhlike_minion ghcr.io/$UYUNI_PROJECT/uyuni/ci-test-rocky-minion:$UYUNI_VERSION bash -c "/salt-minion-entry-point.sh server 1-RH-LIKE-KEY" # sleep 10 sudo -i podman exec rhlike_minion bash -c "ssh-keygen -A && /usr/sbin/sshd -e" sudo -i podman exec rhlike_minion bash -c "if [ ! -d /root/.ssh ];then mkdir /root/.ssh/;chmod 700 /root/.ssh;fi;cp /tmp/authorized_keys /root/.ssh/" +# sudo -i podman exec -d rhlike_minion prometheus +sudo -i podman exec -d rhlike_minion node_exporter +sudo -i podman exec -d rhlike_minion prometheus-apache_exporter +sudo -i podman exec -d -e DATA_SOURCE_NAME="postgresql://user:passwd@localhost:5432/database?sslmode=disable" rhlike_minion prometheus-postgres_exporter diff --git a/testsuite/podman_runner/15_run_salt_deblike_minion.sh b/testsuite/podman_runner/15_run_salt_deblike_minion.sh index 652d58c38fdf..9c415b224f1d 100755 --- a/testsuite/podman_runner/15_run_salt_deblike_minion.sh +++ b/testsuite/podman_runner/15_run_salt_deblike_minion.sh @@ -4,6 +4,10 @@ src_dir=$(cd $(dirname "$0")/../.. 
&& pwd -P) echo ubuntuminionproductuuid > /tmp/ubuntu_product_uuid -sudo -i podman run --privileged --rm -d --network network -v /tmp/ubuntu_product_uuid:/sys/class/dmi/id/product_uuid -v /tmp/testing:/tmp -v ${src_dir}/testsuite/podman_runner/salt-minion-entry-point.sh:/salt-minion-entry-point.sh --name deblike_minion -h deblike_minion ghcr.io/$UYUNI_PROJECT/uyuni/ci-test-ubuntu-minion:$UYUNI_VERSION bash -c "/salt-minion-entry-point.sh server 1-DEBLIKE-KEY" +sudo -i podman run --privileged --rm -d --network network -p 9093:9090 -v /tmp/ubuntu_product_uuid:/sys/class/dmi/id/product_uuid -v /tmp/testing:/tmp -v ${src_dir}/testsuite/podman_runner/salt-minion-entry-point.sh:/salt-minion-entry-point.sh --name deblike_minion -h deblike_minion ghcr.io/$UYUNI_PROJECT/uyuni/ci-test-ubuntu-minion:$UYUNI_VERSION bash -c "/salt-minion-entry-point.sh server 1-DEBLIKE-KEY" sudo -i podman exec deblike_minion bash -c "ssh-keygen -A && /usr/sbin/sshd -e" sudo -i podman exec deblike_minion bash -c "if [ ! -d /root/.ssh ];then mkdir /root/.ssh/;chmod 700 /root/.ssh;fi;cp /tmp/authorized_keys /root/.ssh/" +sudo -i podman exec -d deblike_minion prometheus-node-exporter +sudo -i podman exec -d deblike_minion prometheus-apache-exporter +sudo -i podman exec -d -e DATA_SOURCE_NAME="postgresql://user:passwd@localhost:5432/database?sslmode=disable" deblike_minion prometheus-postgres-exporter +sudo -i podman exec -d deblike_minion bash -c "prometheus-exporter-exporter -config.file /etc/exporter_exporter.yaml -config.dirs /etc/exporter_exporter.d" diff --git a/testsuite/podman_runner/17_run_init_clients_tests.sh b/testsuite/podman_runner/17_run_init_clients_tests.sh index 7e43aa437b99..12872227163b 100755 --- a/testsuite/podman_runner/17_run_init_clients_tests.sh +++ b/testsuite/podman_runner/17_run_init_clients_tests.sh @@ -1,3 +1,3 @@ #!/bin/bash set -xe -sudo -i podman exec controller bash -c "export CUCUMBER_PUBLISH_TOKEN=${CUCUMBER_PUBLISH_TOKEN} && export PROVIDER=podman && export SERVER=server && export HOSTNAME=controller && export SSH_MINION=opensusessh && export MINION=sle_minion && export RHLIKE_MINION=rhlike_minion && cd /testsuite && rake cucumber:github_validation_init_clients" +sudo -i podman exec controller bash -c "export BUILD_HOST=buildhost && export AUTH_REGISTRY=${AUTH_REGISTRY} && export AUTH_REGISTRY_CREDENTIALS=\"${AUTH_REGISTRY_CREDENTIALS}\" && export NO_AUTH_REGISTRY=${NO_AUTH_REGISTRY} && export CUCUMBER_PUBLISH_TOKEN=${CUCUMBER_PUBLISH_TOKEN} && export PROVIDER=podman && export SERVER=server && export HOSTNAME=controller && export SSH_MINION=opensusessh && export MINION=sle_minion && export RHLIKE_MINION=rhlike_minion && export DEBLIKE_MINION=deblike_minion && cd /testsuite && rake cucumber:github_validation_init_clients" diff --git a/testsuite/podman_runner/18_run_secondary_tests.sh b/testsuite/podman_runner/18_run_secondary_tests.sh index f9345c2c2c00..9ec604b3bc76 100755 --- a/testsuite/podman_runner/18_run_secondary_tests.sh +++ b/testsuite/podman_runner/18_run_secondary_tests.sh @@ -1,3 +1,3 @@ #!/bin/bash set -xe -sudo -i podman exec controller bash -c "export CUCUMBER_PUBLISH_TOKEN=${CUCUMBER_PUBLISH_TOKEN} && export PROVIDER=podman && export SERVER=server && export HOSTNAME=controller && export SSH_MINION=opensusessh && export MINION=sle_minion && export RHLIKE_MINION=rhlike_minion && export TAGS=\"\\\"not @flaky\\\"\" && cd /testsuite && rake cucumber:secondary" +sudo -i podman exec controller bash -c "export BUILD_HOST=buildhost && export AUTH_REGISTRY=${AUTH_REGISTRY} && 
export AUTH_REGISTRY_CREDENTIALS=\"${AUTH_REGISTRY_CREDENTIALS}\" && export NO_AUTH_REGISTRY=${NO_AUTH_REGISTRY} && export CUCUMBER_PUBLISH_TOKEN=${CUCUMBER_PUBLISH_TOKEN} && export PROVIDER=podman && export SERVER=server && export HOSTNAME=controller && export SSH_MINION=opensusessh && export MINION=sle_minion && export RHLIKE_MINION=rhlike_minion && export TAGS=\"\\\"not @flaky\\\"\" && export DEBLIKE_MINION=deblike_minion && cd /testsuite && rake cucumber:secondary" diff --git a/testsuite/podman_runner/19_run_secondary_parallelizable_tests.sh b/testsuite/podman_runner/19_run_secondary_parallelizable_tests.sh index 40c8f0878f2b..a1d07c724507 100755 --- a/testsuite/podman_runner/19_run_secondary_parallelizable_tests.sh +++ b/testsuite/podman_runner/19_run_secondary_parallelizable_tests.sh @@ -1,4 +1,4 @@ #!/bin/bash set -xe sudo -i podman exec controller bash -c "zypper ref && zypper -n install expect" -sudo -i podman exec controller bash -c "export CUCUMBER_PUBLISH_TOKEN=${CUCUMBER_PUBLISH_TOKEN} && export PROVIDER=podman && export SERVER=server && export HOSTNAME=controller && export SSH_MINION=opensusessh && export MINION=sle_minion && export RHLIKE_MINION=rhlike_minion && export TAGS=\"\\\"not @flaky\\\"\" && cd /testsuite && rake cucumber:secondary_parallelizable" +sudo -i podman exec controller bash -c "export BUILD_HOST=buildhost && export AUTH_REGISTRY=${AUTH_REGISTRY} && export AUTH_REGISTRY_CREDENTIALS=\"${AUTH_REGISTRY_CREDENTIALS}\" && export NO_AUTH_REGISTRY=${NO_AUTH_REGISTRY} && export CUCUMBER_PUBLISH_TOKEN=${CUCUMBER_PUBLISH_TOKEN} && export PROVIDER=podman && export SERVER=server && export HOSTNAME=controller && export SSH_MINION=opensusessh && export MINION=sle_minion && export RHLIKE_MINION=rhlike_minion && export TAGS=\"\\\"not @flaky\\\"\" && export DEBLIKE_MINION=deblike_minion && cd /testsuite && rake cucumber:secondary_parallelizable" diff --git a/testsuite/podman_runner/19_run_secondary_parallelizable_tests_subset.sh b/testsuite/podman_runner/19_run_secondary_parallelizable_tests_subset.sh index 7d40572bd5a5..9e2b620007e7 100755 --- a/testsuite/podman_runner/19_run_secondary_parallelizable_tests_subset.sh +++ b/testsuite/podman_runner/19_run_secondary_parallelizable_tests_subset.sh @@ -7,4 +7,4 @@ then exit 1 fi -sudo -i podman exec controller bash -c "export CUCUMBER_PUBLISH_TOKEN=${CUCUMBER_PUBLISH_TOKEN} && export PROVIDER=podman && export SERVER=server && export HOSTNAME=controller && export SSH_MINION=opensusessh && export MINION=sle_minion && export RHLIKE_MINION=rhlike_minion && cd /testsuite && export TAGS=\"\\\"not @flaky\\\"\" && rake cucumber:secondary_parallelizable_${1}" +sudo -i podman exec controller bash -c "export BUILD_HOST=buildhost && export AUTH_REGISTRY=${AUTH_REGISTRY} && export AUTH_REGISTRY_CREDENTIALS=\"${AUTH_REGISTRY_CREDENTIALS}\" && export NO_AUTH_REGISTRY=${NO_AUTH_REGISTRY} && export CUCUMBER_PUBLISH_TOKEN=${CUCUMBER_PUBLISH_TOKEN} && export PROVIDER=podman && export SERVER=server && export HOSTNAME=controller && export SSH_MINION=opensusessh && export MINION=sle_minion && export RHLIKE_MINION=rhlike_minion && export DEBLIKE_MINION=deblike_minion && cd /testsuite && export TAGS=\"\\\"not @flaky\\\"\" && rake cucumber:secondary_parallelizable_${1}" diff --git a/testsuite/podman_runner/21_get_client_logs.sh b/testsuite/podman_runner/21_get_client_logs.sh index 8e13410ed137..bc0c75061e8c 100755 --- a/testsuite/podman_runner/21_get_client_logs.sh +++ b/testsuite/podman_runner/21_get_client_logs.sh @@ -1,3 +1,3 @@ #!/bin/bash set -xe 
-sudo -i podman exec controller bash -c "export CUCUMBER_PUBLISH_TOKEN=${CUCUMBER_PUBLISH_TOKEN} && export PROVIDER=podman && export SERVER=server && export HOSTNAME=controller && export SSH_MINION=opensusessh && export MINION=sle_minion && export RHLIKE_MINION=rhlike_minion && cd /testsuite && cucumber features/finishing/allcli_debug.feature" +sudo -i podman exec controller bash -c "export CUCUMBER_PUBLISH_TOKEN=${CUCUMBER_PUBLISH_TOKEN} && export PROVIDER=podman && export SERVER=server && export HOSTNAME=controller && export SSH_MINION=opensusessh && export MINION=sle_minion && export RHLIKE_MINION=rhlike_minion && export DEBLIKE_MINION=deblike_minion && export BUILD_HOST=buildhost && cd /testsuite && cucumber features/finishing/allcli_debug.feature" diff --git a/testsuite/podman_runner/run b/testsuite/podman_runner/run index 47cc6dd67240..d536b077f7ec 100755 --- a/testsuite/podman_runner/run +++ b/testsuite/podman_runner/run @@ -2,6 +2,9 @@ export UYUNI_PROJECT=uyuni-project export UYUNI_VERSION=master +export NO_AUTH_REGISTRY=no_auth_registry +export AUTH_REGISTRY_CREDENTIALS="cucutest|cucutest" +export AUTH_REGISTRY=auth_registry set -x set -e @@ -9,7 +12,7 @@ set -e ./00_setup_env.sh ./01_setup_tmp_dirs.sh ./02_setup_network.sh -./03_run_controller.sh +./03_run_controller_and_registry_and_buildhost.sh ./04_setup_ssh_controller.sh ./05_install_gems_in_controller.sh ./06_collect_and_tag_flaky_tests_in_controller.sh diff --git a/testsuite/run_sets/github_validation/github_validation_init_clients.yml b/testsuite/run_sets/github_validation/github_validation_init_clients.yml index 05bc1a02920c..1adcadb61a41 100644 --- a/testsuite/run_sets/github_validation/github_validation_init_clients.yml +++ b/testsuite/run_sets/github_validation/github_validation_init_clients.yml @@ -6,4 +6,5 @@ - features/github_validation/init_clients/sle_minion.feature - features/github_validation/init_clients/min_rhlike_salt.feature - features/github_validation/init_clients/min_deblike_salt.feature +- features/github_validation/init_clients/buildhost_bootstrap.feature ## Container features END ###