From 0b0f58e481b96c6e4f38d362410f9542b94a8939 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Mon, 13 Feb 2023 23:16:31 +0100 Subject: [PATCH 01/80] Skip swap creation when we have overlayfs --- susemanager/bin/mgr-setup | 3 +++ 1 file changed, 3 insertions(+) diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 3f4f6384d349..cc69a06dd4bb 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -129,8 +129,11 @@ if [ $SWAP -eq 0 ]; then echo "Not enough space on /. Not adding swap space. Good luck..." else FSTYPE=`df -T / | tail -1 | awk '{print $2}'` + # Ignore for overlay too if [ $FSTYPE == "btrfs" ]; then echo "Will *NOT* create swapfile on btrfs. Make sure you have enough RAM!" + elif [ $FSTYPE == "overlay" ]; then + echo "Will *NOT* create swapfile in a container!" else if [ -f /SWAPFILE ]; then swapoff /SWAPFILE From ffc52d0189a1acfb7cc8a0b128ad5dad140d3a54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 16 Feb 2023 11:14:19 +0100 Subject: [PATCH 02/80] Setup: skip btrfs checks in containers --- susemanager/bin/mgr-setup | 75 +++++++++++++++++++++------------------ 1 file changed, 40 insertions(+), 35 deletions(-) diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index cc69a06dd4bb..de70bacd0ecd 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -319,47 +319,52 @@ check_mksubvolume() { } check_btrfs_dirs() { -DIR="/var/spacewalk" -if [ ! -d $DIR ]; then - FSTYPE=`df -T \`dirname $DIR\` | tail -1 | awk '{print $2}'` - echo -n "Filesystem type for $DIR is $FSTYPE - " - if [ $FSTYPE == "btrfs" ]; then - check_mksubvolume - echo "creating nCoW subvolume." - mksubvolume --nocow $DIR +ROOT_FSTYPE=`df -T / | tail -1 | awk '{print $2}'` +if [ $ROOT_FSTYPE == "overlay" ]; then + echo "Skipping btrfs check in containers" +else + DIR="/var/spacewalk" + if [ ! -d $DIR ]; then + FSTYPE=`df -T \`dirname $DIR\` | tail -1 | awk '{print $2}'` + echo -n "Filesystem type for $DIR is $FSTYPE - " + if [ $FSTYPE == "btrfs" ]; then + check_mksubvolume + echo "creating nCoW subvolume." + mksubvolume --nocow $DIR + else + echo "ok." + fi else - echo "ok." + echo "$DIR already exists. Leaving it untouched." fi -else - echo "$DIR already exists. Leaving it untouched." -fi -DIR="/var/cache" -if [ ! -d $DIR ]; then - mkdir $DIR -fi -FSTYPE=`df -T $DIR | tail -1 | awk '{print $2}'` -echo -n "Filesystem type for $DIR is $FSTYPE - " -if [ $FSTYPE == "btrfs" ]; then - TESTDIR=`basename $DIR` - btrfs subvolume list /var | grep "$TESTDIR" > /dev/null - if [ ! $? -eq 0 ]; then - check_mksubvolume - echo "creating subvolume." - mv $DIR ${DIR}.sav - mksubvolume $DIR - touch ${DIR}.sav/foobar.dummy - if [ ! -d $DIR ]; then - mkdir $DIR + DIR="/var/cache" + if [ ! -d $DIR ]; then + mkdir $DIR + fi + FSTYPE=`df -T $DIR | tail -1 | awk '{print $2}'` + echo -n "Filesystem type for $DIR is $FSTYPE - " + if [ $FSTYPE == "btrfs" ]; then + TESTDIR=`basename $DIR` + btrfs subvolume list /var | grep "$TESTDIR" > /dev/null + if [ ! $? -eq 0 ]; then + check_mksubvolume + echo "creating subvolume." + mv $DIR ${DIR}.sav + mksubvolume $DIR + touch ${DIR}.sav/foobar.dummy + if [ ! -d $DIR ]; then + mkdir $DIR + fi + mv ${DIR}.sav/* $DIR + rmdir ${DIR}.sav + rm -f $DIR/foobar.dummy + else + echo "subvolume for $DIR already exists. Fine." fi - mv ${DIR}.sav/* $DIR - rmdir ${DIR}.sav - rm -f $DIR/foobar.dummy else - echo "subvolume for $DIR already exists. Fine." + echo "ok." fi -else - echo "ok." 
fi } From f74caada3b0d65733c792df560ceda3c8ec82061 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 15 Feb 2023 21:37:23 +0100 Subject: [PATCH 03/80] Print the command that is running in Setup.pm to ease debugging --- spacewalk/setup/lib/Spacewalk/Setup.pm | 1 + 1 file changed, 1 insertion(+) diff --git a/spacewalk/setup/lib/Spacewalk/Setup.pm b/spacewalk/setup/lib/Spacewalk/Setup.pm index 7ff72ddbe492..c76ee783042a 100644 --- a/spacewalk/setup/lib/Spacewalk/Setup.pm +++ b/spacewalk/setup/lib/Spacewalk/Setup.pm @@ -672,6 +672,7 @@ sub print_progress { err_code => 1, system_opts => 1, }); + print "Running " . join(" ", @{$params{system_opts}}) . "\n"; local *LOGFILE; open(LOGFILE, ">>", $params{log_file_name}) or do { From 8c8133a6329b1e6753ff706ec95c319a2b438130 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 15 Feb 2023 21:33:02 +0100 Subject: [PATCH 04/80] Reportdb setup: only use fqdn when nothing provided in --host In the container world the hostname is private to the cluster and could even contain random parts. This change avoids confusion between the FQDN and HOST variables and offers a clean code path for containers. --- .../bin/uyuni-setup-reportdb | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/spacewalk/uyuni-setup-reportdb/bin/uyuni-setup-reportdb b/spacewalk/uyuni-setup-reportdb/bin/uyuni-setup-reportdb index 801d7727c6d4..98e87c57cf9b 100755 --- a/spacewalk/uyuni-setup-reportdb/bin/uyuni-setup-reportdb +++ b/spacewalk/uyuni-setup-reportdb/bin/uyuni-setup-reportdb @@ -112,7 +112,6 @@ if [ -x /usr/bin/lsof ]; then LSOF="/usr/bin/lsof" fi RUNUSER=runuser -FQDN=$(hostname -f) SSL_CERT=/etc/pki/tls/certs/spacewalk.crt SSL_KEY=/etc/pki/tls/private/pg-spacewalk.key CA_CERT=/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT @@ -126,7 +125,7 @@ create() { if $LOCAL ; then ADDRESS="127.0.0.1" REMOTE="127.0.0.1/32,::1/128" - FQDN="localhost" + HOST="localhost" else [ ! -s "$SSL_CERT" ] && { echo "SSL Certificate ($SSL_CERT) is required to setup the reporting database" >&2 @@ -254,7 +253,7 @@ report_db_backend = postgresql report_db_user = $PGUSER report_db_password = $PGPASSWORD report_db_name = $PGNAME -report_db_host = $FQDN +report_db_host = $HOST report_db_port = $PORT report_db_ssl_enabled = 1 report_db_sslrootcert = $CA_CERT @@ -264,11 +263,7 @@ EOF rhn_reconfig "report_db_user" "$PGUSER" rhn_reconfig "report_db_password" "$PGPASSWORD" rhn_reconfig "report_db_name" "$PGNAME" - if [ $EXTERNALDB = "0" ] ; then - rhn_reconfig "report_db_host" "$FQDN" - else - rhn_reconfig "report_db_host" "$HOST" - fi + rhn_reconfig "report_db_host" "$HOST" rhn_reconfig "report_db_port" "$PORT" if ! $LOCAL ; then rhn_reconfig "report_db_ssl_enabled" "1" @@ -617,12 +612,14 @@ while true ; do shift done -case $HOST in - "localhost"|$(hostname -s)|$(hostname -f)|"") - EXTERNALDB=0 ;; - *) - EXTERNALDB=1 ;; -esac +EXTERNALDB=0 +if [ -n "$EXTERNALDB_ADMIN_USER" ]; then + EXTERNALDB=1 +fi + +if [ -z "$HOST" ]; then + HOST=$(hostname -f) +fi case $1 in create) create From cae29e64d9c9b46a4b87af40e18715678e8d6a32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 15 Feb 2023 21:35:18 +0100 Subject: [PATCH 05/80] reportdb setup: use pg_isactive to wait for postgres Checking the socket doesnt work in the container world, using the tool coming with postgres is much cleaner and works in both cases. 
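A minimal sketch of such a wait loop, assuming PGUSER holds the reporting database user (the exact command used is in the diff below):

```
# Poll until PostgreSQL accepts connections, whether it listens on a
# local socket or on a container-exposed TCP port.
while ! pg_isready -q -U "$PGUSER"; do
    sleep 1
done
```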
--- spacewalk/uyuni-setup-reportdb/bin/uyuni-setup-reportdb | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/spacewalk/uyuni-setup-reportdb/bin/uyuni-setup-reportdb b/spacewalk/uyuni-setup-reportdb/bin/uyuni-setup-reportdb index 98e87c57cf9b..bd59cac6fe79 100755 --- a/spacewalk/uyuni-setup-reportdb/bin/uyuni-setup-reportdb +++ b/spacewalk/uyuni-setup-reportdb/bin/uyuni-setup-reportdb @@ -284,8 +284,7 @@ EOF if $LSOF /proc > /dev/null ; then while [ -f "$PG_PIDFILE" ] ; do # wait for postmaster to be ready - $LSOF -t -p $(head -1 "$PG_PIDFILE" 2>/dev/null) -a "$PG_SOCKET" > /dev/null \ - && break + pg_isready -q -U $(grep -oP '^db_user ?= ?\K.*' $RHN_CONF) && break sleep 1 done fi From c1f48803f049556dfe45cc6b78a2fe215277df8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Mon, 27 Feb 2023 08:49:29 +0100 Subject: [PATCH 06/80] mgr-setup: read hostname from input file When running the setup in a container running on kubernetes we don't have control on the hostname the user knows about. Rather that relying on getting it from the system, allow reading it from the setup input file. --- susemanager/bin/mgr-setup | 1 + 1 file changed, 1 insertion(+) diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index de70bacd0ecd..c292e828e234 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -445,6 +445,7 @@ report-db-user=$REPORT_DB_USER report-db-password=$REPORT_DB_PASS enable-tftp=$MANAGER_ENABLE_TFTP product_name=$PRODUCT_NAME +hostname=$HOSTNAME " > /root/spacewalk-answers if [ -n "$SCC_USER" ]; then From 9f1c2832baa1025605861eb890b331fb94a37b7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 28 Feb 2023 10:35:10 +0100 Subject: [PATCH 07/80] Setup cobbler with provided FQDN rather than from system discovery --- spacewalk/setup/bin/spacewalk-setup | 2 +- spacewalk/setup/bin/spacewalk-setup-cobbler | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/spacewalk/setup/bin/spacewalk-setup b/spacewalk/setup/bin/spacewalk-setup index 3be2d0e9d5ae..3bea4401c2dc 100755 --- a/spacewalk/setup/bin/spacewalk-setup +++ b/spacewalk/setup/bin/spacewalk-setup @@ -244,7 +244,7 @@ sub setup_cobbler { my %options = (); Spacewalk::Setup::read_config('/usr/share/rhn/config-defaults/rhn.conf',\%options); - system(COBBLER_COMMAND . " --apache2-config-directory " . $options{'httpd_config_dir'}); + system(COBBLER_COMMAND . " --apache2-config-directory " . $options{'httpd_config_dir'} . " -f " . $answers->{'hostname'}); my $skip_rhnconf = 0; open(FILE, "<" . 
Spacewalk::Setup::DEFAULT_RHN_CONF_LOCATION); diff --git a/spacewalk/setup/bin/spacewalk-setup-cobbler b/spacewalk/setup/bin/spacewalk-setup-cobbler index bdf99ef1790b..707a5f111170 100755 --- a/spacewalk/setup/bin/spacewalk-setup-cobbler +++ b/spacewalk/setup/bin/spacewalk-setup-cobbler @@ -24,6 +24,7 @@ parser.add_argument('--cobbler-config-directory', '-c', dest='cobbler_config_dir help='The directory where "settings" and "modules.conf" are in.') parser.add_argument('--apache2-config-directory', '-a', dest='httpd_config_directory', default="/etc/apache2/conf.d", help='The directory where the Apache config file "cobbler.conf" is in.') +parser.add_argument('--fqdn', '-f', dest='fqdn', default=None) COBBLER_CONFIG_DIRECTORY = "/etc/cobbler/" COBBLER_CONFIG_FILES = ["modules.conf", "settings.yaml"] @@ -41,20 +42,21 @@ def backup_file(file_path: str): copyfile(file_path, "%s.backup" % file_path) -def manipulate_cobbler_settings(config_dir: str, settings_yaml: str): +def manipulate_cobbler_settings(config_dir: str, settings_yaml: str, fqdn: str): """ Manipulate the main Cobbler configuration file which is in YAML format. This function backs the original configuration up and writes a new one with the required changes to the disk. :param config_dir: The directory of Cobbler where the config files are. :param settings_yaml: The name of the main YAML file of Cobbler. + :param fqdn: The FQDN of the server. If None (default), the FQDN is resolved from the system """ full_path = os.path.join(config_dir, settings_yaml) backup_file(full_path) with open(full_path) as settings_file: filecontent = yaml.safe_load(settings_file.read()) - filecontent["server"] = socket.getfqdn() + filecontent["server"] = fqdn or socket.getfqdn() # In case of failing DNS resolution, we get a OSError (socket.gaierror) try: @@ -72,7 +74,7 @@ def manipulate_cobbler_settings(config_dir: str, settings_yaml: str): exit(1) filecontent["pxe_just_once"] = True - filecontent["redhat_management_server"] = socket.getfqdn() + filecontent["redhat_management_server"] = fqdn or socket.getfqdn() yaml_dump = yaml.safe_dump(filecontent) with open(full_path, "w") as settings_file: settings_file.write(yaml_dump) @@ -128,7 +130,7 @@ def main(): """ args = parser.parse_args() sanitize_args(args) - manipulate_cobbler_settings(COBBLER_CONFIG_DIRECTORY, COBBLER_CONFIG_FILES[1]) + manipulate_cobbler_settings(COBBLER_CONFIG_DIRECTORY, COBBLER_CONFIG_FILES[1], args.fqdn) manipulate_cobbler_modules(COBBLER_CONFIG_DIRECTORY, COBBLER_CONFIG_FILES[0]) remove_virtual_host(HTTPD_CONFIG_DIRECTORY, COBBLER_HTTP_CONFIG) From 4b45a05f7270c26bd6a537f405b5f88df727621c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 7 Jun 2023 14:51:58 +0200 Subject: [PATCH 08/80] Use environment variables in mgr-setup if available When running the setup in a container we want to avoid the setup_env.sh script and read the values from variables. 
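A minimal sketch of driving the setup non-interactively this way, with placeholder values for the variables:

```
# All answers come from the environment; -n skips the prompts entirely.
export MANAGER_USER=admin
export MANAGER_PASS=secret
export MANAGER_ADMIN_EMAIL=admin@example.com
export UYUNI_FQDN=uyuni.example.com
/usr/lib/susemanager/bin/mgr-setup -s -n -l /var/log/susemanager_setup.log
```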
--- susemanager/bin/mgr-setup | 91 +++++++++++++++++++++++++-------------- 1 file changed, 58 insertions(+), 33 deletions(-) diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index c292e828e234..1a9dcc0a3f6d 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -53,6 +53,7 @@ LOGFILE="0" WAIT_BETWEEN_STEPS=0 MANAGER_FORCE_INSTALL=0 PROGRAM="/usr/lib/susemanager/bin/mgr-setup" +NON_INTERACTIVE=0 MIGRATION_ENV="/root/migration_env.sh" SETUP_ENV="/root/setup_env.sh" @@ -100,6 +101,7 @@ helper script to do migration or setup of $PRODUCT_NAME -s fresh setup of the $PRODUCT_NAME installation -w wait between steps (in case you do -r -m) -l LOGFILE write a log to LOGFILE + -n Don't ask anything, use default values -h this help screen @@ -118,6 +120,19 @@ wait_step() { fi; } +ask_input() { + # Ask for input if the variable is not already defined, could be set using an env variable + # Set to an empty string if running in non-interactive mode + VARIABLE=$1 + if [ -z ${!VARIABLE+x} ]; then + if [ $NON_INTERACTIVE -eq 0 ]; then + echo -n "$VARIABLE="; read $VARIABLE + else + declare $VARIABLE= + fi + fi +} + setup_swap() { SWAP=`LANG=C free | grep Swap: | sed -e "s/ \+/\t/g" | cut -f 2` @@ -817,43 +832,43 @@ do_migration() { } do_setup() { - NO_SSL= if [ -f $SETUP_ENV ]; then . $SETUP_ENV else # ask for the needed values if the setup_env file does not exist - echo -n "MANAGER_USER="; read MANAGER_USER - echo -n "MANAGER_PASS="; read MANAGER_PASS - echo -n "MANAGER_ADMIN_EMAIL="; read MANAGER_ADMIN_EMAIL - echo -n "CERT_CNAMES=" ; read CERT_CNAMES - echo -n "CERT_O=" ; read CERT_O - echo -n "CERT_OU=" ; read CERT_OU - echo -n "CERT_CITY=" ; read CERT_CITY - echo -n "CERT_STATE=" ; read CERT_STATE - echo -n "CERT_COUNTRY=" ; read CERT_COUNTRY - echo -n "CERT_EMAIL=" ; read CERT_EMAIL - echo -n "CERT_PASS=" ; read CERT_PASS - echo -n "LOCAL_DB=" ; read LOCAL_DB - echo -n "DB_BACKEND=" ; read DB_BACKEND - echo -n "MANAGER_DB_NAME=" ; read MANAGER_DB_NAME - echo -n "MANAGER_DB_HOST=" ; read MANAGER_DB_HOST - echo -n "MANAGER_DB_PORT=" ; read MANAGER_DB_PORT - echo -n "MANAGER_DB_CA_CERT=" ; read MANAGER_DB_CA_CERT - echo -n "MANAGER_DB_PROTOCOL="; read MANAGER_DB_PROTOCOL - echo -n "MANAGER_ENABLE_TFTP="; read MANAGER_ENABLE_TFTP - echo -n "EXTERNALDB_ADMIN_USER=" ; read EXTERNALDB_ADMIN_USER - echo -n "EXTERNALDB_ADMIN_PASS=" ; read EXTERNALDB_ADMIN_PASS - echo -n "EXTERNALDB_PROVIDER="; read EXTERNALDB_PROVIDER - echo -n "SCC_USER=" ; read SCC_USER - echo -n "SCC_PASS=" ; read SCC_PASS - echo -n "ISS_PARENT=" ; read ISS_PARENT - echo -n "ACTIVATE_SLP=" ; read ACTIVATE_SLP - echo -n "REPORT_DB_NAME=" ; read REPORT_DB_NAME - echo -n "REPORT_DB_HOST=" ; read REPORT_DB_HOST - echo -n "REPORT_DB_PORT=" ; read REPORT_DB_PORT - echo -n "REPORT_DB_USER=" ; read REPORT_DB_USER - echo -n "REPORT_DB_PASS=" ; read REPORT_DB_PASS - echo -n "REPORT_DB_CA_CERT=" ; read REPORT_DB_CA_CERT + ask_input MANAGER_USER + ask_input MANAGER_PASS + ask_input MANAGER_ADMIN_EMAIL + ask_input CERT_CNAMES + ask_input CERT_O + ask_input CERT_OU + ask_input CERT_CITY + ask_input CERT_STATE + ask_input CERT_COUNTRY + ask_input CERT_EMAIL + ask_input CERT_PASS + ask_input LOCAL_DB + ask_input DB_BACKEND + ask_input MANAGER_DB_NAME + ask_input MANAGER_DB_HOST + ask_input MANAGER_DB_PORT + ask_input MANAGER_DB_CA_CERT + ask_input MANAGER_DB_PROTOCOL + ask_input MANAGER_ENABLE_TFTP + ask_input EXTERNALDB_ADMIN_USER + ask_input EXTERNALDB_ADMIN_PASS + ask_input EXTERNALDB_PROVIDER + ask_input SCC_USER + 
ask_input SCC_PASS + ask_input ISS_PARENT + ask_input ACTIVATE_SLP + ask_input REPORT_DB_NAME + ask_input REPORT_DB_HOST + ask_input REPORT_DB_PORT + ask_input REPORT_DB_USER + ask_input REPORT_DB_PASS + ask_input REPORT_DB_CA_CERT + ask_input UYUNI_FQDN fi; if [ -z "$SYS_DB_PASS" ]; then SYS_DB_PASS=`dd if=/dev/urandom bs=16 count=4 2> /dev/null | md5sum | cut -b 1-8` @@ -887,6 +902,13 @@ do_setup() { EXTERNALDB=1 ;; esac + if [ -z "$NO_SSL" ]; then + NO_SSL= + fi + if [ -n "$UYUNI_FQDN" ]; then + HOSTNAME=$UYUNI_FQDN + fi + check_re_install echo "Do not delete this file unless you know what you are doing!" > $MANAGER_COMPLETE setup_swap @@ -972,6 +994,9 @@ do -w) WAIT_BETWEEN_STEPS=1 ;; + -n) + NON_INTERACTIVE=1 + ;; *) echo echo "Option \"$p\" is not recognized. Type \"$PROGRAM -h\" for help." From b0f35425bfc6d8bf19ecf317f6443f965f14b8d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Fri, 31 Mar 2023 17:12:09 +0200 Subject: [PATCH 09/80] mgr-bootstrap read the hostname from rhn.conf if possible Running mgr-bootstrap in a containerized server cannot assume the hostname is the user-facing FQDN. --- spacewalk/certs-tools/rhn_bootstrap.py | 4 +++- spacewalk/certs-tools/spacewalk-certs-tools.changes | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/spacewalk/certs-tools/rhn_bootstrap.py b/spacewalk/certs-tools/rhn_bootstrap.py index 3aaffcc2e19d..b2011e93f7a5 100755 --- a/spacewalk/certs-tools/rhn_bootstrap.py +++ b/spacewalk/certs-tools/rhn_bootstrap.py @@ -66,6 +66,8 @@ initCFG('server') DOC_ROOT = CFG.DOCUMENTROOT +initCFG('java') + DEFAULT_APACHE_PUB_DIRECTORY = DOC_ROOT + '/pub' DEFAULT_OVERRIDES = 'client-config-overrides.txt' DEFAULT_SCRIPT = 'bootstrap.sh' @@ -154,7 +156,7 @@ def getDefaultOptions(): 'activation-keys': '', 'overrides': DEFAULT_OVERRIDES, 'script': DEFAULT_SCRIPT, - 'hostname': socket.getfqdn(), + 'hostname': CFG.HOSTNAME if CFG.has_key('hostname') else socket.getfqdn(), 'ssl-cert': '', # will trigger a search 'gpg-key': "", 'http-proxy': "", diff --git a/spacewalk/certs-tools/spacewalk-certs-tools.changes b/spacewalk/certs-tools/spacewalk-certs-tools.changes index 05b2d2956e89..75043df1d282 100644 --- a/spacewalk/certs-tools/spacewalk-certs-tools.changes +++ b/spacewalk/certs-tools/spacewalk-certs-tools.changes @@ -1,4 +1,5 @@ - Add openssl3 compatibility. +- mgr-bootstrap read the hostname from rhn.conf if possible - Read CA password from a file - Also ship SUSE specific files on Enterprise Linux. 
- Use the CA cert in the pki config to generate build host rpm From 8d6827d01858987825e2bd529637e2e1828eea34 Mon Sep 17 00:00:00 2001 From: Ondrej Holecek Date: Tue, 18 Apr 2023 16:33:49 +0200 Subject: [PATCH 10/80] Remove server keys to allow reregistering to different master --- spacewalk/certs-tools/rhn_bootstrap_strings.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/spacewalk/certs-tools/rhn_bootstrap_strings.py b/spacewalk/certs-tools/rhn_bootstrap_strings.py index 89dc320575f9..abe2ed4e01af 100644 --- a/spacewalk/certs-tools/rhn_bootstrap_strings.py +++ b/spacewalk/certs-tools/rhn_bootstrap_strings.py @@ -605,7 +605,7 @@ def getRegistrationStackSh(): call_tukit "zypper --non-interactive update {PKG_NAME_VENV_UPDATE} ||:" fi else - if [ -z "$SNAPSHOT_ID"]; then + if [ -z "$SNAPSHOT_ID" ]; then zypper --non-interactive up {PKG_NAME_UPDATE} $RHNLIB_PKG ||: else call_tukit "zypper --non-interactive update {PKG_NAME_UPDATE} $RHNLIB_PKG ||:" @@ -896,12 +896,14 @@ def getRegistrationSaltSh(productName): fi MINION_ID_FILE="${{SNAPSHOT_PREFIX}}/etc/salt/minion_id" +MINION_PKI_CONF="${{SNAPSHOT_PREFIX}}/etc/salt/pki" MINION_CONFIG_DIR="${{SNAPSHOT_PREFIX}}/etc/salt/minion.d" SUSEMANAGER_MASTER_FILE="${{MINION_CONFIG_DIR}}/susemanager.conf" MINION_SERVICE="salt-minion" if [ $VENV_ENABLED -eq 1 ]; then MINION_ID_FILE="${{SNAPSHOT_PREFIX}}/etc/venv-salt-minion/minion_id" + MINION_PKI_CONF="${{SNAPSHOT_PREFIX}}/etc/venv-salt-minion/pki" MINION_CONFIG_DIR="${{SNAPSHOT_PREFIX}}/etc/venv-salt-minion/minion.d" SUSEMANAGER_MASTER_FILE="${{MINION_CONFIG_DIR}}/susemanager.conf" MINION_SERVICE="venv-salt-minion" @@ -951,6 +953,11 @@ def getRegistrationSaltSh(productName): SALT_RUNNING: 1 EOF +# Remove old minion keys so reregistration do different master works +if [ -d "$MINION_PKI_CONF" ]; then + rm -r "$MINION_PKI_CONF" +fi + if [ -n "$SNAPSHOT_ID" ]; then cat <> "${{MINION_CONFIG_DIR}}/transactional_update.conf" # Enable the transactional_update executor From b032822ca4af7faaf723bbaecf2438e2371cb5e8 Mon Sep 17 00:00:00 2001 From: Artem Shiliaev Date: Fri, 27 Jan 2023 15:56:17 +0100 Subject: [PATCH 11/80] initial version uyuni server image --- containers/server-image/.env | 23 ++++++ containers/server-image/Dockerfile | 73 ++++++++++++++++++++ containers/server-image/_constraints | 7 ++ containers/server-image/_service | 4 ++ containers/server-image/add_repos.sh | 20 ++++++ containers/server-image/server-image.changes | 1 + containers/server-image/tito.props | 2 + rel-eng/packages/server-image | 1 + 8 files changed, 131 insertions(+) create mode 100644 containers/server-image/.env create mode 100644 containers/server-image/Dockerfile create mode 100644 containers/server-image/_constraints create mode 100644 containers/server-image/_service create mode 100644 containers/server-image/add_repos.sh create mode 100644 containers/server-image/server-image.changes create mode 100644 containers/server-image/tito.props create mode 100644 rel-eng/packages/server-image diff --git a/containers/server-image/.env b/containers/server-image/.env new file mode 100644 index 000000000000..cce186fe910a --- /dev/null +++ b/containers/server-image/.env @@ -0,0 +1,23 @@ +# MANAGER_USER= +# MANAGER_PASS= +# MANAGER_ADMIN_EMAIL= +# CERT_O= +# CERT_OU= +# CERT_CITY= +# CERT_STATE= +# CERT_COUNTRY= +# CERT_EMAIL= +# CERT_PASS= +# USE_EXISTING_CERTS= +# MANAGER_DB_NAME= +# MANAGER_DB_HOST= +# MANAGER_DB_PORT= +# MANAGER_DB_PROTOCOL= +# MANAGER_ENABLE_TFTP= +# SCC_USER= +# SCC_PASS= +# REPORT_DB_HOST= 
+# REPORT_DB_PORT= +# REPORT_DB_NAME= +# REPORT_DB_USER= +# REPORT_DB_PASS= diff --git a/containers/server-image/Dockerfile b/containers/server-image/Dockerfile new file mode 100644 index 000000000000..60ced2c8de28 --- /dev/null +++ b/containers/server-image/Dockerfile @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: MIT +#!BuildTag: uyuni/server:latest uyuni/server:4.4.0 uyuni/server:4.4.0.%RELEASE% + +ARG INIT_BASE=registry.suse.com/bci/bci-base:15.4 +FROM $INIT_BASE + +ARG PRODUCT_REPO +ARG PRODUCT_PATTERN_PREFIX="patterns-uyuni" + +# Add distro and product repos +COPY add_repos.sh /usr/bin +RUN sh add_repos.sh ${PRODUCT_REPO} + +# Main packages +RUN zypper ref && zypper --non-interactive up +RUN zypper --gpg-auto-import-keys --non-interactive install --auto-agree-with-licenses --force-resolution \ + ${PRODUCT_PATTERN_PREFIX}_server \ + ${PRODUCT_PATTERN_PREFIX}_retail \ + susemanager-tftpsync \ + golang-github-prometheus-node_exporter \ + prometheus-postgres_exporter \ + golang-github-QubitProducts-exporter_exporter \ + prometheus-jmx_exporter \ + prometheus-jmx_exporter-tomcat \ + spacecmd \ + grafana-formula \ + locale-formula \ + prometheus-exporters-formula \ + prometheus-formula \ + registry-formula \ + virtualization-formulas \ + uyuni-config-formula \ + inter-server-sync \ + golang-github-lusitaniae-apache_exporter \ + golang-github-prometheus-node_exporter \ + prometheus-postgres_exporter \ + golang-github-QubitProducts-exporter_exporter \ + prometheus-jmx_exporter \ + spacecmd \ + javamail \ + libyui-ncurses-pkg16 \ + virtual-host-gatherer-Kubernetes \ + virtual-host-gatherer-libcloud \ + virtual-host-gatherer-Libvirt \ + virtual-host-gatherer-Nutanix \ + virtual-host-gatherer-VMware \ + vim + + +# FIXME hack to correct the report db script to work on containers +RUN cp /usr/bin/uyuni-setup-reportdb /usr/bin/uyuni-setup-reportdb.original +RUN sed -i 's/sysctl kernel.shmmax/#sysctl kernel.shmmax/g' /usr/bin/uyuni-setup-reportdb + +# LABELs +ARG PRODUCT=Uyuni +ARG VENDOR="Uyuni project" +ARG URL="https://www.uyuni-project.org/" +ARG REFERENCE_PREFIX="registry.opensuse.org/uyuni" + +# Build Service required labels +# labelprefix=org.opensuse.uyuni.server +LABEL org.opencontainers.image.title="${PRODUCT} server container" +LABEL org.opencontainers.image.description="All-in-one ${PRODUCT} server image" +LABEL org.opencontainers.image.created="%BUILDTIME%" +LABEL org.opencontainers.image.vendor="${VENDOR}" +LABEL org.opencontainers.image.url="${URL}" +LABEL org.opencontainers.image.version="4.4.0" +LABEL org.openbuildservice.disturl="%DISTURL%" +LABEL org.opensuse.reference="${REFERENCE_PREFIX}/server:4.4.0.%RELEASE%" +# endlabelprefix + +CMD ["/usr/lib/systemd/systemd"] +HEALTHCHECK --interval=5s --timeout=5s --retries=5 CMD ["/usr/bin/systemctl", "is-active", "multi-user.target"] diff --git a/containers/server-image/_constraints b/containers/server-image/_constraints new file mode 100644 index 000000000000..e6e2c4c0e019 --- /dev/null +++ b/containers/server-image/_constraints @@ -0,0 +1,7 @@ + + + + 10 + + + diff --git a/containers/server-image/_service b/containers/server-image/_service new file mode 100644 index 000000000000..bde87fa5bc1f --- /dev/null +++ b/containers/server-image/_service @@ -0,0 +1,4 @@ + + + + diff --git a/containers/server-image/add_repos.sh b/containers/server-image/add_repos.sh new file mode 100644 index 000000000000..02023fb872d1 --- /dev/null +++ b/containers/server-image/add_repos.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -xe + +if [ -n "$1" ]; then + # 
update + zypper ar -G http://download.opensuse.org/update/leap/15.4/sle/ sle_update_repo + zypper ar -G http://download.opensuse.org/update/leap/15.4/oss/ os_update_repo + zypper ar -G http://download.opensuse.org/update/leap/15.4/backports/ backports_update_repo + + # distribution + zypper ar -G http://download.opensuse.org/distribution/leap/15.4/repo/oss/ os_pool_repo + + # product + #TODO uncomment when changes are in master branch + #zypper ar -f -G http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Uyuni-Server-POOL-x86_64-Media1/ server_pool_repo + zypper ar -G http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Testing-Overlay-POOL-x86_64-Media1/ testing_overlay_devel_repo + + zypper addrepo $1 product +fi diff --git a/containers/server-image/server-image.changes b/containers/server-image/server-image.changes new file mode 100644 index 000000000000..f8f88353773a --- /dev/null +++ b/containers/server-image/server-image.changes @@ -0,0 +1 @@ +- Initialized a server image \ No newline at end of file diff --git a/containers/server-image/tito.props b/containers/server-image/tito.props new file mode 100644 index 000000000000..f22069cb8efa --- /dev/null +++ b/containers/server-image/tito.props @@ -0,0 +1,2 @@ +[buildconfig] +tagger = tito.tagger.SUSEContainerTagger diff --git a/rel-eng/packages/server-image b/rel-eng/packages/server-image new file mode 100644 index 000000000000..fe257db88808 --- /dev/null +++ b/rel-eng/packages/server-image @@ -0,0 +1 @@ +4.4.0 containers/server-image/ From f4b7a2c8b9c4e658cc1e64bc034bf128228981a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 14 Feb 2023 16:26:47 +0100 Subject: [PATCH 12/80] Add ant rules to deploy to a kubernetes pod --- java/manager-build.xml | 140 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 135 insertions(+), 5 deletions(-) diff --git a/java/manager-build.xml b/java/manager-build.xml index c2689a520d62..a7d9122d7419 100644 --- a/java/manager-build.xml +++ b/java/manager-build.xml @@ -21,6 +21,8 @@ + + @@ -38,6 +40,8 @@ + + @@ -226,7 +230,14 @@ yarn is not in the PATH. Please install yarn first. - + + + + + + + + @@ -235,10 +246,6 @@ - - - - @@ -294,6 +301,129 @@ + + + + + + + + kubectl is not in the PATH. Please install kubectl first. 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From 9cdb01cb04dbe0943298dcf93149b38380221f05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 16 Feb 2023 11:31:33 +0100 Subject: [PATCH 13/80] server: autostart setup with the container --- containers/server-image/Dockerfile | 5 +++++ containers/server-image/uyuni-setup.service | 16 ++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 containers/server-image/uyuni-setup.service diff --git a/containers/server-image/Dockerfile b/containers/server-image/Dockerfile index 60ced2c8de28..f836a2564cce 100644 --- a/containers/server-image/Dockerfile +++ b/containers/server-image/Dockerfile @@ -11,6 +11,9 @@ ARG PRODUCT_PATTERN_PREFIX="patterns-uyuni" COPY add_repos.sh /usr/bin RUN sh add_repos.sh ${PRODUCT_REPO} +# Copy Uyuni setup script +COPY uyuni-setup.service /usr/lib/systemd/system/ + # Main packages RUN zypper ref && zypper --non-interactive up RUN zypper --gpg-auto-import-keys --non-interactive install --auto-agree-with-licenses --force-resolution \ @@ -51,6 +54,8 @@ RUN zypper --gpg-auto-import-keys --non-interactive install --auto-agree-with-li RUN cp /usr/bin/uyuni-setup-reportdb /usr/bin/uyuni-setup-reportdb.original RUN sed -i 's/sysctl kernel.shmmax/#sysctl kernel.shmmax/g' /usr/bin/uyuni-setup-reportdb +RUN systemctl enable uyuni-setup + # LABELs ARG PRODUCT=Uyuni ARG VENDOR="Uyuni project" diff --git a/containers/server-image/uyuni-setup.service b/containers/server-image/uyuni-setup.service new file mode 100644 index 000000000000..f47ff30eb362 --- /dev/null +++ b/containers/server-image/uyuni-setup.service @@ -0,0 +1,16 @@ +[Unit] +Description=Uyuni run setup + +[Install] +WantedBy=multi-user.target + +[Service] +PassEnvironment=MANAGER_USER MANAGER_PASS MANAGER_ADMIN_EMAIL +PassEnvironment=CERT_CNAMES CERT_O CERT_OU CERT_CITY CERT_STATE CERT_COUNTRY CERT_EMAIL CERT_PASS +PassEnvironment=LOCAL_DB MANAGER_DB_NAME MANAGER_DB_HOST MANAGER_DB_PORT MANAGER_DB_CA_CERT MANAGER_DB_PROTOCOL +PassEnvironment=MANAGER_ENABLE_TFTP EXTERNALDB_ADMIN_USER EXTERNALDB_ADMIN_PASS EXTERNALDB_PROVIDER +PassEnvironment=SCC_USER SCC_PASS ISS_PARENT ACTIVATE_SLP MANAGER_MAIL_FROM NO_SSL UYUNI_FQDN +PassEnvironment=REPORT_DB_NAME REPORT_DB_HOST REPORT_DB_PORT_USER REPORT_DB_PASS REPORT_DB_CA_CERT +ExecStart=/usr/lib/susemanager/bin/mgr-setup -l /var/log/susemanager_setup.log -s -n +ExecStartPost=systemctl disable --now uyuni-setup.service +Type=oneshot From d381fe1407b867bf2f7703b89a31336e5d98cc19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 16 Feb 2023 22:06:33 +0100 Subject: [PATCH 14/80] Add notes on how to use the server image on rke2 --- containers/doc/server-kubernetes/README.md | 190 +++++++ .../server-kubernetes/k3s-ingress-routes.yaml | 98 ++++ .../server-kubernetes/k3s-traefik-config.yaml | 48 ++ .../doc/server-kubernetes/migration-job.yaml | 169 ++++++ .../nginx-uyuni-ingress.yaml | 139 +++++ containers/doc/server-kubernetes/pvcs.yaml | 329 +++++++++++ containers/doc/server-kubernetes/pvs.yaml | 531 ++++++++++++++++++ .../rke2-ingress-nginx-config.yaml | 19 + containers/doc/server-kubernetes/server.yaml | 492 ++++++++++++++++ containers/doc/server-kubernetes/service.yaml | 61 ++ .../doc/server-kubernetes/uyuni-config.yaml | 25 + .../uyuni-ingress-traefik.yaml | 165 ++++++ 12 files changed, 
2266 insertions(+) create mode 100644 containers/doc/server-kubernetes/README.md create mode 100644 containers/doc/server-kubernetes/k3s-ingress-routes.yaml create mode 100644 containers/doc/server-kubernetes/k3s-traefik-config.yaml create mode 100644 containers/doc/server-kubernetes/migration-job.yaml create mode 100644 containers/doc/server-kubernetes/nginx-uyuni-ingress.yaml create mode 100644 containers/doc/server-kubernetes/pvcs.yaml create mode 100644 containers/doc/server-kubernetes/pvs.yaml create mode 100644 containers/doc/server-kubernetes/rke2-ingress-nginx-config.yaml create mode 100644 containers/doc/server-kubernetes/server.yaml create mode 100644 containers/doc/server-kubernetes/service.yaml create mode 100644 containers/doc/server-kubernetes/uyuni-config.yaml create mode 100644 containers/doc/server-kubernetes/uyuni-ingress-traefik.yaml diff --git a/containers/doc/server-kubernetes/README.md b/containers/doc/server-kubernetes/README.md new file mode 100644 index 000000000000..5f911e979752 --- /dev/null +++ b/containers/doc/server-kubernetes/README.md @@ -0,0 +1,190 @@ +# Running the server-image on kubernetes + +## Prerequisites + +The following assumes you have a single-node rke2 or k3s cluster ready with enough resources for the Uyuni server. +It also assumes that `kubectl` is installed on your machine and configured to connect to the cluster. + +** HACK ** For now I used the SSL certificates and CA generated in one of my installation attempts. +I will assume you already have SSL certificates matching the FQDN of the cluster node. +Instructions or tools on how to generate those will come later. + +## Setting up the resources + +### RKE2 specific setup + +Copy the `rke2-ingress-nginx-config.yaml` file to `/var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml` on your rke2 node. +Wait for the ingress controller to restart. +Run this command to watch it restart: + +``` +watch kubectl get -n kube-system pod -lapp.kubernetes.io/name=rke2-ingress-nginx +``` + +Set the shell variable `INGRESS=nginx` to be used in the next steps. + +### K3s specific setup + + +Copy the `k3s-traefik-config.yaml` file to `/var/lib/rancher/k3s/server/manifests/` on your k3s node. +Wait for trafik to restart. +Run this commant to watch it restart: + +``` +watch kubectl get -n kube-system pod -lapp.kubernetes.io/name=traefik +``` + +Set the shell variable `INGRESS=traefik` to be used in the next steps. + +***Offline installation:*** with k3s it is possible to preload the container images and avoid it to be fetched from a registry. +For this, on a machine with internet access, pull the image using `podman`, `docker` or `skopeo` and save it as a `tar` archive. 
+For example: + +``` +podman pull registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest +podman save --output server-image.tar registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest +``` + +or + +``` +skopeo copy docker://registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest docker-archive:server-image.tar:registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest +``` + +Transfer the resulting `server-image.tar` to the k3s node and load it using the following command: + +``` +k3s ctr images import server-image.tar +``` + +In order to tell k3s to not pull the image, add `imagePullPolicy: Never` to all `initContainer`s and `container` in the `server.yaml` file: + +``` +sed 's/^\( \+\)image:\(.*\)$/\1image: \2\n\1imagePullPolicy: Never/' -i server.yaml +``` + +### Migrating from a regular server + +Stop the source services: + +``` +spacewalk-service stop +systemctl stop postgresql +``` + +Create a password-less SSH key and create a kubernetes secret with it: + +``` +ssh-keygen +kubectl create secret generic migration-ssh-key --from-file=id_rsa=$HOME/.ssh/id_rsa --from-file=id_rsa.pub=$HOME/.ssh/id_rsa.pub +``` +Add the generated public key to the server to migrate authorized keys. + +Run the migration job: + +``` +kubectl apply -f migration-job.yaml +``` + +To follow the progression of the process, check the generated container log: + +``` +kubectl logs (kubectl get pod -ljob-name=uyuni-migration -o custom-columns=NAME:.metadata.name --no-hea +ders) +``` + +Once done, both the job and its pod will remain until the user deletes them to allow checking logs. + +Proceed with the next steps. + +***Hostname***: this procedure doesn't handle any hostname change. +Certificates migration also needs to be documented, but that can be guessed for now with the instructions to setup a server from scratch. + +### Deploy the pod and its resources + +Create the TLS secret holding the server SSL certificates: + +``` +kubectl create secret tls uyuni-cert --key /server.key --cert /server.crt +``` + +Create a `ConfigMap` with the CA certificate: + +``` +kubectl create configmap uyuni-ca --from-file=ca.crt=/RHN-ORG-TRUSTED-SSL-CERT +``` + +Define the persistent volumes by running `kubectl apply -f pvs.yaml`. +The volumes are folders on the cluster node and need to be manually created: + +``` +mkdir -p `kubectl get pv -o jsonpath='{.items[*].spec.local.path}'` +``` + +In my setup, the cluster node is named `uyuni-dev` and its FQDN is `uyuni-dev.world-co.com`. +You will need to replace those values in the yaml files. + +Once done, run the following commands: + +``` +for YAML in pvcs service uyuni-config server $INGRESS-uyuni-ingress; do + kubectl apply -f $YAML.yaml +done +``` +The pod takes a while to start as it needs to initialize the mounts and run the setup. +Run `kubectl get pod uyuni` and wait for it to be in `RUNNING` state. +Even after this, give it time to complete the setup during first boot. + +You can monitor the progress of the setup with `kubectl exec uyuni -- tail -f /var/log/susemanager_setup.log` + +## Using the pod + +To getting a shell in the pod run `kubectl exec -ti uyuni -- sh`. +Note that the part after the `--` can be any command to run inside the server. + +To copy files to the server, use the `kubectl cp uyuni:` command. +Run `kubectl cp --help` for more details on how to use it. 
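+
+For example (the file paths here are only placeholders), copy a configuration file into the pod and fetch the setup log back out:
+
+```
+kubectl cp ./rhn.conf uyuni:/etc/rhn/rhn.conf
+kubectl cp uyuni:/var/log/susemanager_setup.log ./susemanager_setup.log
+```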
+ +## Developping with the pod + +### Deploying code + +To deploy java code on the pod change to the `java` directory and run: + +``` +ant -f manager-build.xml refresh-branding-jar deploy-restart-kube +``` + +In case you changed the pod name and namespace while deploying it, pass the corresponding `-Ddeploy.namespace=` and `-Ddeploy.pod=` parameters. + +**Note** To deploy TSX or Salt code, use the `deploy-static-resources-kube` and `deploy-salt-files-kube` tasks of the ant file. + +### Attaching a java debugger + +First enable the JDWP options in both tomcat and taskomatic using the following command: + +``` +ant -f manager-build.xml enable-java-debug-kube +``` + +Then restart tomcat and taskomatic using ant too: + +``` +ant -f manager-build.xml restart-tomcat-kube restart-taskomatic-kube +``` + +The debugger can now be attached to the usual ports (8000 for tomcat and 8001 for taskomatic) on the host FQDN. + +## Throwing everything away + +If you want to create from a fresh pod, run `kubectl delete pod uyuni`. + +Then run this command on the cluster node to cleanup the volumes: + +``` +for v in `ls /var/uyuni/`; do + rm -r /var/uyuni/$v; mkdir /var/uyuni/$v +done +``` + +To create the pod again, just run `kubectl apply -f server.yaml` and wait. diff --git a/containers/doc/server-kubernetes/k3s-ingress-routes.yaml b/containers/doc/server-kubernetes/k3s-ingress-routes.yaml new file mode 100644 index 000000000000..1c9fa2a693ea --- /dev/null +++ b/containers/doc/server-kubernetes/k3s-ingress-routes.yaml @@ -0,0 +1,98 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: uyuni-https-redirect +spec: + redirectScheme: + scheme: https + permanent: true +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: postgresql-router +spec: + entryPoints: + - postgres + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 5432 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: salt-publish-router +spec: + entryPoints: + - salt-publish + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 4505 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: salt-request-router +spec: + entryPoints: + - salt-request + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 4506 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: cobbler-router +spec: + entryPoints: + - cobbler + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 25151 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: tomcat-debug-router +spec: + entryPoints: + - tomcat-debug + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 8000 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: tasko-debug-router +spec: + entryPoints: + - tasko-debug + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 8001 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteUDP +metadata: + name: tftp-router +spec: + entryPoints: + - tftp + routes: + - services: + - name: uyuni-udp + port: 69 diff --git a/containers/doc/server-kubernetes/k3s-traefik-config.yaml b/containers/doc/server-kubernetes/k3s-traefik-config.yaml new file mode 100644 index 000000000000..55ae3802a3b2 --- /dev/null +++ b/containers/doc/server-kubernetes/k3s-traefik-config.yaml @@ -0,0 +1,48 @@ +apiVersion: helm.cattle.io/v1 +kind: 
HelmChartConfig +metadata: + name: traefik + namespace: kube-system +spec: + valuesContent: |- + logs: + general: + level: DEBUG + access: + enabled: true + ports: + postgres: + port: 5432 + expose: true + exposedPort: 5432 + protocol: TCP + salt-publish: + port: 4505 + expose: true + exposedPort: 4505 + protocol: TCP + salt-request: + port: 4506 + expose: true + exposedPort: 4506 + protocol: TCP + cobbler: + port: 25151 + expose: true + exposedPort: 25151 + protocol: TCP + tomcat-debug: + port: 8080 + expose: true + exposedPort: 8000 + protocol: TCP + tasko-debug: + port: 8081 + expose: true + exposedPort: 8001 + protocol: TCP + tftp: + port: 69 + expose: true + exposedPort: 69 + protocol: UDP diff --git a/containers/doc/server-kubernetes/migration-job.yaml b/containers/doc/server-kubernetes/migration-job.yaml new file mode 100644 index 000000000000..f695ef8079cc --- /dev/null +++ b/containers/doc/server-kubernetes/migration-job.yaml @@ -0,0 +1,169 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: uyuni-migration +spec: + backoffLimit: 1 + template: + spec: + restartPolicy: Never + containers: + - name: rsync-var-pgsql + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + mkdir /root/.ssh; + ssh-keyscan -t rsa uyuni.world-co.com >>~/.ssh/known_hosts; + ln -s /root/keys/id_rsa /root/.ssh/id_rsa; + ln -s /root/keys/id_rsa.pub /root/.ssh/id_rsa.pub; + for folder in /var/lib/pgsql \ + /var/cache \ + /var/spacewalk \ + /var/log \ + /srv/salt \ + /srv/www/htdocs/pub \ + /srv/www/cobbler \ + /srv/www/os-images \ + /srv/tftpboot \ + /srv/formula_metadata \ + /srv/pillar \ + /srv/susemanager \ + /srv/spacewalk \ + /root \ + /etc/apache2 \ + /etc/rhn \ + /etc/systemd/system/multi-user.target.wants \ + /etc/salt \ + /etc/tomcat \ + /etc/cobbler \ + /etc/sysconfig; + do + rsync -avz uyuni.world-co.com:$folder/ $folder; + done; + rm -f /srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT; + ln -s /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT /srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT; + echo 'server.no_ssl = 1' >> /etc/rhn/rhn.conf; + sed 's/address=[^:]*:/address=uyuni:/' -i /etc/rhn/taskomatic.conf; + sed 's/address=[^:]*:/address=uyuni:/' -i /etc/sysconfig/tomcat; + volumeMounts: + - mountPath: /var/lib/pgsql + name: var-pgsql + - mountPath: /var/cache + name: var-cache + - mountPath: /var/spacewalk + name: var-spacewalk + - mountPath: /var/log + name: var-log + - mountPath: /srv/salt + name: srv-salt + - mountPath: /srv/www/htdocs/pub + name: srv-www-pub + - mountPath: /srv/www/cobbler + name: srv-www-cobbler + - mountPath: /srv/www/os-images + name: srv-www-osimages + - mountPath: /srv/tftpboot + name: srv-tftpboot + - mountPath: /srv/formula_metadata + name: srv-formulametadata + - mountPath: /srv/pillar + name: srv-pillar + - mountPath: /srv/susemanager + name: srv-susemanager + - mountPath: /srv/spacewalk + name: srv-spacewalk + - mountPath: /root + name: root + - mountPath: /etc/apache2 + name: etc-apache2 + - mountPath: /etc/rhn + name: etc-rhn + - mountPath: /etc/systemd/system/multi-user.target.wants + name: etc-systemd + - mountPath: /etc/salt + name: etc-salt + - mountPath: /etc/tomcat + name: etc-tomcat + - mountPath: /etc/cobbler + name: etc-cobbler + - mountPath: /etc/sysconfig + name: etc-sysconfig + - mountPath: /root/keys + name: ssh-key + volumes: + - name: var-pgsql + persistentVolumeClaim: + claimName: var-pgsql + - name: var-cache + persistentVolumeClaim: + claimName: var-cache 
+ - name: var-spacewalk + persistentVolumeClaim: + claimName: var-spacewalk + - name: var-log + persistentVolumeClaim: + claimName: var-log + - name: srv-salt + persistentVolumeClaim: + claimName: srv-salt + - name: srv-www-pub + persistentVolumeClaim: + claimName: srv-www-pub + - name: srv-www-cobbler + persistentVolumeClaim: + claimName: srv-www-cobbler + - name: srv-www-osimages + persistentVolumeClaim: + claimName: srv-www-osimages + - name: srv-tftpboot + persistentVolumeClaim: + claimName: srv-tftpboot + - name: srv-formulametadata + persistentVolumeClaim: + claimName: srv-formulametadata + - name: srv-pillar + persistentVolumeClaim: + claimName: srv-pillar + - name: srv-susemanager + persistentVolumeClaim: + claimName: srv-susemanager + - name: srv-spacewalk + persistentVolumeClaim: + claimName: srv-spacewalk + - name: root + persistentVolumeClaim: + claimName: root + - name: etc-apache2 + persistentVolumeClaim: + claimName: etc-apache2 + - name: etc-rhn + persistentVolumeClaim: + claimName: etc-rhn + - name: etc-systemd + persistentVolumeClaim: + claimName: etc-systemd + - name: etc-salt + persistentVolumeClaim: + claimName: etc-salt + - name: etc-tomcat + persistentVolumeClaim: + claimName: etc-tomcat + - name: etc-cobbler + persistentVolumeClaim: + claimName: etc-cobbler + - name: etc-sysconfig + persistentVolumeClaim: + claimName: etc-sysconfig + - name: ssh-key + secret: + secretName: migration-ssh-key + items: + - key: id_rsa + mode: 0600 + path: id_rsa + - key: id_rsa.pub + mode: 0644 + path: id_rsa.pub diff --git a/containers/doc/server-kubernetes/nginx-uyuni-ingress.yaml b/containers/doc/server-kubernetes/nginx-uyuni-ingress.yaml new file mode 100644 index 000000000000..b17a50a7bc14 --- /dev/null +++ b/containers/doc/server-kubernetes/nginx-uyuni-ingress.yaml @@ -0,0 +1,139 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + creationTimestamp: null + name: uyuni-ingress-ssl +spec: + tls: + - hosts: + - uyuni-dev.world-co.com + secretName: uyuni-cert + rules: + - host: uyuni-dev.world-co.com + http: + paths: + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: / + pathType: Prefix +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + creationTimestamp: null + name: uyuni-ingress-nossl + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "false" +spec: + rules: + - host: uyuni-dev.world-co.com + http: + paths: + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /pub + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/([^/])+/DownloadFile + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /(rhn/)?rpc/api + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/errors + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/ty/TinyUrl + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/websocket + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/metrics + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /cobbler_api + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /cblr + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /httpboot + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /images + pathType: Prefix + - 
backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /cobbler + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /os-images + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /tftp + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /docs + pathType: Prefix diff --git a/containers/doc/server-kubernetes/pvcs.yaml b/containers/doc/server-kubernetes/pvcs.yaml new file mode 100644 index 000000000000..62b89e050f9a --- /dev/null +++ b/containers/doc/server-kubernetes/pvcs.yaml @@ -0,0 +1,329 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-pgsql +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 100Gi + selector: + matchLabels: + data: var-pgsql +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-cache +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 100Gi + selector: + matchLabels: + data: var-cache +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-spacewalk +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 100Gi + selector: + matchLabels: + data: var-spacewalk +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-log +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 2Gi + selector: + matchLabels: + data: var-log +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-salt +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 100Mi + selector: + matchLabels: + data: srv-salt +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-www-pub +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 100Mi + selector: + matchLabels: + data: srv-www-pub +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-www-cobbler +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 100Mi + selector: + matchLabels: + data: srv-www-cobbler +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-www-osimages +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 100Mi + selector: + matchLabels: + data: srv-www-osimages +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-tftpboot +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 100Mi + selector: + matchLabels: + data: srv-tftpboot +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-formulametadata +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 100Mi + selector: + matchLabels: + data: srv-formulametadata +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-pillar +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 100Mi + selector: + matchLabels: + data: srv-pillar +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-susemanager +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 100Mi + selector: + matchLabels: + data: 
srv-susemanager +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-spacewalk +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 100Mi + selector: + matchLabels: + data: srv-spacewalk +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: root +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 10Mi + selector: + matchLabels: + data: root +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-apache2 +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 10Mi + selector: + matchLabels: + data: etc-apache2 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-rhn +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 10Mi + selector: + matchLabels: + data: etc-rhn +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-systemd +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 10Mi + selector: + matchLabels: + data: etc-systemd +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-salt +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 10Mi + selector: + matchLabels: + data: etc-salt +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-tomcat +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 10Mi + selector: + matchLabels: + data: etc-tomcat +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-cobbler +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 1Mi + selector: + matchLabels: + data: etc-cobbler +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-sysconfig +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 1Mi + selector: + matchLabels: + data: etc-sysconfig +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-tls +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-storage + resources: + requests: + storage: 1Mi + selector: + matchLabels: + data: etc-tls diff --git a/containers/doc/server-kubernetes/pvs.yaml b/containers/doc/server-kubernetes/pvs.yaml new file mode 100644 index 000000000000..e75a64c99c42 --- /dev/null +++ b/containers/doc/server-kubernetes/pvs.yaml @@ -0,0 +1,531 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local-storage +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: Immediate +reclaimPolicy: Delete +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: var-pgsql + labels: + data: var-pgsql +spec: + capacity: + storage: 100Gi + accessModes: + - ReadWriteOnce + storageClassName: local-storage + local: + path: /var/uyuni/var-pgsql + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: var-cache + labels: + data: var-cache +spec: + capacity: + storage: 100Gi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/var-cache + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - 
key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: var-spacewalk + labels: + data: var-spacewalk +spec: + capacity: + storage: 100Gi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/var-spacewalk + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: var-log + labels: + data: var-log +spec: + capacity: + storage: 2Gi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/var-log + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: srv-salt + labels: + data: srv-salt +spec: + capacity: + storage: 100Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/srv-salt + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: srv-www-pub + labels: + data: srv-www-pub +spec: + capacity: + storage: 100Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/srv-www-pub + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: srv-www-cobbler + labels: + data: srv-www-cobbler +spec: + capacity: + storage: 100Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/srv-www-cobbler + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: srv-www-osimages + labels: + data: srv-www-osimages +spec: + capacity: + storage: 100Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/srv-www-osimages + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: srv-tftpboot + labels: + data: srv-tftpboot +spec: + capacity: + storage: 100Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/srv-tftpboot + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: srv-formulametadata + labels: + data: srv-formulametadata +spec: + capacity: + storage: 100Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/srv-formulametadata + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: srv-pillar + labels: + data: srv-pillar +spec: + capacity: 
+ storage: 100Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/srv-pillar + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: srv-susemanager + labels: + data: srv-susemanager +spec: + capacity: + storage: 100Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/srv-susemanager + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: srv-spacewalk + labels: + data: srv-spacewalk +spec: + capacity: + storage: 100Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/srv-spacewalk + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: root + labels: + data: root +spec: + capacity: + storage: 10Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/root + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: etc-apache2 + labels: + data: etc-apache2 +spec: + capacity: + storage: 10Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/etc-apache2 + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: etc-rhn + labels: + data: etc-rhn +spec: + capacity: + storage: 10Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/etc-rhn + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: etc-systemd + labels: + data: etc-systemd +spec: + capacity: + storage: 10Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/etc-systemd + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: etc-salt + labels: + data: etc-salt +spec: + capacity: + storage: 10Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/etc-salt + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: etc-tomcat + labels: + data: etc-tomcat +spec: + capacity: + storage: 10Mi + accessModes: + - ReadWriteOnce + storageClassName: local-storage + local: + path: /var/uyuni/etc-tomcat + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - 
uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: etc-cobbler + labels: + data: etc-cobbler +spec: + capacity: + storage: 1Mi + accessModes: + - ReadWriteOnce + storageClassName: local-storage + local: + path: /var/uyuni/etc-cobbler + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: etc-sysconfig + labels: + data: etc-sysconfig +spec: + capacity: + storage: 1Mi + accessModes: + - ReadWriteOnce + storageClassName: local-storage + local: + path: /var/uyuni/etc-sysconfig + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: etc-tls + labels: + data: etc-tls +spec: + capacity: + storage: 1Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/etc-tls + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev diff --git a/containers/doc/server-kubernetes/rke2-ingress-nginx-config.yaml b/containers/doc/server-kubernetes/rke2-ingress-nginx-config.yaml new file mode 100644 index 000000000000..2180adcf6c87 --- /dev/null +++ b/containers/doc/server-kubernetes/rke2-ingress-nginx-config.yaml @@ -0,0 +1,19 @@ +apiVersion: helm.cattle.io/v1 +kind: HelmChartConfig +metadata: + name: rke2-ingress-nginx + namespace: kube-system +spec: + valuesContent: |- + controller: + config: + hsts: "false" + tcp: + 4505: "default/uyuni-tcp:4505" + 4506: "default/uyuni-tcp:4506" + 5432: "default/uyuni-tcp:5432" + 8000: "default/uyuni-tcp:8000" + 8001: "default/uyuni-tcp:8001" + 25151: "default/uyuni-tcp:25151" + udp: + 69: "default/uyuni-udp:69" diff --git a/containers/doc/server-kubernetes/server.yaml b/containers/doc/server-kubernetes/server.yaml new file mode 100644 index 000000000000..28a1aeca1f88 --- /dev/null +++ b/containers/doc/server-kubernetes/server.yaml @@ -0,0 +1,492 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + app: uyuni + name: uyuni +spec: + initContainers: + - name: init-etc-tls + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/etc/pki/tls /mnt; + chmod --reference=/etc/pki/tls /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/pki/tls/. /mnt; + ln -s /etc/pki/spacewalk-tls/spacewalk.crt /mnt/certs/spacewalk.crt; + ln -s /etc/pki/spacewalk-tls/spacewalk.key /mnt/private/spacewalk.key; + cp /etc/pki/spacewalk-tls/spacewalk.key /mnt/private/pg-spacewalk.key; + chown postgres:postgres /mnt/private/pg-spacewalk.key; + fi + volumeMounts: + - mountPath: /mnt + name: etc-tls + - name: tls-key + mountPath: /etc/pki/spacewalk-tls + - name: init-var-pgsql + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/var/lib/pgsql /mnt; + chmod --reference=/var/lib/pgsql /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/lib/pgsql/. 
/mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-pgsql + - name: init-var-cache + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/var/cache /mnt; + chmod --reference=/var/cache /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/cache/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-cache + - name: init-var-log + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/var/log /mnt; + chmod --reference=/var/log /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/log/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-log + - name: init-srv-salt + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/srv/salt /mnt; + chmod --reference=/srv/salt /mnt + volumeMounts: + - mountPath: /mnt + name: srv-salt + - name: init-srv-www-pub + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/srv/www/htdocs/pub /mnt; + chmod --reference=/srv/www/htdocs/pub /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/www/htdocs/pub/. /mnt; + ln -s /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT /mnt/RHN-ORG-TRUSTED-SSL-CERT; + fi + volumeMounts: + - mountPath: /mnt + name: srv-www-pub + - name: init-srv-www-cobbler + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/srv/www/cobbler /mnt; + chmod --reference=/srv/www/cobbler /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/www/cobbler/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-www-cobbler + - name: init-srv-www-osimages + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/srv/www/os-images /mnt; + chmod --reference=/srv/www/os-images /mnt + volumeMounts: + - mountPath: /mnt + name: srv-www-osimages + - name: init-srv-tftpboot + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/srv/tftpboot /mnt; + chmod --reference=/srv/tftpboot /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/tftpboot/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-tftpboot + - name: init-srv-formulametadata + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/srv/formula_metadata /mnt; + chmod --reference=/srv/formula_metadata /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/formula_metadata/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-formulametadata + - name: init-srv-pillar + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/srv/pillar /mnt; + chmod --reference=/srv/pillar /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/pillar/. 
/mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-pillar + - name: init-srv-susemanager + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/srv/susemanager /mnt; + chmod --reference=/srv/susemanager /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/susemanager/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-susemanager + - name: init-srv-spacewalk + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/srv/spacewalk /mnt; + chmod --reference=/srv/spacewalk /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/spacewalk/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-spacewalk + - name: init-root + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/root /mnt; + chmod --reference=/root /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /root/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: root + - name: init-etc-apache2 + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/etc/apache2 /mnt; + chmod --reference=/etc/apache2 /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/apache2/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-apache2 + - name: init-etc-rhn + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/etc/rhn /mnt; + chmod --reference=/etc/rhn /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/rhn/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-rhn + - name: init-etc-systemd + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/etc/systemd/system/multi-user.target.wants /mnt; + chmod --reference=/etc/systemd/system/multi-user.target.wants /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/systemd/system/multi-user.target.wants/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-systemd + - name: init-etc-salt + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/etc/salt /mnt; + chmod --reference=/etc/salt /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/salt/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-salt + - name: init-etc-tomcat + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/etc/tomcat /mnt; + chmod --reference=/etc/tomcat /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/tomcat/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-tomcat + - name: init-etc-cobbler + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/etc/cobbler /mnt; + chmod --reference=/etc/cobbler /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/cobbler/. 
/mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-cobbler + - name: init-etc-sysconfig + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + command: + - sh + - -x + - -c + - > + chown --reference=/etc/sysconfig /mnt; + chmod --reference=/etc/sysconfig /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/sysconfig/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-sysconfig + containers: + - name: uyuni + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + securityContext: + capabilities: + add: + - SYS_ADMIN + ports: + - containerPort: 443 + - containerPort: 80 + - containerPort: 4505 + - containerPort: 4506 + - containerPort: 69 + protocol: UDP + - containerPort: 25151 + - containerPort: 5432 + - containerPort: 8000 + - containerPort: 8001 + envFrom: + - configMapRef: + name: uyuni-config + - secretRef: + name: uyuni-secret + volumeMounts: + - mountPath: /run + name: tmp + - mountPath: /sys/fs/cgroup + name: cgroup + - mountPath: /var/lib/pgsql + name: var-pgsql + - mountPath: /var/cache + name: var-cache + - mountPath: /var/spacewalk + name: var-spacewalk + - mountPath: /var/log + name: var-log + - mountPath: /srv/salt + name: srv-salt + - mountPath: /srv/www/htdocs/pub + name: srv-www-pub + - mountPath: /srv/www/cobbler + name: srv-www-cobbler + - mountPath: /srv/www/os-images + name: srv-www-osimages + - mountPath: /srv/tftpboot + name: srv-tftpboot + - mountPath: /srv/formula_metadata + name: srv-formulametadata + - mountPath: /srv/pillar + name: srv-pillar + - mountPath: /srv/susemanager + name: srv-susemanager + - mountPath: /srv/spacewalk + name: srv-spacewalk + - mountPath: /root + name: root + - mountPath: /etc/apache2 + name: etc-apache2 + - mountPath: /etc/rhn + name: etc-rhn + - mountPath: /etc/systemd/system/multi-user.target.wants + name: etc-systemd + - mountPath: /etc/salt + name: etc-salt + - mountPath: /etc/tomcat + name: etc-tomcat + - mountPath: /etc/cobbler + name: etc-cobbler + - mountPath: /etc/sysconfig + name: etc-sysconfig + - mountPath: /etc/pki/tls + name: etc-tls + - name: ca-cert + mountPath: /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT + readOnly: true + subPath: ca.crt + - name: tls-key + mountPath: /etc/pki/spacewalk-tls + volumes: + - name: tmp + emptyDir: + medium: Memory + sizeLimit: 256Mi + - name: cgroup + hostPath: + path: /sys/fs/cgroup + type: Directory + - name: var-pgsql + persistentVolumeClaim: + claimName: var-pgsql + - name: var-cache + persistentVolumeClaim: + claimName: var-cache + - name: var-spacewalk + persistentVolumeClaim: + claimName: var-spacewalk + - name: var-log + persistentVolumeClaim: + claimName: var-log + - name: srv-salt + persistentVolumeClaim: + claimName: srv-salt + - name: srv-www-pub + persistentVolumeClaim: + claimName: srv-www-pub + - name: srv-www-cobbler + persistentVolumeClaim: + claimName: srv-www-cobbler + - name: srv-www-osimages + persistentVolumeClaim: + claimName: srv-www-osimages + - name: srv-tftpboot + persistentVolumeClaim: + claimName: srv-tftpboot + - name: srv-formulametadata + persistentVolumeClaim: + claimName: srv-formulametadata + - name: srv-pillar + persistentVolumeClaim: + claimName: srv-pillar + - name: srv-susemanager + persistentVolumeClaim: + claimName: srv-susemanager + - name: srv-spacewalk + persistentVolumeClaim: + claimName: srv-spacewalk + - name: root + persistentVolumeClaim: + claimName: root + - name: etc-apache2 + persistentVolumeClaim: + 
claimName: etc-apache2 + - name: etc-rhn + persistentVolumeClaim: + claimName: etc-rhn + - name: etc-systemd + persistentVolumeClaim: + claimName: etc-systemd + - name: etc-salt + persistentVolumeClaim: + claimName: etc-salt + - name: etc-tomcat + persistentVolumeClaim: + claimName: etc-tomcat + - name: etc-cobbler + persistentVolumeClaim: + claimName: etc-cobbler + - name: etc-sysconfig + persistentVolumeClaim: + claimName: etc-sysconfig + - name: ca-cert + configMap: + name: uyuni-ca + - name: etc-tls + persistentVolumeClaim: + claimName: etc-tls + - name: tls-key + secret: + secretName: uyuni-cert + items: + - key: tls.crt + path: spacewalk.crt + - key: tls.key + path: spacewalk.key + mode: 0600 + dnsPolicy: ClusterFirst + restartPolicy: Always diff --git a/containers/doc/server-kubernetes/service.yaml b/containers/doc/server-kubernetes/service.yaml new file mode 100644 index 000000000000..f17adc1610c4 --- /dev/null +++ b/containers/doc/server-kubernetes/service.yaml @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + app: uyuni + name: uyuni-tcp +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: salt-publish + port: 4505 + protocol: TCP + targetPort: 4505 + - name: salt-request + port: 4506 + protocol: TCP + targetPort: 4506 + - name: cobbler + port: 25151 + protocol: TCP + targetPort: 25151 + - name: postgresql + port: 5432 + protocol: TCP + targetPort: 5432 + - name: tomcat-debug + port: 8000 + protocol: TCP + targetPort: 8000 + - name: tasko-debug + port: 8001 + protocol: TCP + targetPort: 8001 + selector: + app: uyuni + type: ClusterIP +status: + loadBalancer: {} +--- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + app: uyuni + name: uyuni-udp +spec: + ports: + - name: tftp + port: 69 + protocol: UDP + targetPort: 69 + selector: + app: uyuni + type: ClusterIP +status: + loadBalancer: {} diff --git a/containers/doc/server-kubernetes/uyuni-config.yaml b/containers/doc/server-kubernetes/uyuni-config.yaml new file mode 100644 index 000000000000..e6bc94b489a8 --- /dev/null +++ b/containers/doc/server-kubernetes/uyuni-config.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +data: + MANAGER_USER: "spacewalk" + MANAGER_ADMIN_EMAIL: "galaxy-noise@suse.de" + MANAGER_DB_NAME: susemanager + MANAGER_DB_HOST: localhost + MANAGER_DB_PORT: "5432" + MANAGER_DB_PROTOCOL: "TCP" + MANAGER_ENABLE_TFTP: "Y" + REPORT_DB_HOST: "uyuni-dev.world-co.com" + NO_SSL: "Y" + MANAGER_MAIL_FROM: "notifications@uyuni-dev.world-co.com" + UYUNI_CONFIG: "uyuni-dev.world-co.com" +kind: ConfigMap +metadata: + name: uyuni-config +--- +apiVersion: v1 +data: + MANAGER_PASS: c3BhY2V3YWxr + SCC_USER: "" + SCC_PASS: "" +kind: Secret +metadata: + name: uyuni-secret diff --git a/containers/doc/server-kubernetes/uyuni-ingress-traefik.yaml b/containers/doc/server-kubernetes/uyuni-ingress-traefik.yaml new file mode 100644 index 000000000000..a47f7b7e89a2 --- /dev/null +++ b/containers/doc/server-kubernetes/uyuni-ingress-traefik.yaml @@ -0,0 +1,165 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + creationTimestamp: null + name: uyuni-ingress-ssl + annotations: + traefik.ingress.kubernetes.io/router.tls: "true" + traefik.ingress.kubernetes.io/router.tls.domains.n.main: "uyuni-dev.world-co.com" + traefik.ingress.kubernetes.io/router.entrypoints: "websecure,web" +spec: + tls: + - hosts: + - uyuni-dev.world-co.com + secretName: uyuni-cert + rules: + - host: uyuni-dev.world-co.com + http: + paths: + - backend: + service: + 
name: uyuni-tcp + port: + number: 80 + path: / + pathType: Prefix +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + creationTimestamp: null + name: uyuni-ingress-ssl-redirect + annotations: + traefik.ingress.kubernetes.io/router.middlewares: "default-uyuni-https-redirect@kubernetescrd" + traefik.ingress.kubernetes.io/router.entrypoints: "web" +spec: + rules: + - host: uyuni-dev.world-co.com + http: + paths: + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: / + pathType: Prefix +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + creationTimestamp: null + name: uyuni-ingress-nossl + annotations: + traefik.ingress.kubernetes.io/router.tls: "false" + traefik.ingress.kubernetes.io/router.entrypoints: "web" +spec: + rules: + - host: uyuni-dev.world-co.com + http: + paths: + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /pub + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/([^/])+/DownloadFile + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /(rhn/)?rpc/api + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/errors + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/ty/TinyUrl + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/websocket + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/metrics + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /cobbler_api + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /cblr + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /httpboot + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /images + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /cobbler + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /os-images + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /tftp + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /docs + pathType: Prefix From 60040bacd6e7854eca276a2ac3e40cc7c0a94dcb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Sun, 12 Feb 2023 14:11:21 +0100 Subject: [PATCH 15/80] create first draft of server-image and uyuni-server-systemd-services --- containers/server-image/Dockerfile | 10 +- containers/server-image/add_repos.sh | 6 +- containers/server-image/remove_unused.sh | 7 + containers/server-image/server-image.changes | 2 +- containers/server-systemd-services/README.md | 15 ++ .../uyuni-server-services.config | 38 ++++ .../uyuni-server-systemd-services.changes | 1 + .../uyuni-server-systemd-services.spec | 110 +++++++++++ .../uyuni-server.service | 80 ++++++++ .../server-systemd-services/uyuni-server.sh | 175 ++++++++++++++++++ .../packages/uyuni-server-systemd-services | 1 + 11 files changed, 435 insertions(+), 10 deletions(-) create mode 100755 containers/server-image/remove_unused.sh create mode 100644 containers/server-systemd-services/README.md create mode 100644 containers/server-systemd-services/uyuni-server-services.config create mode 100644 containers/server-systemd-services/uyuni-server-systemd-services.changes create mode 100644 
containers/server-systemd-services/uyuni-server-systemd-services.spec create mode 100644 containers/server-systemd-services/uyuni-server.service create mode 100644 containers/server-systemd-services/uyuni-server.sh create mode 100644 rel-eng/packages/uyuni-server-systemd-services diff --git a/containers/server-image/Dockerfile b/containers/server-image/Dockerfile index f836a2564cce..14bef32943c6 100644 --- a/containers/server-image/Dockerfile +++ b/containers/server-image/Dockerfile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: MIT -#!BuildTag: uyuni/server:latest uyuni/server:4.4.0 uyuni/server:4.4.0.%RELEASE% +#!BuildTag: uyuni/server:latest uyuni/server:%PKG_VERSION% uyuni/server:%PKG_VERSION%.%RELEASE% ARG INIT_BASE=registry.suse.com/bci/bci-base:15.4 FROM $INIT_BASE @@ -9,11 +9,14 @@ ARG PRODUCT_PATTERN_PREFIX="patterns-uyuni" # Add distro and product repos COPY add_repos.sh /usr/bin -RUN sh add_repos.sh ${PRODUCT_REPO} +RUN sh add_repos.sh # Copy Uyuni setup script COPY uyuni-setup.service /usr/lib/systemd/system/ +COPY remove_unused.sh . +RUN echo "rpm.install.excludedocs = yes" >>/etc/zypp/zypp.conf + # Main packages RUN zypper ref && zypper --non-interactive up RUN zypper --gpg-auto-import-keys --non-interactive install --auto-agree-with-licenses --force-resolution \ @@ -49,9 +52,6 @@ RUN zypper --gpg-auto-import-keys --non-interactive install --auto-agree-with-li virtual-host-gatherer-VMware \ vim - -# FIXME hack to correct the report db script to work on containers -RUN cp /usr/bin/uyuni-setup-reportdb /usr/bin/uyuni-setup-reportdb.original RUN sed -i 's/sysctl kernel.shmmax/#sysctl kernel.shmmax/g' /usr/bin/uyuni-setup-reportdb RUN systemctl enable uyuni-setup diff --git a/containers/server-image/add_repos.sh b/containers/server-image/add_repos.sh index 02023fb872d1..a77e8b755b18 100644 --- a/containers/server-image/add_repos.sh +++ b/containers/server-image/add_repos.sh @@ -1,19 +1,17 @@ #!/bin/bash -set -xe - if [ -n "$1" ]; then # update zypper ar -G http://download.opensuse.org/update/leap/15.4/sle/ sle_update_repo zypper ar -G http://download.opensuse.org/update/leap/15.4/oss/ os_update_repo zypper ar -G http://download.opensuse.org/update/leap/15.4/backports/ backports_update_repo - + # distribution zypper ar -G http://download.opensuse.org/distribution/leap/15.4/repo/oss/ os_pool_repo # product #TODO uncomment when changes are in master branch - #zypper ar -f -G http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Uyuni-Server-POOL-x86_64-Media1/ server_pool_repo + #zypper ar -G http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Uyuni-Server-POOL-x86_64-Media1/ server_pool_repo zypper ar -G http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Testing-Overlay-POOL-x86_64-Media1/ testing_overlay_devel_repo zypper addrepo $1 product diff --git a/containers/server-image/remove_unused.sh b/containers/server-image/remove_unused.sh new file mode 100755 index 000000000000..ac95e70ecf0f --- /dev/null +++ b/containers/server-image/remove_unused.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# Removes any unnecessary files and packages before moving to the next build stage + +set -xe + +zypper clean --all +rpm -e zypper diff --git a/containers/server-image/server-image.changes b/containers/server-image/server-image.changes index f8f88353773a..3d281727b618 100644 --- a/containers/server-image/server-image.changes +++ b/containers/server-image/server-image.changes @@ -1 +1 @@ -- Initialized a 
server image \ No newline at end of file +- Initialized a server image diff --git a/containers/server-systemd-services/README.md b/containers/server-systemd-services/README.md new file mode 100644 index 000000000000..c1ae371556cd --- /dev/null +++ b/containers/server-systemd-services/README.md @@ -0,0 +1,15 @@ +# General usage + +Start the services by running `systemctl start uyuni-server.service`. + +Edit the `/etc/sysconfig/uyuni-server-systemd-services` file if you need to add more options to the `podman` pod running command. + +# Advanced options + +In order to change the default images registry, namespace and tag, edit the `NAMESPACE` and `TAG` variables in `/etc/sysconfig/uyuni-server-systemd-services` file. +Restart the `uyuni-server` service is required to apply the change. + +# Getting logs + +You can get logs from the `journalctl -xeu uyuni-server.service` services using `journalctl`. +You can also use `podman logs` using the same names. diff --git a/containers/server-systemd-services/uyuni-server-services.config b/containers/server-systemd-services/uyuni-server-services.config new file mode 100644 index 000000000000..f8fd4f2904ce --- /dev/null +++ b/containers/server-systemd-services/uyuni-server-services.config @@ -0,0 +1,38 @@ +# This file is expected to be found in `/etc/sysconfig/container-server-services.config`, +# the EnvironmentFile services property is pointing there + +# Where to get the images from if not defined otherwise in a service-specific configuration +# It should contain the registry FQDN and path to the server-* images without trailing slash +NAMESPACE=registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni + +# Tag of the images to pull if not defined otherwise in a service-specific configuration +TAG=latest + +# Pass additional parameters to the pod start command. +# +# Example: +# EXTRA_POD_ARGS='--add-host=server.tf.local:192.168.122.254 --add-host=client.tf.local:192.168.122.89' +# Add -p 8000:8000 -p 8001:8001 to enable java remote debugging +EXTRA_POD_ARGS='' + +# Initial setup configuration options +MANAGER_USER="spacewalk" +MANAGER_PASS="spacewalk" +MANAGER_ADMIN_EMAIL="galaxy-noise@suse.de" +CERT_O="SUSE" +CERT_OU="SUSE" +CERT_CITY="Nuernberg" +CERT_STATE="Bayern" +CERT_COUNTRY="DE" +CERT_EMAIL="galaxy-noise@suse.de" +CERT_PASS="spacewalk" +USE_EXISTING_CERTS="N" +MANAGER_DB_NAME="susemanager" +MANAGER_DB_HOST="localhost" +MANAGER_DB_PORT="5432" +MANAGER_DB_PROTOCOL="TCP" +MANAGER_ENABLE_TFTP="Y" +SCC_USER="" +SCC_PASS="" +REPORT_DB_HOST="uyuni-server" +UYUNI_FQDN="uyuni-server" diff --git a/containers/server-systemd-services/uyuni-server-systemd-services.changes b/containers/server-systemd-services/uyuni-server-systemd-services.changes new file mode 100644 index 000000000000..b89bb5cefb2b --- /dev/null +++ b/containers/server-systemd-services/uyuni-server-systemd-services.changes @@ -0,0 +1 @@ +- create first draft of uyuni-server-systemd-services diff --git a/containers/server-systemd-services/uyuni-server-systemd-services.spec b/containers/server-systemd-services/uyuni-server-systemd-services.spec new file mode 100644 index 000000000000..70c2800b2990 --- /dev/null +++ b/containers/server-systemd-services/uyuni-server-systemd-services.spec @@ -0,0 +1,110 @@ +# +# spec file for package uyuni-server-systemd-services +# +# Copyright (c) 2022 SUSE LLC +# +# All modifications and additions to the file contributed by third parties +# remain the property of their copyright owners, unless otherwise agreed +# upon. 
The license for this file, and modifications and additions to the +# file, is the same license as for the pristine package itself (unless the +# license for the pristine package is not an Open Source License, in which +# case the license is the MIT License). An "Open Source License" is a +# license that conforms to the Open Source Definition (Version 1.9) +# published by the Open Source Initiative. + +# Please submit bugfixes or comments via https://bugs.opensuse.org/ +# + +Name: uyuni-server-systemd-services +Summary: Uyuni Server systemd services containers +License: GPL-2.0-only +Group: Applications/Internet +Version: 4.4.1 +Release: 1 +URL: https://github.com/uyuni-project/uyuni +Source0: %{name}-%{version}-1.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-build +BuildArch: noarch +Requires: podman +%if 0%{?suse_version} +Requires(post): %fillup_prereq +%endif +BuildRequires: systemd-rpm-macros + +%description +This package contains systemd services to run the Uyuni server containers using podman. + +%prep +%setup -q + +%build + +%install +install -d -m 755 %{buildroot}/%{_sysconfdir}/uyuni/server +install -d -m 755 %{buildroot}%{_sbindir} + +#TODO currently removed but it can be useful in future +#%if "%{?susemanager_container_images_path}" != "" +#sed 's|^NAMESPACE=.*$|NAMESPACE=%{susemanager_container_images_path}|' -i uyuni-server-services.config +#%endif + +%if !0%{?is_opensuse} +PRODUCT_VERSION=$(echo %{version} | sed 's/^\([0-9]\+\.[0-9]\+\).*$/\1/') +%endif +%if 0%{?rhel} +install -D -m 644 uyuni-server-services.config %{buildroot}%{_sysconfdir}/sysconfig/uyuni-server-systemd-services.config +%else +install -D -m 644 uyuni-server-services.config %{buildroot}%{_fillupdir}/sysconfig.%{name} +%endif + +install -D -m 644 uyuni-server.service %{buildroot}%{_unitdir}/uyuni-server.service +ln -s /usr/sbin/service %{buildroot}%{_sbindir}/rcuyuni-server + +install -m 755 uyuni-server.sh %{buildroot}%{_sbindir}/uyuni-server.sh + +%check + +%pre +%if !0%{?rhel} + %service_add_pre uyuni-server.service +%endif + +%post +%if 0%{?suse_version} +%fillup_only +%endif + +%if 0%{?rhel} + %systemd_post uyuni-server.service +%else + %service_add_post uyuni-server +%endif + +%preun +%if 0%{?rhel} + %systemd_preun uyuni-server.service +%else + %service_del_preun uyuni-server +%endif + +%postun +%if 0%{?rhel} + %systemd_postun uyuni-server.service +%else + %service_del_postun uyuni-server +%endif + +%files +%defattr(-,root,root) +%doc README.md +%{_unitdir}/*.service +%{_sbindir}/rcuyuni-* +%if 0%{?rhel} +%{_sysconfdir}/sysconfig/uyuni-server-systemd-services.config +%else +%{_fillupdir}/sysconfig.%{name} +%endif +%{_sysconfdir}/uyuni +%{_sbindir}/uyuni-server.sh + +%changelog diff --git a/containers/server-systemd-services/uyuni-server.service b/containers/server-systemd-services/uyuni-server.service new file mode 100644 index 000000000000..9993e7f63b35 --- /dev/null +++ b/containers/server-systemd-services/uyuni-server.service @@ -0,0 +1,80 @@ +# container-uyuni-server.service +# autogenerated by Podman 4.3.1 +# Tue Feb 28 17:20:52 CET 2023 + +[Unit] +Description=Uyuni server image container service +Wants=network.target +After=network-online.target +RequiresMountsFor=%t/containers + +[Service] +Environment=PODMAN_SYSTEMD_UNIT=%n +EnvironmentFile=-/etc/sysconfig/uyuni-server-systemd-services +Restart=on-failure +ExecStartPre=/bin/rm \ + -f %t/uyuni-server.pid %t/%n.ctr-id +ExecStart=/usr/bin/podman run \ + --conmon-pidfile %t/uyuni-server.pid \ + --cidfile=%t/%n.ctr-id \ + --cgroups=no-conmon \ + 
--rm \ + --sdnotify=conmon \ + -d \ + --replace \ + --tmpfs /run \ + -p 443:443 \ + -p 80:80 \ + -p 4505:4505 \ + -p 4506:4506 \ + -p 69:69 \ + -p 25151:25151 \ + -p 5432:5432 \ + -v cgroup:/sys/fs/cgroup:rw \ + -v pgsql:/var/lib/pgsql \ + -v var-cache:/var/cache \ + -v var-spacewalk:/var/spacewalk \ + -v srv-salt:/srv/salt \ + -v srv-www-pub:/srv/www/htdocs/pub \ + -v srv-www-cobbler:/srv/www/cobbler \ + -v srv-www-osimages:/srv/www/os-images \ + -v srv-tftpboot:/srv/tftpboot \ + -v srv-formulametadata:/srv/formula_metadata \ + -v srv-pillar:/srv/pillar \ + -v srv-susemanager:/srv/susemanager \ + -v srv-spacewalk:/srv/spacewalk \ + -v etc-rhn:/etc/rhn \ + -v etc-systemd:/etc/systemd/system/multi-user.target.wants \ + -v var-log-rhn:/var/log/rhn \ + -v etc-salt:/etc/salt \ + -v apache2:/etc/apache2 \ + -v tomcat:/etc/tomcat \ + -v etc-tls:/etc/pki/tls \ + -v ca-cert:/etc/pki/trust/anchors/ \ + -v tls-key:/etc/pki/spacewalk-tls \ + -v uyuni-config:/root \ + -v tomcat-monitoring:/usr/lib/systemd/system/tomcat.service.d \ + -v taskomatic-monitoring:/usr/lib/systemd/system/taskomatic.service.d \ + -v etc-cobbler:/etc/cobbler \ + -v var-lib-cobbler:/var/lib/cobbler \ + -v home:/home \ + -v etc-sysconfig:/etc/sysconfig \ + --env-host \ + --hostname ${UYUNI_FQDN} \ + $EXTRA_POD_ARGS \ + --name uyuni-server ${NAMESPACE}/server:${TAG} +ExecStop=/usr/bin/podman stop \ + --ignore -t 10 \ + --cidfile=%t/%n.ctr-id +ExecStopPost=/usr/bin/podman rm \ + -f \ + --ignore -t 10 \ + --cidfile=%t/%n.ctr-id + +PIDFile=%t/uyuni-server.pid +TimeoutStopSec=180 +TimeoutStartSec=900 +Type=forking + +[Install] +WantedBy=multi-user.target default.target diff --git a/containers/server-systemd-services/uyuni-server.sh b/containers/server-systemd-services/uyuni-server.sh new file mode 100644 index 000000000000..dae5f3b96842 --- /dev/null +++ b/containers/server-systemd-services/uyuni-server.sh @@ -0,0 +1,175 @@ +#!/bin/bash + +############################# SETUP ############################# + +set -Eeuo pipefail +trap cleanup SIGINT SIGTERM ERR EXIT + +script_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd -P) + +cleanup() { + trap - SIGINT SIGTERM ERR EXIT +} + +msg() { + echo >&2 -e "${1-}" +} + +die() { + local msg=$1 + local code=${2-1} + msg "$msg" + exit "$code" +} + +############################# ROOT ############################# + +usage_root() { + cat </etc/sysconfig/uyuni-server-services.config < Date: Fri, 10 Mar 2023 14:03:38 +0100 Subject: [PATCH 16/80] Add server helm chart The helm chart intentionnaly doesn't deploy the PersistentVolumes and the SSL certificates for now. The documentation has also been adapted to use the helm chart. 
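The earlier Kubernetes manifests expect a `uyuni-cert` TLS secret and a `uyuni-ca` ConfigMap to already exist, so with the chart not creating the certificates they have to be provided up front, either through the self-signed cert-manager issuer added in this patch or by hand. A rough manual sketch, assuming a key/certificate pair for the server FQDN is already available (the paths below are placeholders, not files shipped by the patch):

```
kubectl create secret tls uyuni-cert \
    --cert=/path/to/spacewalk.crt \
    --key=/path/to/spacewalk.key
kubectl create configmap uyuni-ca \
    --from-file=ca.crt=/path/to/RHN-ORG-TRUSTED-SSL-CERT
```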
From now on the pod is running as a deployment --- containers/doc/server-kubernetes/README.md | 49 +- .../cert-manager-selfsigned-issuer.yaml | 44 ++ .../server-kubernetes/k3s-traefik-config.yaml | 5 - .../nginx-uyuni-ingress.yaml | 139 ----- containers/doc/server-kubernetes/pvcs.yaml | 329 ----------- containers/doc/server-kubernetes/server.yaml | 492 ---------------- .../doc/server-kubernetes/uyuni-config.yaml | 25 - containers/server-helm/.helmignore | 23 + containers/server-helm/Chart.yaml | 11 + containers/server-helm/_service | 3 + containers/server-helm/charts/.gitkeep | 0 containers/server-helm/server-helm.changes | 4 + containers/server-helm/templates/_helpers.tpl | 11 + containers/server-helm/templates/config.yaml | 40 ++ .../server-helm/templates/deployment.yaml | 539 ++++++++++++++++++ .../templates/ingress.yaml} | 30 +- .../templates}/k3s-ingress-routes.yaml | 12 + .../templates}/service.yaml | 18 +- containers/server-helm/templates/volumes.yaml | 527 +++++++++++++++++ containers/server-helm/tito.props | 2 + containers/server-helm/values.yaml | 108 ++++ containers/server-image/uyuni-setup.service | 2 +- java/manager-build.xml | 20 +- rel-eng/packages/server-helm | 1 + susemanager/bin/mgr-setup | 5 + 25 files changed, 1410 insertions(+), 1029 deletions(-) create mode 100644 containers/doc/server-kubernetes/cert-manager-selfsigned-issuer.yaml delete mode 100644 containers/doc/server-kubernetes/nginx-uyuni-ingress.yaml delete mode 100644 containers/doc/server-kubernetes/pvcs.yaml delete mode 100644 containers/doc/server-kubernetes/server.yaml delete mode 100644 containers/doc/server-kubernetes/uyuni-config.yaml create mode 100644 containers/server-helm/.helmignore create mode 100644 containers/server-helm/Chart.yaml create mode 100644 containers/server-helm/_service create mode 100644 containers/server-helm/charts/.gitkeep create mode 100644 containers/server-helm/server-helm.changes create mode 100644 containers/server-helm/templates/_helpers.tpl create mode 100644 containers/server-helm/templates/config.yaml create mode 100644 containers/server-helm/templates/deployment.yaml rename containers/{doc/server-kubernetes/uyuni-ingress-traefik.yaml => server-helm/templates/ingress.yaml} (82%) rename containers/{doc/server-kubernetes => server-helm/templates}/k3s-ingress-routes.yaml (81%) rename containers/{doc/server-kubernetes => server-helm/templates}/service.yaml (70%) create mode 100644 containers/server-helm/templates/volumes.yaml create mode 100644 containers/server-helm/tito.props create mode 100644 containers/server-helm/values.yaml create mode 100644 rel-eng/packages/server-helm diff --git a/containers/doc/server-kubernetes/README.md b/containers/doc/server-kubernetes/README.md index 5f911e979752..3cf21e8edec1 100644 --- a/containers/doc/server-kubernetes/README.md +++ b/containers/doc/server-kubernetes/README.md @@ -21,8 +21,6 @@ Run this command to watch it restart: watch kubectl get -n kube-system pod -lapp.kubernetes.io/name=rke2-ingress-nginx ``` -Set the shell variable `INGRESS=nginx` to be used in the next steps. - ### K3s specific setup @@ -34,8 +32,6 @@ Run this commant to watch it restart: watch kubectl get -n kube-system pod -lapp.kubernetes.io/name=traefik ``` -Set the shell variable `INGRESS=traefik` to be used in the next steps. - ***Offline installation:*** with k3s it is possible to preload the container images and avoid it to be fetched from a registry. 
For this, on a machine with internet access, pull the image using `podman`, `docker` or `skopeo` and save it as a `tar` archive. For example: @@ -114,6 +110,12 @@ Create a `ConfigMap` with the CA certificate: kubectl create configmap uyuni-ca --from-file=ca.crt=/RHN-ORG-TRUSTED-SSL-CERT ``` +Change the hostname associated to the persistent volumes to match the hostname of your node: + +``` +sed 's/uyuni-dev/youhostname/' -i pvs.yaml +``` + Define the persistent volumes by running `kubectl apply -f pvs.yaml`. The volumes are folders on the cluster node and need to be manually created: @@ -121,28 +123,37 @@ The volumes are folders on the cluster node and need to be manually created: mkdir -p `kubectl get pv -o jsonpath='{.items[*].spec.local.path}'` ``` -In my setup, the cluster node is named `uyuni-dev` and its FQDN is `uyuni-dev.world-co.com`. -You will need to replace those values in the yaml files. - -Once done, run the following commands: - +Install the helm chart from the source's `containers` folder: +Replace the `uyuni-dev.world-co.com` by your FQDN. ``` -for YAML in pvcs service uyuni-config server $INGRESS-uyuni-ingress; do - kubectl apply -f $YAML.yaml -done +helm install uyuni server-helm \ + --set repository=registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni \ + --set storageClass=local-storage \ + --set exposeJavaDebug=true \ + --set uyuniMailFrom=notifications@uyuni-dev.world-co.com \ + --set fqdn=uyuni-dev.world-co.com ``` + +If deploying on `rke2`, add the `--set ingres=nginx` parameter to the `helm install` command. + +You can also set more variables like `sccUser` or `sccPass`. +Check the `server-helm/values.yaml` file for the complete list. + +Note that the Helm chart installs a deployment with one replica. +The pod name is automatically generated by kubernetes and changes at every start. + The pod takes a while to start as it needs to initialize the mounts and run the setup. -Run `kubectl get pod uyuni` and wait for it to be in `RUNNING` state. +Run `kubectl get pod -lapp=uyuni` and wait for it to be in `RUNNING` state. Even after this, give it time to complete the setup during first boot. -You can monitor the progress of the setup with `kubectl exec uyuni -- tail -f /var/log/susemanager_setup.log` +You can monitor the progress of the setup with `kubectl exec $(kubectl get pod -lapp=uyuni -o jsonpath={.items[0].metadata.name}) -- tail -f /var/log/susemanager_setup.log` ## Using the pod -To getting a shell in the pod run `kubectl exec -ti uyuni -- sh`. +To getting a shell in the pod run `kubectl exec -ti $(kubectl get pod -lapp=uyuni -o jsonpath={.items[0].metadata.name}) -- sh`. Note that the part after the `--` can be any command to run inside the server. -To copy files to the server, use the `kubectl cp uyuni:` command. +To copy files to the server, use the `kubectl cp $(kubectl get pod -lapp=uyuni -o jsonpath={.items[0].metadata.name}):` command. Run `kubectl cp --help` for more details on how to use it. ## Developping with the pod @@ -155,7 +166,7 @@ To deploy java code on the pod change to the `java` directory and run: ant -f manager-build.xml refresh-branding-jar deploy-restart-kube ``` -In case you changed the pod name and namespace while deploying it, pass the corresponding `-Ddeploy.namespace=` and `-Ddeploy.pod=` parameters. +In case you changed the pod namespace, pass the corresponding `-Ddeploy.namespace=` parameter. 
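For instance, if the chart was installed into a dedicated namespace (here `uyuni-ns`, a placeholder), a redeploy plus a quick health check might look like this sketch:

```
# placeholder namespace; drop -n / -Ddeploy.namespace when using the default namespace
ant -f manager-build.xml -Ddeploy.namespace=uyuni-ns refresh-branding-jar deploy-restart-kube

# check that the pod is still ready afterwards
kubectl -n uyuni-ns get pod -lapp=uyuni
```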
**Note** To deploy TSX or Salt code, use the `deploy-static-resources-kube` and `deploy-salt-files-kube` tasks of the ant file. @@ -177,7 +188,7 @@ The debugger can now be attached to the usual ports (8000 for tomcat and 8001 fo ## Throwing everything away -If you want to create from a fresh pod, run `kubectl delete pod uyuni`. +If you want to create from a fresh pod, run `helm uninstall uyuni`. Then run this command on the cluster node to cleanup the volumes: @@ -187,4 +198,4 @@ for v in `ls /var/uyuni/`; do done ``` -To create the pod again, just run `kubectl apply -f server.yaml` and wait. +To create the pod again, just run the Helm install again and wait. diff --git a/containers/doc/server-kubernetes/cert-manager-selfsigned-issuer.yaml b/containers/doc/server-kubernetes/cert-manager-selfsigned-issuer.yaml new file mode 100644 index 000000000000..afa7660e0b66 --- /dev/null +++ b/containers/doc/server-kubernetes/cert-manager-selfsigned-issuer.yaml @@ -0,0 +1,44 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: uyuni-issuer + namespace: default +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: uyuni-ca + namespace: default +spec: + isCA: true + subject: + countries: ["FR"] + provinces: ["Burgundy"] + localities: ["Macon"] + organizations: ["SUSE"] + organizationalUnits: ["BCL"] + emailAddresses: + - sylvestre@world-co.com + commonName: uyuni-dev.world-co.com + dnsNames: + - uyuni-dev.world-co.com + secretName: uyuni-ca + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: uyuni-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: uyuni-ca-issuer + namespace: default +spec: + ca: + secretName: + uyuni-ca diff --git a/containers/doc/server-kubernetes/k3s-traefik-config.yaml b/containers/doc/server-kubernetes/k3s-traefik-config.yaml index 55ae3802a3b2..e27590046c67 100644 --- a/containers/doc/server-kubernetes/k3s-traefik-config.yaml +++ b/containers/doc/server-kubernetes/k3s-traefik-config.yaml @@ -5,11 +5,6 @@ metadata: namespace: kube-system spec: valuesContent: |- - logs: - general: - level: DEBUG - access: - enabled: true ports: postgres: port: 5432 diff --git a/containers/doc/server-kubernetes/nginx-uyuni-ingress.yaml b/containers/doc/server-kubernetes/nginx-uyuni-ingress.yaml deleted file mode 100644 index b17a50a7bc14..000000000000 --- a/containers/doc/server-kubernetes/nginx-uyuni-ingress.yaml +++ /dev/null @@ -1,139 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - creationTimestamp: null - name: uyuni-ingress-ssl -spec: - tls: - - hosts: - - uyuni-dev.world-co.com - secretName: uyuni-cert - rules: - - host: uyuni-dev.world-co.com - http: - paths: - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: / - pathType: Prefix ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - creationTimestamp: null - name: uyuni-ingress-nossl - annotations: - nginx.ingress.kubernetes.io/ssl-redirect: "false" -spec: - rules: - - host: uyuni-dev.world-co.com - http: - paths: - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /pub - pathType: Prefix - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /rhn/([^/])+/DownloadFile - pathType: Prefix - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /(rhn/)?rpc/api - pathType: Prefix - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /rhn/errors - pathType: Prefix - - backend: - service: - 
name: uyuni-tcp - port: - number: 80 - path: /rhn/ty/TinyUrl - pathType: Prefix - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /rhn/websocket - pathType: Prefix - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /rhn/metrics - pathType: Prefix - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /cobbler_api - pathType: Prefix - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /cblr - pathType: Prefix - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /httpboot - pathType: Prefix - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /images - pathType: Prefix - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /cobbler - pathType: Prefix - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /os-images - pathType: Prefix - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /tftp - pathType: Prefix - - backend: - service: - name: uyuni-tcp - port: - number: 80 - path: /docs - pathType: Prefix diff --git a/containers/doc/server-kubernetes/pvcs.yaml b/containers/doc/server-kubernetes/pvcs.yaml deleted file mode 100644 index 62b89e050f9a..000000000000 --- a/containers/doc/server-kubernetes/pvcs.yaml +++ /dev/null @@ -1,329 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: var-pgsql -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 100Gi - selector: - matchLabels: - data: var-pgsql ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: var-cache -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 100Gi - selector: - matchLabels: - data: var-cache ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: var-spacewalk -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 100Gi - selector: - matchLabels: - data: var-spacewalk ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: var-log -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 2Gi - selector: - matchLabels: - data: var-log ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: srv-salt -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 100Mi - selector: - matchLabels: - data: srv-salt ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: srv-www-pub -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 100Mi - selector: - matchLabels: - data: srv-www-pub ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: srv-www-cobbler -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 100Mi - selector: - matchLabels: - data: srv-www-cobbler ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: srv-www-osimages -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 100Mi - selector: - matchLabels: - data: srv-www-osimages ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: srv-tftpboot -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 100Mi - selector: - matchLabels: - data: srv-tftpboot ---- -apiVersion: v1 -kind: 
PersistentVolumeClaim -metadata: - name: srv-formulametadata -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 100Mi - selector: - matchLabels: - data: srv-formulametadata ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: srv-pillar -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 100Mi - selector: - matchLabels: - data: srv-pillar ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: srv-susemanager -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 100Mi - selector: - matchLabels: - data: srv-susemanager ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: srv-spacewalk -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 100Mi - selector: - matchLabels: - data: srv-spacewalk ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: root -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 10Mi - selector: - matchLabels: - data: root ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: etc-apache2 -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 10Mi - selector: - matchLabels: - data: etc-apache2 ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: etc-rhn -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 10Mi - selector: - matchLabels: - data: etc-rhn ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: etc-systemd -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 10Mi - selector: - matchLabels: - data: etc-systemd ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: etc-salt -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 10Mi - selector: - matchLabels: - data: etc-salt ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: etc-tomcat -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 10Mi - selector: - matchLabels: - data: etc-tomcat ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: etc-cobbler -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 1Mi - selector: - matchLabels: - data: etc-cobbler ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: etc-sysconfig -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 1Mi - selector: - matchLabels: - data: etc-sysconfig ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: etc-tls -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-storage - resources: - requests: - storage: 1Mi - selector: - matchLabels: - data: etc-tls diff --git a/containers/doc/server-kubernetes/server.yaml b/containers/doc/server-kubernetes/server.yaml deleted file mode 100644 index 28a1aeca1f88..000000000000 --- a/containers/doc/server-kubernetes/server.yaml +++ /dev/null @@ -1,492 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - creationTimestamp: null - labels: - app: uyuni - name: uyuni -spec: - initContainers: - - name: init-etc-tls - image: 
registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/etc/pki/tls /mnt; - chmod --reference=/etc/pki/tls /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /etc/pki/tls/. /mnt; - ln -s /etc/pki/spacewalk-tls/spacewalk.crt /mnt/certs/spacewalk.crt; - ln -s /etc/pki/spacewalk-tls/spacewalk.key /mnt/private/spacewalk.key; - cp /etc/pki/spacewalk-tls/spacewalk.key /mnt/private/pg-spacewalk.key; - chown postgres:postgres /mnt/private/pg-spacewalk.key; - fi - volumeMounts: - - mountPath: /mnt - name: etc-tls - - name: tls-key - mountPath: /etc/pki/spacewalk-tls - - name: init-var-pgsql - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/var/lib/pgsql /mnt; - chmod --reference=/var/lib/pgsql /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /var/lib/pgsql/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: var-pgsql - - name: init-var-cache - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/var/cache /mnt; - chmod --reference=/var/cache /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /var/cache/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: var-cache - - name: init-var-log - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/var/log /mnt; - chmod --reference=/var/log /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /var/log/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: var-log - - name: init-srv-salt - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/srv/salt /mnt; - chmod --reference=/srv/salt /mnt - volumeMounts: - - mountPath: /mnt - name: srv-salt - - name: init-srv-www-pub - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/srv/www/htdocs/pub /mnt; - chmod --reference=/srv/www/htdocs/pub /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /srv/www/htdocs/pub/. /mnt; - ln -s /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT /mnt/RHN-ORG-TRUSTED-SSL-CERT; - fi - volumeMounts: - - mountPath: /mnt - name: srv-www-pub - - name: init-srv-www-cobbler - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/srv/www/cobbler /mnt; - chmod --reference=/srv/www/cobbler /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /srv/www/cobbler/. 
/mnt; - fi - volumeMounts: - - mountPath: /mnt - name: srv-www-cobbler - - name: init-srv-www-osimages - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/srv/www/os-images /mnt; - chmod --reference=/srv/www/os-images /mnt - volumeMounts: - - mountPath: /mnt - name: srv-www-osimages - - name: init-srv-tftpboot - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/srv/tftpboot /mnt; - chmod --reference=/srv/tftpboot /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /srv/tftpboot/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: srv-tftpboot - - name: init-srv-formulametadata - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/srv/formula_metadata /mnt; - chmod --reference=/srv/formula_metadata /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /srv/formula_metadata/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: srv-formulametadata - - name: init-srv-pillar - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/srv/pillar /mnt; - chmod --reference=/srv/pillar /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /srv/pillar/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: srv-pillar - - name: init-srv-susemanager - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/srv/susemanager /mnt; - chmod --reference=/srv/susemanager /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /srv/susemanager/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: srv-susemanager - - name: init-srv-spacewalk - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/srv/spacewalk /mnt; - chmod --reference=/srv/spacewalk /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /srv/spacewalk/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: srv-spacewalk - - name: init-root - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/root /mnt; - chmod --reference=/root /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /root/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: root - - name: init-etc-apache2 - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/etc/apache2 /mnt; - chmod --reference=/etc/apache2 /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /etc/apache2/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: etc-apache2 - - name: init-etc-rhn - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/etc/rhn /mnt; - chmod --reference=/etc/rhn /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /etc/rhn/. 
/mnt; - fi - volumeMounts: - - mountPath: /mnt - name: etc-rhn - - name: init-etc-systemd - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/etc/systemd/system/multi-user.target.wants /mnt; - chmod --reference=/etc/systemd/system/multi-user.target.wants /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /etc/systemd/system/multi-user.target.wants/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: etc-systemd - - name: init-etc-salt - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/etc/salt /mnt; - chmod --reference=/etc/salt /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /etc/salt/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: etc-salt - - name: init-etc-tomcat - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/etc/tomcat /mnt; - chmod --reference=/etc/tomcat /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /etc/tomcat/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: etc-tomcat - - name: init-etc-cobbler - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/etc/cobbler /mnt; - chmod --reference=/etc/cobbler /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /etc/cobbler/. /mnt; - fi - volumeMounts: - - mountPath: /mnt - name: etc-cobbler - - name: init-etc-sysconfig - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - chown --reference=/etc/sysconfig /mnt; - chmod --reference=/etc/sysconfig /mnt; - if [ -z "$(ls -A /mnt)" ]; then - cp -a /etc/sysconfig/. 
/mnt; - fi - volumeMounts: - - mountPath: /mnt - name: etc-sysconfig - containers: - - name: uyuni - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - securityContext: - capabilities: - add: - - SYS_ADMIN - ports: - - containerPort: 443 - - containerPort: 80 - - containerPort: 4505 - - containerPort: 4506 - - containerPort: 69 - protocol: UDP - - containerPort: 25151 - - containerPort: 5432 - - containerPort: 8000 - - containerPort: 8001 - envFrom: - - configMapRef: - name: uyuni-config - - secretRef: - name: uyuni-secret - volumeMounts: - - mountPath: /run - name: tmp - - mountPath: /sys/fs/cgroup - name: cgroup - - mountPath: /var/lib/pgsql - name: var-pgsql - - mountPath: /var/cache - name: var-cache - - mountPath: /var/spacewalk - name: var-spacewalk - - mountPath: /var/log - name: var-log - - mountPath: /srv/salt - name: srv-salt - - mountPath: /srv/www/htdocs/pub - name: srv-www-pub - - mountPath: /srv/www/cobbler - name: srv-www-cobbler - - mountPath: /srv/www/os-images - name: srv-www-osimages - - mountPath: /srv/tftpboot - name: srv-tftpboot - - mountPath: /srv/formula_metadata - name: srv-formulametadata - - mountPath: /srv/pillar - name: srv-pillar - - mountPath: /srv/susemanager - name: srv-susemanager - - mountPath: /srv/spacewalk - name: srv-spacewalk - - mountPath: /root - name: root - - mountPath: /etc/apache2 - name: etc-apache2 - - mountPath: /etc/rhn - name: etc-rhn - - mountPath: /etc/systemd/system/multi-user.target.wants - name: etc-systemd - - mountPath: /etc/salt - name: etc-salt - - mountPath: /etc/tomcat - name: etc-tomcat - - mountPath: /etc/cobbler - name: etc-cobbler - - mountPath: /etc/sysconfig - name: etc-sysconfig - - mountPath: /etc/pki/tls - name: etc-tls - - name: ca-cert - mountPath: /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT - readOnly: true - subPath: ca.crt - - name: tls-key - mountPath: /etc/pki/spacewalk-tls - volumes: - - name: tmp - emptyDir: - medium: Memory - sizeLimit: 256Mi - - name: cgroup - hostPath: - path: /sys/fs/cgroup - type: Directory - - name: var-pgsql - persistentVolumeClaim: - claimName: var-pgsql - - name: var-cache - persistentVolumeClaim: - claimName: var-cache - - name: var-spacewalk - persistentVolumeClaim: - claimName: var-spacewalk - - name: var-log - persistentVolumeClaim: - claimName: var-log - - name: srv-salt - persistentVolumeClaim: - claimName: srv-salt - - name: srv-www-pub - persistentVolumeClaim: - claimName: srv-www-pub - - name: srv-www-cobbler - persistentVolumeClaim: - claimName: srv-www-cobbler - - name: srv-www-osimages - persistentVolumeClaim: - claimName: srv-www-osimages - - name: srv-tftpboot - persistentVolumeClaim: - claimName: srv-tftpboot - - name: srv-formulametadata - persistentVolumeClaim: - claimName: srv-formulametadata - - name: srv-pillar - persistentVolumeClaim: - claimName: srv-pillar - - name: srv-susemanager - persistentVolumeClaim: - claimName: srv-susemanager - - name: srv-spacewalk - persistentVolumeClaim: - claimName: srv-spacewalk - - name: root - persistentVolumeClaim: - claimName: root - - name: etc-apache2 - persistentVolumeClaim: - claimName: etc-apache2 - - name: etc-rhn - persistentVolumeClaim: - claimName: etc-rhn - - name: etc-systemd - persistentVolumeClaim: - claimName: etc-systemd - - name: etc-salt - persistentVolumeClaim: - claimName: etc-salt - - name: etc-tomcat - persistentVolumeClaim: - claimName: etc-tomcat - - name: etc-cobbler - persistentVolumeClaim: - claimName: etc-cobbler - - name: 
etc-sysconfig - persistentVolumeClaim: - claimName: etc-sysconfig - - name: ca-cert - configMap: - name: uyuni-ca - - name: etc-tls - persistentVolumeClaim: - claimName: etc-tls - - name: tls-key - secret: - secretName: uyuni-cert - items: - - key: tls.crt - path: spacewalk.crt - - key: tls.key - path: spacewalk.key - mode: 0600 - dnsPolicy: ClusterFirst - restartPolicy: Always diff --git a/containers/doc/server-kubernetes/uyuni-config.yaml b/containers/doc/server-kubernetes/uyuni-config.yaml deleted file mode 100644 index e6bc94b489a8..000000000000 --- a/containers/doc/server-kubernetes/uyuni-config.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: v1 -data: - MANAGER_USER: "spacewalk" - MANAGER_ADMIN_EMAIL: "galaxy-noise@suse.de" - MANAGER_DB_NAME: susemanager - MANAGER_DB_HOST: localhost - MANAGER_DB_PORT: "5432" - MANAGER_DB_PROTOCOL: "TCP" - MANAGER_ENABLE_TFTP: "Y" - REPORT_DB_HOST: "uyuni-dev.world-co.com" - NO_SSL: "Y" - MANAGER_MAIL_FROM: "notifications@uyuni-dev.world-co.com" - UYUNI_CONFIG: "uyuni-dev.world-co.com" -kind: ConfigMap -metadata: - name: uyuni-config ---- -apiVersion: v1 -data: - MANAGER_PASS: c3BhY2V3YWxr - SCC_USER: "" - SCC_PASS: "" -kind: Secret -metadata: - name: uyuni-secret diff --git a/containers/server-helm/.helmignore b/containers/server-helm/.helmignore new file mode 100644 index 000000000000..0e8a0eb36f4c --- /dev/null +++ b/containers/server-helm/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/containers/server-helm/Chart.yaml b/containers/server-helm/Chart.yaml new file mode 100644 index 000000000000..84bebb20b9db --- /dev/null +++ b/containers/server-helm/Chart.yaml @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: MIT +#!BuildTag: uyuni/server:latest +#!BuildTag: uyuni/server:4.4.0 +#!BuildTag: uyuni/server:4.4.0-build%RELEASE% +apiVersion: v2 +name: server +description: Uyuni server containers. 
+type: application +home: https://www.uyuni-project.org/ +icon: https://www.uyuni-project.org/img/uyuni-logo.svg +version: 4.4.0 diff --git a/containers/server-helm/_service b/containers/server-helm/_service new file mode 100644 index 000000000000..dc713a1f9381 --- /dev/null +++ b/containers/server-helm/_service @@ -0,0 +1,3 @@ + + + diff --git a/containers/server-helm/charts/.gitkeep b/containers/server-helm/charts/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/containers/server-helm/server-helm.changes b/containers/server-helm/server-helm.changes new file mode 100644 index 000000000000..efc36b9365e0 --- /dev/null +++ b/containers/server-helm/server-helm.changes @@ -0,0 +1,4 @@ +------------------------------------------------------------------- +Thu Mar 9 13:43:51 UTC 2023 - Cédric Bosdonnat + +- Initial version diff --git a/containers/server-helm/templates/_helpers.tpl b/containers/server-helm/templates/_helpers.tpl new file mode 100644 index 000000000000..2cba9281b6b8 --- /dev/null +++ b/containers/server-helm/templates/_helpers.tpl @@ -0,0 +1,11 @@ +{{- define "deployment.container.image" -}} +{{- $imageName := .name -}} +{{- $uri := (printf "%s/%s:%s" .global.Values.repository $imageName .global.Values.version) | default .global.Chart.AppVersion -}} +{{- if .global.Values.images -}} +{{- $image := (get .global.Values.images $imageName) -}} +{{- if $image -}} +{{- $uri = $image -}} +{{- end -}} +{{- end -}} +{{- $uri -}} +{{- end -}} \ No newline at end of file diff --git a/containers/server-helm/templates/config.yaml b/containers/server-helm/templates/config.yaml new file mode 100644 index 000000000000..3b1e3f48f0b9 --- /dev/null +++ b/containers/server-helm/templates/config.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +data: + MANAGER_USER: {{ .Values.uyuniUser | default "spacewalk" }} + MANAGER_ADMIN_EMAIL: {{ .Values.uyuniAdminEmail }} + MANAGER_DB_NAME: "susemanager" + MANAGER_DB_HOST: "localhost" + MANAGER_DB_PORT: "5432" + MANAGER_DB_PROTOCOL: "TCP" + MANAGER_ENABLE_TFTP: '{{ .Values.uyuniEnableTftp | default true | ternary "Y" "N" }}' + REPORT_DB_HOST: {{ .Values.reportDbHost | default .Values.fqdn }} +{{- if .Values.reportDbPort }} + REPORT_DB_PORT: {{ .Values.reportDbPort }} +{{- end }} +{{- if .Values.reportDbUser }} + REPORT_DB_USER: {{ .Values.reportDbUser }} +{{- end }} +{{- if .Values.reportDbPass }} + REPORT_DB_PASS: {{ .Values.reportDbPass }} +{{- end }} +{{- if .Values.reportDbName }} + REPORT_DB_NAME: {{ .Values.reportDbName }} +{{- end }} + NO_SSL: "Y" + MANAGER_MAIL_FROM: {{ .Values.uyuniMailFrom }} + UYUNI_FQDN: {{ .Values.fqdn }} +kind: ConfigMap +metadata: + name: uyuni-config + namespace: "{{ .Release.Namespace }}" +--- +apiVersion: v1 +data: + MANAGER_PASS: {{ .Values.uyuniPass | default "spacewalk" | b64enc }} + SCC_USER: {{ .Values.sccUser | default "" | b64enc }} + SCC_PASS: {{ .Values.sccPass | default "" | b64enc }} +kind: Secret +metadata: + name: uyuni-secret + namespace: "{{ .Release.Namespace }}" + diff --git a/containers/server-helm/templates/deployment.yaml b/containers/server-helm/templates/deployment.yaml new file mode 100644 index 000000000000..bdf859b2c46d --- /dev/null +++ b/containers/server-helm/templates/deployment.yaml @@ -0,0 +1,539 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: uyuni + namespace: "{{ .Release.Namespace }}" +spec: + replicas: 1 + selector: + matchLabels: + app: uyuni + template: + metadata: + labels: + app: uyuni + spec: + initContainers: + - name: init-etc-tls + image: {{- include 
"deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/pki/tls /mnt; + chmod --reference=/etc/pki/tls /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/pki/tls/. /mnt; + ln -s /etc/pki/spacewalk-tls/spacewalk.crt /mnt/certs/spacewalk.crt; + ln -s /etc/pki/spacewalk-tls/spacewalk.key /mnt/private/spacewalk.key; + cp /etc/pki/spacewalk-tls/spacewalk.key /mnt/private/pg-spacewalk.key; + chown postgres:postgres /mnt/private/pg-spacewalk.key; + fi + volumeMounts: + - mountPath: /mnt + name: etc-tls + - name: tls-key + mountPath: /etc/pki/spacewalk-tls + - name: init-var-pgsql + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/var/lib/pgsql /mnt; + chmod --reference=/var/lib/pgsql /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/lib/pgsql/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-pgsql + - name: init-var-cache + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/var/cache /mnt; + chmod --reference=/var/cache /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/cache/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-cache + - name: init-var-log + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/var/log /mnt; + chmod --reference=/var/log /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/log/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-log + - name: init-srv-salt + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/salt /mnt; + chmod --reference=/srv/salt /mnt + volumeMounts: + - mountPath: /mnt + name: srv-salt + - name: init-srv-www-pub + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/www/htdocs/pub /mnt; + chmod --reference=/srv/www/htdocs/pub /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/www/htdocs/pub/. /mnt; + ln -s /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT /mnt/RHN-ORG-TRUSTED-SSL-CERT; + fi + volumeMounts: + - mountPath: /mnt + name: srv-www-pub + - name: init-srv-www-cobbler + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/www/cobbler /mnt; + chmod --reference=/srv/www/cobbler /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/www/cobbler/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-www-cobbler + - name: init-srv-www-osimages + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) 
| indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/www/os-images /mnt; + chmod --reference=/srv/www/os-images /mnt + volumeMounts: + - mountPath: /mnt + name: srv-www-osimages + - name: init-srv-tftpboot + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/tftpboot /mnt; + chmod --reference=/srv/tftpboot /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/tftpboot/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-tftpboot + - name: init-srv-formulametadata + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/formula_metadata /mnt; + chmod --reference=/srv/formula_metadata /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/formula_metadata/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-formulametadata + - name: init-srv-pillar + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/pillar /mnt; + chmod --reference=/srv/pillar /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/pillar/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-pillar + - name: init-srv-susemanager + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/susemanager /mnt; + chmod --reference=/srv/susemanager /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/susemanager/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-susemanager + - name: init-srv-spacewalk + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/spacewalk /mnt; + chmod --reference=/srv/spacewalk /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/spacewalk/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-spacewalk + - name: init-root + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/root /mnt; + chmod --reference=/root /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /root/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: root + - name: init-etc-apache2 + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/apache2 /mnt; + chmod --reference=/etc/apache2 /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/apache2/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-apache2 + - name: init-etc-rhn + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/rhn /mnt; + chmod --reference=/etc/rhn /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/rhn/. 
/mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-rhn + - name: init-etc-systemd + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/systemd/system/multi-user.target.wants /mnt; + chmod --reference=/etc/systemd/system/multi-user.target.wants /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/systemd/system/multi-user.target.wants/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-systemd + - name: init-etc-salt + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/salt /mnt; + chmod --reference=/etc/salt /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/salt/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-salt + - name: init-etc-tomcat + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/tomcat /mnt; + chmod --reference=/etc/tomcat /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/tomcat/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-tomcat + - name: init-etc-cobbler + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/cobbler /mnt; + chmod --reference=/etc/cobbler /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/cobbler/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-cobbler + - name: init-etc-sysconfig + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/sysconfig /mnt; + chmod --reference=/etc/sysconfig /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/sysconfig/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-sysconfig + containers: + - name: uyuni + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) 
| indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - containerPort: 443 + - containerPort: 80 + - containerPort: 4505 + - containerPort: 4506 + - containerPort: 69 + protocol: UDP + - containerPort: 25151 + - containerPort: 5432 +{{- if .Values.exposeJavaDebug | default false }} + - containerPort: 8000 + - containerPort: 8001 +{{- end }} +{{- if and .Values.mirror (or .Values.mirror.claimName .Values.mirror.hostPath) }} + env: + - name: MIRROR_PATH + value: /mirror +{{- end }} + envFrom: + - configMapRef: + name: uyuni-config + - secretRef: + name: uyuni-secret + volumeMounts: + - mountPath: /run + name: tmp + - mountPath: /sys/fs/cgroup + name: cgroup + - mountPath: /var/lib/pgsql + name: var-pgsql + - mountPath: /var/cache + name: var-cache + - mountPath: /var/spacewalk + name: var-spacewalk + - mountPath: /var/log + name: var-log + - mountPath: /srv/salt + name: srv-salt + - mountPath: /srv/www/htdocs/pub + name: srv-www-pub + - mountPath: /srv/www/cobbler + name: srv-www-cobbler + - mountPath: /srv/www/os-images + name: srv-www-osimages + - mountPath: /srv/tftpboot + name: srv-tftpboot + - mountPath: /srv/formula_metadata + name: srv-formulametadata + - mountPath: /srv/pillar + name: srv-pillar + - mountPath: /srv/susemanager + name: srv-susemanager + - mountPath: /srv/spacewalk + name: srv-spacewalk + - mountPath: /root + name: root + - mountPath: /etc/apache2 + name: etc-apache2 + - mountPath: /etc/rhn + name: etc-rhn + - mountPath: /etc/systemd/system/multi-user.target.wants + name: etc-systemd + - mountPath: /etc/salt + name: etc-salt + - mountPath: /etc/tomcat + name: etc-tomcat + - mountPath: /etc/cobbler + name: etc-cobbler + - mountPath: /etc/sysconfig + name: etc-sysconfig + - mountPath: /etc/pki/tls + name: etc-tls + - name: ca-cert + mountPath: /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT + readOnly: true + subPath: ca.crt + - name: tls-key + mountPath: /etc/pki/spacewalk-tls +{{- if and .Values.mirror (or .Values.mirror.claimName .Values.mirror.hostPath) }} + - name: mirror + mountPath: /mirror +{{- end }} + volumes: + - name: tmp + emptyDir: + medium: Memory + sizeLimit: 256Mi + - name: cgroup + hostPath: + path: /sys/fs/cgroup + type: Directory + - name: var-pgsql + persistentVolumeClaim: + claimName: var-pgsql + - name: var-cache + persistentVolumeClaim: + claimName: var-cache + - name: var-spacewalk + persistentVolumeClaim: + claimName: var-spacewalk + - name: var-log + persistentVolumeClaim: + claimName: var-log + - name: srv-salt + persistentVolumeClaim: + claimName: srv-salt + - name: srv-www-pub + persistentVolumeClaim: + claimName: srv-www-pub + - name: srv-www-cobbler + persistentVolumeClaim: + claimName: srv-www-cobbler + - name: srv-www-osimages + persistentVolumeClaim: + claimName: srv-www-osimages + - name: srv-tftpboot + persistentVolumeClaim: + claimName: srv-tftpboot + - name: srv-formulametadata + persistentVolumeClaim: + claimName: srv-formulametadata + - name: srv-pillar + persistentVolumeClaim: + claimName: srv-pillar + - name: srv-susemanager + persistentVolumeClaim: + claimName: srv-susemanager + - name: srv-spacewalk + persistentVolumeClaim: + claimName: srv-spacewalk + - name: root + persistentVolumeClaim: + claimName: root + - name: etc-apache2 + persistentVolumeClaim: + claimName: etc-apache2 + - name: etc-rhn + persistentVolumeClaim: + claimName: etc-rhn + - name: etc-systemd + persistentVolumeClaim: + claimName: etc-systemd + - name: etc-salt + persistentVolumeClaim: + claimName: etc-salt + - name: etc-tomcat + 
persistentVolumeClaim: + claimName: etc-tomcat + - name: etc-cobbler + persistentVolumeClaim: + claimName: etc-cobbler + - name: etc-sysconfig + persistentVolumeClaim: + claimName: etc-sysconfig + - name: ca-cert + configMap: + name: uyuni-ca + - name: etc-tls + persistentVolumeClaim: + claimName: etc-tls + - name: tls-key + secret: + secretName: uyuni-cert + items: + - key: tls.crt + path: spacewalk.crt + - key: tls.key + path: spacewalk.key + mode: 0600 +{{- if .Values.mirror }} + {{- if .Values.mirror.claimName }} + - name: mirror + persistentVolumeClaim: + claimName: {{ .Values.mirror.claimName }} + {{- else if .Values.mirror.hostPath }} + - name: mirror + hostPath: + path: {{ .Values.mirror.hostPath }} + {{- end }} +{{- end }} + dnsPolicy: ClusterFirst + restartPolicy: Always diff --git a/containers/doc/server-kubernetes/uyuni-ingress-traefik.yaml b/containers/server-helm/templates/ingress.yaml similarity index 82% rename from containers/doc/server-kubernetes/uyuni-ingress-traefik.yaml rename to containers/server-helm/templates/ingress.yaml index a47f7b7e89a2..0d24433f18b1 100644 --- a/containers/doc/server-kubernetes/uyuni-ingress-traefik.yaml +++ b/containers/server-helm/templates/ingress.yaml @@ -3,17 +3,25 @@ kind: Ingress metadata: creationTimestamp: null name: uyuni-ingress-ssl + namespace: "{{ .Release.Namespace }}" annotations: +{{- if eq .Values.ingress "traefik" }} traefik.ingress.kubernetes.io/router.tls: "true" - traefik.ingress.kubernetes.io/router.tls.domains.n.main: "uyuni-dev.world-co.com" + traefik.ingress.kubernetes.io/router.tls.domains.n.main: "{{ .Values.fqdn }}" traefik.ingress.kubernetes.io/router.entrypoints: "websecure,web" +{{- end }} +{{- if .Values.ingressSslAnnotations }} +{{ toYaml .Values.ingressSslAnnotations | indent 4 }} +{{- end }} + labels: + app: uyuni spec: tls: - hosts: - - uyuni-dev.world-co.com + - {{ .Values.fqdn }} secretName: uyuni-cert rules: - - host: uyuni-dev.world-co.com + - host: {{ .Values.fqdn }} http: paths: - backend: @@ -23,18 +31,22 @@ spec: number: 80 path: / pathType: Prefix +{{- if eq .Values.ingress "traefik" }} --- apiVersion: networking.k8s.io/v1 kind: Ingress metadata: creationTimestamp: null name: uyuni-ingress-ssl-redirect + namespace: "{{ .Release.Namespace }}" annotations: traefik.ingress.kubernetes.io/router.middlewares: "default-uyuni-https-redirect@kubernetescrd" traefik.ingress.kubernetes.io/router.entrypoints: "web" + labels: + app: uyuni spec: rules: - - host: uyuni-dev.world-co.com + - host: {{ .Values.fqdn }} http: paths: - backend: @@ -44,18 +56,26 @@ spec: number: 80 path: / pathType: Prefix +{{- end }} --- apiVersion: networking.k8s.io/v1 kind: Ingress metadata: creationTimestamp: null name: uyuni-ingress-nossl + namespace: "{{ .Release.Namespace }}" annotations: +{{- if eq .Values.ingress "nginx" }} + nginx.ingress.kubernetes.io/ssl-redirect: "false" +{{- else if eq .Values.ingress "traefik" }} traefik.ingress.kubernetes.io/router.tls: "false" traefik.ingress.kubernetes.io/router.entrypoints: "web" +{{- end }} + labels: + app: uyuni spec: rules: - - host: uyuni-dev.world-co.com + - host: {{ .Values.fqdn }} http: paths: - backend: diff --git a/containers/doc/server-kubernetes/k3s-ingress-routes.yaml b/containers/server-helm/templates/k3s-ingress-routes.yaml similarity index 81% rename from containers/doc/server-kubernetes/k3s-ingress-routes.yaml rename to containers/server-helm/templates/k3s-ingress-routes.yaml index 1c9fa2a693ea..70ebaba9b3bd 100644 --- 
a/containers/doc/server-kubernetes/k3s-ingress-routes.yaml +++ b/containers/server-helm/templates/k3s-ingress-routes.yaml @@ -1,7 +1,9 @@ +{{- if eq .Values.ingress "traefik" }} apiVersion: traefik.containo.us/v1alpha1 kind: Middleware metadata: name: uyuni-https-redirect + namespace: "{{ .Release.Namespace }}" spec: redirectScheme: scheme: https @@ -11,6 +13,7 @@ apiVersion: traefik.containo.us/v1alpha1 kind: IngressRouteTCP metadata: name: postgresql-router + namespace: "{{ .Release.Namespace }}" spec: entryPoints: - postgres @@ -24,6 +27,7 @@ apiVersion: traefik.containo.us/v1alpha1 kind: IngressRouteTCP metadata: name: salt-publish-router + namespace: "{{ .Release.Namespace }}" spec: entryPoints: - salt-publish @@ -37,6 +41,7 @@ apiVersion: traefik.containo.us/v1alpha1 kind: IngressRouteTCP metadata: name: salt-request-router + namespace: "{{ .Release.Namespace }}" spec: entryPoints: - salt-request @@ -50,6 +55,7 @@ apiVersion: traefik.containo.us/v1alpha1 kind: IngressRouteTCP metadata: name: cobbler-router + namespace: "{{ .Release.Namespace }}" spec: entryPoints: - cobbler @@ -58,11 +64,13 @@ spec: services: - name: uyuni-tcp port: 25151 +{{- if .Values.exposeJavaDebug }} --- apiVersion: traefik.containo.us/v1alpha1 kind: IngressRouteTCP metadata: name: tomcat-debug-router + namespace: "{{ .Release.Namespace }}" spec: entryPoints: - tomcat-debug @@ -76,6 +84,7 @@ apiVersion: traefik.containo.us/v1alpha1 kind: IngressRouteTCP metadata: name: tasko-debug-router + namespace: "{{ .Release.Namespace }}" spec: entryPoints: - tasko-debug @@ -84,11 +93,13 @@ spec: services: - name: uyuni-tcp port: 8001 +{{- end }} --- apiVersion: traefik.containo.us/v1alpha1 kind: IngressRouteUDP metadata: name: tftp-router + namespace: "{{ .Release.Namespace }}" spec: entryPoints: - tftp @@ -96,3 +107,4 @@ spec: - services: - name: uyuni-udp port: 69 +{{- end }} diff --git a/containers/doc/server-kubernetes/service.yaml b/containers/server-helm/templates/service.yaml similarity index 70% rename from containers/doc/server-kubernetes/service.yaml rename to containers/server-helm/templates/service.yaml index f17adc1610c4..f74d4ba9e136 100644 --- a/containers/doc/server-kubernetes/service.yaml +++ b/containers/server-helm/templates/service.yaml @@ -1,10 +1,14 @@ apiVersion: v1 kind: Service metadata: - creationTimestamp: null labels: app: uyuni name: uyuni-tcp + namespace: "{{ .Release.Namespace }}" +{{- if .Values.servicesAnnotations }} + annotations: +{{ toYaml .Values.servicesAnnotations | indent 4 }} +{{- end }} spec: ports: - name: http @@ -27,6 +31,7 @@ spec: port: 5432 protocol: TCP targetPort: 5432 +{{- if .Values.exposeJavaDebug | default false }} - name: tomcat-debug port: 8000 protocol: TCP @@ -35,19 +40,22 @@ spec: port: 8001 protocol: TCP targetPort: 8001 +{{- end }} selector: app: uyuni type: ClusterIP -status: - loadBalancer: {} --- apiVersion: v1 kind: Service metadata: - creationTimestamp: null labels: app: uyuni name: uyuni-udp + namespace: "{{ .Release.Namespace }}" +{{- if .Values.servicesAnnotations }} + annotations: +{{ toYaml .Values.servicesAnnotations | indent 4 }} +{{- end }} spec: ports: - name: tftp @@ -57,5 +65,3 @@ spec: selector: app: uyuni type: ClusterIP -status: - loadBalancer: {} diff --git a/containers/server-helm/templates/volumes.yaml b/containers/server-helm/templates/volumes.yaml new file mode 100644 index 000000000000..d750805f1501 --- /dev/null +++ b/containers/server-helm/templates/volumes.yaml @@ -0,0 +1,527 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: 
+ name: var-pgsql + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-pgsql +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-cache + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-cache +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-spacewalk + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-spacewalk +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-log + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 2Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-log +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-salt + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-salt +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-www-pub + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-www-pub +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-www-cobbler + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-www-cobbler +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-www-osimages + 
namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-www-osimages +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-tftpboot + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-tftpboot +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-formulametadata + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-formulametadata +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-pillar + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-pillar +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-susemanager + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-susemanager +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-spacewalk + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-spacewalk +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: root + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: root +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-apache2 + 
namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-apache2 +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-rhn + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-rhn +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-systemd + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-systemd +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-salt + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-salt +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-tomcat + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-tomcat +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-cobbler + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 1Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-cobbler +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-sysconfig + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 1Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-sysconfig +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-tls + namespace: "{{ .Release.Namespace }}" +spec: 
+{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 1Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-tls +{{- end }} diff --git a/containers/server-helm/tito.props b/containers/server-helm/tito.props new file mode 100644 index 000000000000..f22069cb8efa --- /dev/null +++ b/containers/server-helm/tito.props @@ -0,0 +1,2 @@ +[buildconfig] +tagger = tito.tagger.SUSEContainerTagger diff --git a/containers/server-helm/values.yaml b/containers/server-helm/values.yaml new file mode 100644 index 000000000000..7c4bf2a69573 --- /dev/null +++ b/containers/server-helm/values.yaml @@ -0,0 +1,108 @@ +# The default repository and image version if not defined otherwise +repository: registry.opensuse.org/uyuni +version: latest + +## Allows to override the default URI for an image if defined +## Requires a full URI in a form of /: +## +images: + # server: // + + +## Ref: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy +## +pullPolicy: "IfNotPresent" + +## uyuni server overall Persistent Volume access modes +## Must match those of existing PV or dynamic provisioner +## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +accessModes: + - ReadWriteOnce + +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +# storageClass: "-" + +## matchPvByLabel adds selectors on each claim to select a PV with a 'data' label matching the PVC name. +## This can be helpful for static PV management. +matchPvByLabel: false + +## mirror defines a volume or host path to mount in the container as server.susemanager.fromdir value. +## Use either claimName or hostPath to reference the volume source. +## +## When using claimName, both claims and PVs need to be defined before running the chart +## Note that hostPath will not work on multi-node cluster +## +## If the value is set before the first run of the server, the rhn.conf file will be adjusted during the setup. +#mirror: +# claimName: mirror +# hostPath: /srv/mirror + +# TODO Parametrize big volumes sizes + +## servicesAnnotations are annotations to set on both TCP and UDP services. +## This can be useful to share the same IP when using metallb +# servicesAnnotations: + +## exposeJavaDebug will expose the 8000 and 8001 ports to connect a Java debugger +## to tomcat and taskomatic respectively +# exposeJavaDebug: true + +## ingress defines the ingress that is used in the cluster. +## It can be either "nginx", "traefik" or any other value. +ingress: "traefik" + +## ingressSsl are annotations to pass the SSL ingress. +## This can be used to set a cert-manager issuer like: +## ingressSslAnnotations: +## cert-manager.io/cluster-issuer: uyuniIssuer +# ingressSslAnnotations: + +## uyuniUser is the login of the user accessing the database +uyuniUser: "spacewalk" +## uyuniPass is the password of the user accessing the database +uyuniPass: "spacewalk" + +## uyuniEnableTftp toggles TFTP service +uyuniEnableTftp: true + +## reportDbHost is the report database FQDN. 
+## Only set when using an external report database +# reportDbHost: "uyuni.local" + +## reportDbPort is the report database port to connect to. +## Only set when using an external report database +# reportDbPort: "5432" + +## reportDbUser is the username to use to connect to the report database. +## Only set when using an external report database +# reportDbUser: "" + +## reportDbPass is the password to use to connect to the report database. +## Only set when using an external report database +# reportDbPass: " + +## reportDbName is the report database name. +## Only set when using an external report database +# reportDbName: "" + +# TODO Add reportDbCaCert value and handle it + +## uyuniAdminEmail is the email where all Uyuni notifications are sent +uyuniAdminEmail: "galaxy-noise@suse.de" +## uyuniMailFrom is the address in the from field of the emails sent by Uyuni +uyuniMailFrom: "notifications@uyuni.local" + +## fqdn is the user accessible fully qualified domain name of the uyuni server +fqdn: "uyuni.local" + +## sccUser is the SUSE Customer Center login +# sccUser: "" + +## sccPass is the SUSE Customer Center password +# sccPass: "" diff --git a/containers/server-image/uyuni-setup.service b/containers/server-image/uyuni-setup.service index f47ff30eb362..db98e02db7cc 100644 --- a/containers/server-image/uyuni-setup.service +++ b/containers/server-image/uyuni-setup.service @@ -9,7 +9,7 @@ PassEnvironment=MANAGER_USER MANAGER_PASS MANAGER_ADMIN_EMAIL PassEnvironment=CERT_CNAMES CERT_O CERT_OU CERT_CITY CERT_STATE CERT_COUNTRY CERT_EMAIL CERT_PASS PassEnvironment=LOCAL_DB MANAGER_DB_NAME MANAGER_DB_HOST MANAGER_DB_PORT MANAGER_DB_CA_CERT MANAGER_DB_PROTOCOL PassEnvironment=MANAGER_ENABLE_TFTP EXTERNALDB_ADMIN_USER EXTERNALDB_ADMIN_PASS EXTERNALDB_PROVIDER -PassEnvironment=SCC_USER SCC_PASS ISS_PARENT ACTIVATE_SLP MANAGER_MAIL_FROM NO_SSL UYUNI_FQDN +PassEnvironment=SCC_USER SCC_PASS ISS_PARENT ACTIVATE_SLP MANAGER_MAIL_FROM NO_SSL UYUNI_FQDN MIRROR_PATH PassEnvironment=REPORT_DB_NAME REPORT_DB_HOST REPORT_DB_PORT_USER REPORT_DB_PASS REPORT_DB_CA_CERT ExecStart=/usr/lib/susemanager/bin/mgr-setup -l /var/log/susemanager_setup.log -s -n ExecStartPost=systemctl disable --now uyuni-setup.service diff --git a/java/manager-build.xml b/java/manager-build.xml index a7d9122d7419..c9eedcc229a6 100644 --- a/java/manager-build.xml +++ b/java/manager-build.xml @@ -22,7 +22,6 @@ - @@ -40,8 +39,6 @@ - - @@ -302,9 +299,16 @@ - - + + + + + + + + + @@ -407,19 +411,19 @@ - + - + - + diff --git a/rel-eng/packages/server-helm b/rel-eng/packages/server-helm new file mode 100644 index 000000000000..7ffc33013560 --- /dev/null +++ b/rel-eng/packages/server-helm @@ -0,0 +1 @@ +4.4.0 containers/server-helm/ diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 1a9dcc0a3f6d..35468b9fefa5 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -919,6 +919,11 @@ do_setup() { setup_spacewalk + # In the container case, we have the MIRROR_PATH environment variable at setup + if [ -n "$MIRROR_PATH" ]; then + echo "server.susemanager.fromdir = $MIRROR_PATH" >> /etc/rhn/rhn.conf + fi + if [ -n "$ISS_PARENT" ]; then local certname=`echo "MASTER-$ISS_PARENT-TRUSTED-SSL-CERT" | sed 's/\./_/g'` curl -s -S -o /usr/share/rhn/$certname "http://$ISS_PARENT/pub/RHN-ORG-TRUSTED-SSL-CERT" From a9dc0287c5c145b7566bd192735ec1247fc38088 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Sat, 11 Mar 2023 10:15:21 +0100 Subject: [PATCH 17/80] Document server-image known issues --- 
 containers/server-image/README.md | 5 +++++
 1 file changed, 5 insertions(+)
 create mode 100644 containers/server-image/README.md

diff --git a/containers/server-image/README.md b/containers/server-image/README.md
new file mode 100644
index 000000000000..5739e65d5f95
--- /dev/null
+++ b/containers/server-image/README.md
@@ -0,0 +1,5 @@
+# Known issues
+
+* Apache fails to start in the container if AppArmor is enabled on the host.
+* Avahi names are not resolved inside the container
+

From 3e2a2cf854bcf8a0dcf823bcc40045a7c97813cb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Tue, 14 Mar 2023 14:29:46 +0100
Subject: [PATCH 18/80] k8s: document SSL certificates creation

Document two different options to set up SSL for the Uyuni server on k8s:
using `rhn-ssl-tool` and manual certificate configuration, or using
`cert-manager` with automatically generated CA and certificates.
---
 containers/doc/server-kubernetes/README.md | 103 ++++++++++++++----
 .../doc/server-kubernetes/rhn-ssl-tool.yaml | 54 +++++++++
 2 files changed, 137 insertions(+), 20 deletions(-)
 create mode 100644 containers/doc/server-kubernetes/rhn-ssl-tool.yaml

diff --git a/containers/doc/server-kubernetes/README.md b/containers/doc/server-kubernetes/README.md
index 3cf21e8edec1..33fb428efb2e 100644
--- a/containers/doc/server-kubernetes/README.md
+++ b/containers/doc/server-kubernetes/README.md
@@ -3,11 +3,7 @@
 ## Prerequisites
 
 The following assumes you have a single-node rke2 or k3s cluster ready with enough resources for the Uyuni server.
-It also assumes that `kubectl` is installed on your machine and configured to connect to the cluster.
-
-** HACK ** For now I used the SSL certificates and CA generated in one of my installation attempts.
-I will assume you already have SSL certificates matching the FQDN of the cluster node.
-Instructions or tools on how to generate those will come later.
+It also assumes that `kubectl` and `helm` are installed on your machine and configured to connect to the cluster.
 
 ## Setting up the resources
 
@@ -96,20 +92,80 @@ Proceed with the next steps.
 ***Hostname***: this procedure doesn't handle any hostname change.
 Certificates migration also needs to be documented, but that can be guessed for now with the instructions to setup a server from scratch.
 
-### Deploy the pod and its resources
 
-Create the TLS secret holding the server SSL certificates:
+### CA certificates using `rhn-ssl-tool`
+
+On the cluster node, prepare the volume with the CA password in the `/var/uyuni/ssl-build/password` file:
 
 ```
-kubectl create secret tls uyuni-cert --key /server.key --cert /server.crt
+mkdir -p /var/uyuni/ssl-build
+chmod 700 /var/uyuni
+vim /var/uyuni/ssl-build/password
+chmod 500 /var/uyuni/ssl-build/password
 ```
 
-Create a `ConfigMap` with the CA certificate:
+Edit the `rhn-ssl-tool.yaml` file to match your FQDN and subject.
+Generate the CA certificate and server certificate and key using `rhn-ssl-tool` by running:
 
 ```
-kubectl create configmap uyuni-ca --from-file=ca.crt=/RHN-ORG-TRUSTED-SSL-CERT
+kubectl apply -f rhn-ssl-tool.yaml
 ```
 
+**Note** that it pulls the big server container image and thus takes quite some time to complete.
+
+Wait for the generated pod to be in `COMPLETED` state before continuing.
+
+Create the TLS secret holding the server SSL certificates by running this on the cluster node:
+
+```
+kubectl create secret tls uyuni-cert --key /var/uyuni/ssl-build//server.key --cert /var/uyuni/ssl-build//server.crt
+```
+
+Create a `ConfigMap` with the CA certificate by running this on the cluster node:
+
+```
+kubectl create configmap uyuni-ca --from-file=ca.crt=/var/uyuni/ssl-build/RHN-ORG-TRUSTED-SSL-CERT
+```
+
+### CA certificates using Cert-Manager
+
+Install cert-manager on the cluster.
+The [default static install](https://cert-manager.io/docs/installation/#default-static-install) is enough for the testing use case:
+
+```
+kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml
+```
+
+`cert-manager` now needs to be configured to issue certificates.
+The following instructions document setting up a self-signed CA and the corresponding issuers.
+Check the [documentation](https://cert-manager.io/docs/configuration/acme/) on how to set up other issuers like Let's Encrypt.
+
+Edit the `cert-manager-selfsigned-issuer.yaml` file to match the server FQDN and subject and then apply it:
+
+```
+kubectl apply -f cert-manager-selfsigned-issuer.yaml
+```
+
+For security reasons, copy the CA certificate into a separate config map so that the CA secret is not mounted on the pod:
+
+```
+kubectl get secret uyuni-ca -o=jsonpath='{.data.ca\.crt}' | base64 -d >ca.crt
+kubectl create configmap uyuni-ca --from-file=ca.crt
+rm ca.crt
+```
+
+Run the following command to append the ingress annotation to use the new CA when applying the helm chart later:
+
+```
+cat >values.yaml << EOF
+ingressSslAnnotations:
+  cert-manager.io/issuer: uyuni-ca-issuer
+EOF
+```
+
+
+### Deploy the pod and its resources
+
 Change the hostname associated to the persistent volumes to match the hostname of your node:
 
 ```
@@ -123,21 +179,28 @@ The volumes are folders on the cluster node and need to be manually created:
 mkdir -p `kubectl get pv -o jsonpath='{.items[*].spec.local.path}'`
 ```
 
-Install the helm chart from the source's `containers` folder:
-Replace the `uyuni-dev.world-co.com` by your FQDN.
+Run the following to add the helm chart configuration values, but replace `uyuni-dev.world-co.com` with your server's FQDN:
+
 ```
-helm install uyuni server-helm \
-    --set repository=registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni \
-    --set storageClass=local-storage \
-    --set exposeJavaDebug=true \
-    --set uyuniMailFrom=notifications@uyuni-dev.world-co.com \
-    --set fqdn=uyuni-dev.world-co.com
+cat >>values.yaml << EOF
+repository: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni
+storageClass: local-storage
+exposeJavaDebug: true
+uyuniMailFrom: notifications@uyuni-dev.world-co.com
+fqdn: uyuni-dev.world-co.com
+EOF
 ```
-If deploying on `rke2`, add the `--set ingres=nginx` parameter to the `helm install` command.
+If deploying on `rke2`, add the `ingress: nginx` line to the `values.yaml` file.
 
 You can also set more variables like `sccUser` or `sccPass`.
-Check the `server-helm/values.yaml` file for the complete list.
+Check the [server-helm/values.yaml](https://github.com/uyuni-project/uyuni/blob/server-container/containers/server-helm/values.yaml) file for the complete list.
+
+Install the helm chart from the source's `containers` folder:
+
+```
+helm install uyuni server-helm -f values.yaml
+```
 Note that the Helm chart installs a deployment with one replica.
The pod name is automatically generated by kubernetes and changes at every start. diff --git a/containers/doc/server-kubernetes/rhn-ssl-tool.yaml b/containers/doc/server-kubernetes/rhn-ssl-tool.yaml new file mode 100644 index 000000000000..2e0aad7049b8 --- /dev/null +++ b/containers/doc/server-kubernetes/rhn-ssl-tool.yaml @@ -0,0 +1,54 @@ +apiVersion: batch/v1 +kind: Job +metadata: + labels: + run: rhn-ssl-tool + name: rhn-ssl-tool +spec: + backoffLimit: 0 + template: + spec: + restartPolicy: Never + initContainers: + - name: gen-ca + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server + command: + - rhn-ssl-tool + - --gen-ca + - --no-rpm + - --set-common-name=uyuni-dev.world-co.com + - --set-country=FR + - --set-state=Burgundy + - --set-city=Macon + - --set-org=SUSE + - --set-org-unit=BCL + - --set-email=sylvestre@world-co.com + - --password-file=/ssl-build/password + volumeMounts: + - name: ssl-build + mountPath: /ssl-build + containers: + - name: gen-server + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server + command: + - rhn-ssl-tool + - --gen-server + - --no-rpm + - --set-cname=uyuni-dev.world-co.com + - --set-country=FR + - --set-state=Burgundy + - --set-city=Macon + - --set-org=SUSE + - --set-org-unit=BCL + - --set-email=sylvestre@world-co.com + - --set-hostname=uyuni-dev + - --set-cname=uyuni.world-co.com + - --password-file=/ssl-build/password + volumeMounts: + - name: ssl-build + mountPath: /ssl-build + volumes: + - name: ssl-build + hostPath: + path: /var/uyuni/ssl-build + type: Directory From b6ce6cf363d16a27116168e13f529002a3cfb0ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 21 Mar 2023 17:36:46 +0100 Subject: [PATCH 19/80] testsuite: use uyunictl exec to run commands in the container --- testsuite/features/support/lavanda.rb | 15 +++++++++++---- testsuite/features/support/twopence_init.rb | 5 +++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/testsuite/features/support/lavanda.rb b/testsuite/features/support/lavanda.rb index 79e8bbe9c3d3..84a7940517ea 100644 --- a/testsuite/features/support/lavanda.rb +++ b/testsuite/features/support/lavanda.rb @@ -77,6 +77,12 @@ def init_os_version(os_version) @in_os_version = os_version end + ## + # Initializes the @in_has_uyunictl variable to true. + def init_has_uyunictl + @in_has_uyunictl = true + end + # getter functions, executed on testsuite def hostname raise 'empty hostname, something wrong' if @in_hostname.empty? @@ -145,15 +151,16 @@ def os_version # buffer_size: The maximum buffer size in bytes. Defaults to 65536. # verbose: Whether to log the output of the command in case of success. Defaults to false. def run(cmd, separated_results: false, check_errors: true, timeout: DEFAULT_TIMEOUT, user: 'root', successcodes: [0], buffer_size: 65536, verbose: false) + cmd_prefix = @in_has_uyunictl ? 
"uyunictl exec -i " : "" if separated_results - out, err, _lo, _rem, code = test_and_store_results_separately(cmd, user, timeout, buffer_size) + out, err, _lo, _rem, code = test_and_store_results_separately(cmd_prefix + cmd, user, timeout, buffer_size) else - out, _lo, _rem, code = test_and_store_results_together(cmd, user, timeout, buffer_size) + out, _lo, _rem, code = test_and_store_results_together(cmd_prefix + cmd, user, timeout, buffer_size) end if check_errors - raise "FAIL: #{cmd} returned status code = #{code}.\nOutput:\n#{out}" unless successcodes.include?(code) + raise "FAIL: #{cmd_prefix}#{cmd} returned status code = #{code}.\nOutput:\n#{out}" unless successcodes.include?(code) end - STDOUT.puts "#{cmd} returned status code = #{code}.\nOutput:\n#{out}" if verbose + STDOUT.puts "#{cmd_prefix}#{cmd} returned status code = #{code}.\nOutput:\n#{out}" if verbose if separated_results [out, err, code] else diff --git a/testsuite/features/support/twopence_init.rb b/testsuite/features/support/twopence_init.rb index 611eb0b6c9d0..34ccd1be4e38 100644 --- a/testsuite/features/support/twopence_init.rb +++ b/testsuite/features/support/twopence_init.rb @@ -264,6 +264,11 @@ def file_inject(node, local_file, remote_file) code end +_out, code = $server.run('which uyunictl', check_errors: false) +if code.zero? + $server.init_has_uyunictl +end + # Other global variables $product = product $product_version = product_version From dc564045c34e9aac0d17503a6b9330cac634b831 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 23 Mar 2023 16:56:59 +0100 Subject: [PATCH 20/80] Implement file_inject for containerized server --- testsuite/features/support/lavanda.rb | 44 ++++++++++++++++++--- testsuite/features/support/twopence_init.rb | 2 +- 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/testsuite/features/support/lavanda.rb b/testsuite/features/support/lavanda.rb index 84a7940517ea..b94e15b62af7 100644 --- a/testsuite/features/support/lavanda.rb +++ b/testsuite/features/support/lavanda.rb @@ -151,16 +151,35 @@ def os_version # buffer_size: The maximum buffer size in bytes. Defaults to 65536. # verbose: Whether to log the output of the command in case of success. Defaults to false. def run(cmd, separated_results: false, check_errors: true, timeout: DEFAULT_TIMEOUT, user: 'root', successcodes: [0], buffer_size: 65536, verbose: false) - cmd_prefix = @in_has_uyunictl ? "uyunictl exec -i " : "" + cmd_prefixed = cmd + if @in_has_uyunictl + cmd_prefixed = "uyunictl exec -i '#{cmd.gsub(/'/, '\'"\'"\'')}'" + end + run_local(cmd_prefixed, separated_results: separated_results, check_errors: check_errors, timeout: timeout, user: user, successcodes: successcodes, buffer_size: buffer_size, verbose: verbose) + end + + ## + # It runs a command, and returns the output, error, and exit code. + # + # Args: + # cmd: The command to run. + # separated_results: Whether the results should be stored separately. Defaults to false. + # check_errors: Whether to check for errors or not. Defaults to true. + # timeout: The timeout to be used, in seconds. Defaults to 250 or the value of the DEFAULT_TIMEOUT environment variable. + # user: The user to be used to run the command. Defaults to root. + # successcodes: An array with the values to be accepted as success codes from the command run. + # buffer_size: The maximum buffer size in bytes. Defaults to 65536. + # verbose: Whether to log the output of the command in case of success. Defaults to false. 
+ def run_local(cmd, separated_results: false, check_errors: true, timeout: DEFAULT_TIMEOUT, user: 'root', successcodes: [0], buffer_size: 65536, verbose: false) if separated_results - out, err, _lo, _rem, code = test_and_store_results_separately(cmd_prefix + cmd, user, timeout, buffer_size) + out, err, _lo, _rem, code = test_and_store_results_separately(cmd, user, timeout, buffer_size) else - out, _lo, _rem, code = test_and_store_results_together(cmd_prefix + cmd, user, timeout, buffer_size) + out, _lo, _rem, code = test_and_store_results_together(cmd, user, timeout, buffer_size) end if check_errors - raise "FAIL: #{cmd_prefix}#{cmd} returned status code = #{code}.\nOutput:\n#{out}" unless successcodes.include?(code) + raise "FAIL: #{cmd} returned status code = #{code}.\nOutput:\n#{out}" unless successcodes.include?(code) end - STDOUT.puts "#{cmd_prefix}#{cmd} returned status code = #{code}.\nOutput:\n#{out}" if verbose + STDOUT.puts "#{cmd} returned status code = #{code}.\nOutput:\n#{out}" if verbose if separated_results [out, err, code] else @@ -209,4 +228,19 @@ def wait_while_process_running(process) result end end + + def inject(local_file, remote_file, user = "root", dots = true) + if @in_has_uyunictl + tmp_folder, _code = run_local("mktemp -d") + tmp_file = File.join(tmp_folder.strip, File.basename(local_file)) + code, _remote = inject_file(local_file, tmp_file, user, dots) + if code.zero? + _out, code = run_local("uyunictl cp --user #{user} #{tmp_file} server:#{remote_file}") + end + run_local("rm -r #{tmp_folder}") + else + code, _remote = inject_file(local_file, remote_file, user, dots) + end + code + end end diff --git a/testsuite/features/support/twopence_init.rb b/testsuite/features/support/twopence_init.rb index 34ccd1be4e38..c3551bb0de7e 100644 --- a/testsuite/features/support/twopence_init.rb +++ b/testsuite/features/support/twopence_init.rb @@ -260,7 +260,7 @@ def file_extract(node, remote_file, local_file) # This function injects a file into a node def file_inject(node, local_file, remote_file) - code, _remote = node.inject_file(local_file, remote_file, 'root', false) + code = node.inject(local_file, remote_file, 'root', false) code end From a6748acc99994f830825e0aa6e92dfa9f1467c3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 30 Mar 2023 10:58:49 +0200 Subject: [PATCH 21/80] testsuite: raise an exception again if the system doesn't exist --- testsuite/features/step_definitions/navigation_steps.rb | 3 +-- testsuite/features/support/twopence_init.rb | 9 ++------- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/testsuite/features/step_definitions/navigation_steps.rb b/testsuite/features/step_definitions/navigation_steps.rb index ed095647bc65..681fd6d790cb 100644 --- a/testsuite/features/step_definitions/navigation_steps.rb +++ b/testsuite/features/step_definitions/navigation_steps.rb @@ -452,9 +452,8 @@ system_name = get_system_name(host) rescue raise "Host #{host} not found" if if_present.empty? 
- log "Host #{host} is not deployed, not trying to select it" - return + next end step %(I select "#{system_name}" from "#{field}") end diff --git a/testsuite/features/support/twopence_init.rb b/testsuite/features/support/twopence_init.rb index c3551bb0de7e..1adced9e8ba0 100644 --- a/testsuite/features/support/twopence_init.rb +++ b/testsuite/features/support/twopence_init.rb @@ -199,13 +199,8 @@ def get_system_name(host) when 'containerized_proxy' system_name = $proxy.full_hostname.sub('pxy', 'pod-pxy') else - begin - node = get_target(host) - system_name = node.full_hostname - rescue RuntimeError - # If the node for that host is not defined, just return the host parameter as system_name - system_name = host - end + node = get_target(host) + system_name = node.full_hostname end system_name end From eccc05430e8f90a89b0577982f7e7d7143cb135a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Fri, 31 Mar 2023 17:15:50 +0200 Subject: [PATCH 22/80] testsuite: SSL proxy certificate generation for k3s --- .../step_definitions/command_steps.rb | 88 +++++++++++++++---- 1 file changed, 72 insertions(+), 16 deletions(-) diff --git a/testsuite/features/step_definitions/command_steps.rb b/testsuite/features/step_definitions/command_steps.rb index 2704bb975bbd..c03756d036f1 100644 --- a/testsuite/features/step_definitions/command_steps.rb +++ b/testsuite/features/step_definitions/command_steps.rb @@ -1002,35 +1002,91 @@ end When(/^I copy server\'s keys to the proxy$/) do - %w[RHN-ORG-PRIVATE-SSL-KEY RHN-ORG-TRUSTED-SSL-CERT rhn-ca-openssl.cnf].each do |file| - return_code = file_extract($server, '/root/ssl-build/' + file, '/tmp/' + file) - raise 'File extraction failed' unless return_code.zero? - $proxy.run('mkdir -p /root/ssl-build') - return_code = file_inject($proxy, '/tmp/' + file, '/root/ssl-build/' + file) - raise 'File injection failed' unless return_code.zero? + _out, code = $server.run_local("systemctl is-active k3s") + if code.zero? + # Server running in Kubernetes doesn't know anything about SSL CA + certificate = "apiVersion: cert-manager.io/v1\\n"\ + "kind: Certificate\\n"\ + "metadata:\\n"\ + " name: uyuni-proxy\\n"\ + "spec:\\n"\ + " secretName: uyuni-proxy-cert\\n"\ + " subject:\\n"\ + " countries: ['DE']\\n"\ + " provinces: ['Bayern']\\n"\ + " localities: ['Nuernberg']\\n"\ + " organizations: ['SUSE']\\n"\ + " organizationalUnits: ['SUSE']\\n"\ + " emailAddresses:\\n"\ + " - galaxy-noise@suse.de\\n"\ + " commonName: #{$proxy.full_hostname}\\n"\ + " dnsNames:\\n"\ + " - #{$proxy.full_hostname}\\n"\ + " issuerRef:\\n"\ + " name: uyuni-ca-issuer\\n"\ + " kind: Issuer" + _out, return_code = $server.run_local("echo -e \"#{certificate}\" | kubectl apply -f -") + raise 'Failed to define proxy Certificate resource' unless return_code.zero? + # cert-manager takes some time to generate the secret, wait for it before continuing + repeat_until_timeout(timeout: 600, message: "Kubernetes uyuni-proxy-cert secret has not been defined") do + _result, code = $server.run_local("kubectl get secret uyuni-proxy-cert", check_errors: false) + break if code.zero? + sleep 1 + end + _out, return_code = $server.run_local("kubectl get secret uyuni-proxy-cert -o jsonpath='{.data.tls\\.crt}' | base64 -d >/tmp/proxy.crt") + raise 'Failed to store proxy certificate' unless return_code.zero? + _out, return_code = $server.run_local("kubectl get secret uyuni-proxy-cert -o jsonpath='{.data.tls\\.key}' | base64 -d >/tmp/proxy.key") + raise 'Failed to store proxy key' unless return_code.zero? 
+ _out, return_code = $server.run_local("kubectl get secret uyuni-proxy-cert -o jsonpath='{.data.ca\\.crt}' | base64 -d >/tmp/ca.crt") + raise 'Failed to store CA certificate' unless return_code.zero? + + %w[proxy.crt proxy.key ca.crt].each do |file| + return_code, = $server.extract_file("/tmp/#{file}", "/tmp/#{file}") + raise 'File extraction failed' unless return_code.zero? + return_code = file_inject($proxy, "/tmp/#{file}", "/tmp/#{file}") + raise 'File injection failed' unless return_code.zero? + end + else + %w[RHN-ORG-PRIVATE-SSL-KEY RHN-ORG-TRUSTED-SSL-CERT rhn-ca-openssl.cnf].each do |file| + return_code = file_extract($server, '/root/ssl-build/' + file, '/tmp/' + file) + raise 'File extraction failed' unless return_code.zero? + $proxy.run('mkdir -p /root/ssl-build') + return_code = file_inject($proxy, '/tmp/' + file, '/root/ssl-build/' + file) + raise 'File injection failed' unless return_code.zero? + end end end When(/^I configure the proxy$/) do + _out, code = $server.run_local("systemctl is-active k3s") + # prepare the settings file settings = "RHN_PARENT=#{$server.full_hostname}\n" \ "HTTP_PROXY=''\n" \ "VERSION=''\n" \ "TRACEBACK_EMAIL=galaxy-noise@suse.de\n" \ - "USE_EXISTING_CERTS=n\n" \ "INSTALL_MONITORING=n\n" \ - "SSL_PASSWORD=spacewalk\n" \ - "SSL_ORG=SUSE\n" \ - "SSL_ORGUNIT=SUSE\n" \ - "SSL_COMMON=#{$proxy.full_hostname}\n" \ - "SSL_CITY=Nuremberg\n" \ - "SSL_STATE=Bayern\n" \ - "SSL_COUNTRY=DE\n" \ - "SSL_EMAIL=galaxy-noise@suse.de\n" \ - "SSL_CNAME_ASK=proxy.example.org\n" \ "POPULATE_CONFIG_CHANNEL=y\n" \ "RHN_USER=admin\n" \ "ACTIVATE_SLP=y\n" + if code.zero? + settings += "USE_EXISTING_CERTS=y\n" \ + "CA_CERT=/tmp/ca.crt\n" \ + "SERVER_KEY=/tmp/proxy.key\n" \ + "SERVER_CERT=/tmp/proxy.crt\n" + else + settings += "USE_EXISTING_CERTS=n\n" \ + "INSTALL_MONITORING=n\n" \ + "SSL_PASSWORD=spacewalk\n" \ + "SSL_ORG=SUSE\n" \ + "SSL_ORGUNIT=SUSE\n" \ + "SSL_COMMON=#{$proxy.full_hostname}\n" \ + "SSL_CITY=Nuremberg\n" \ + "SSL_STATE=Bayern\n" \ + "SSL_COUNTRY=DE\n" \ + "SSL_EMAIL=galaxy-noise@suse.de\n" \ + "SSL_CNAME_ASK=proxy.example.org\n" + end path = generate_temp_file('config-answers.txt', settings) step 'I copy "' + path + '" to "proxy"' `rm #{path}` From 209cc8c43663a46f2c76ee26cfa897f752695b3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Fri, 31 Mar 2023 17:16:22 +0200 Subject: [PATCH 23/80] testsuite: add file_extract support for containers --- testsuite/features/support/lavanda.rb | 18 ++++++++++++++++++ testsuite/features/support/twopence_init.rb | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/testsuite/features/support/lavanda.rb b/testsuite/features/support/lavanda.rb index b94e15b62af7..9183eae7459e 100644 --- a/testsuite/features/support/lavanda.rb +++ b/testsuite/features/support/lavanda.rb @@ -236,6 +236,7 @@ def inject(local_file, remote_file, user = "root", dots = true) code, _remote = inject_file(local_file, tmp_file, user, dots) if code.zero? _out, code = run_local("uyunictl cp --user #{user} #{tmp_file} server:#{remote_file}") + raise "Failed to copy #{tmp_file} to container" unless code.zero? end run_local("rm -r #{tmp_folder}") else @@ -243,4 +244,21 @@ def inject(local_file, remote_file, user = "root", dots = true) end code end + + def extract(remote_file, local_file, user = "root", dots = true) + if @in_has_uyunictl + tmp_folder, _code = run_local("mktemp -d") + tmp_file = File.join(tmp_folder.strip, File.basename(remote_file)) + if code.zero? 
+ _out, code = run_local("uyunictl cp --user #{user} server:#{remote_file} #{tmp_file}") + raise "Failed to extract #{remote_file} from container" unless code.zero? + code, _remote = extract_file(tmp_file, local_file, user, dots) + raise "Failed to extract #{tmp_file} from host" unless code.zero? + end + run_local("rm -r #{tmp_folder}") + else + code, _local = extract_file(remote_file, local_file, user, dots) + end + code + end end diff --git a/testsuite/features/support/twopence_init.rb b/testsuite/features/support/twopence_init.rb index 1adced9e8ba0..86929b2787c1 100644 --- a/testsuite/features/support/twopence_init.rb +++ b/testsuite/features/support/twopence_init.rb @@ -249,7 +249,7 @@ def folder_delete(node, folder) # This function extracts a file from a node def file_extract(node, remote_file, local_file) - code, _remote = node.extract_file(remote_file, local_file, 'root', false) + code, _remote = node.extract(remote_file, local_file, 'root', false) code end From 26caaf13c4ba6e2f75e83eee88ec300bb34cdcad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 5 Apr 2023 17:20:45 +0200 Subject: [PATCH 24/80] testsuite: use java.hostname to get the server fqdn Since kubernetes uses the pod name as hostname the hostname ahdn hostname -f returns non-sense on such servers. Use the java.hostname from the rhn.conf as it has to be set to the publicly visible FQDN anyway. --- testsuite/features/support/twopence_init.rb | 46 ++++++++++++--------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/testsuite/features/support/twopence_init.rb b/testsuite/features/support/twopence_init.rb index 86929b2787c1..f0c6c5f63fd9 100644 --- a/testsuite/features/support/twopence_init.rb +++ b/testsuite/features/support/twopence_init.rb @@ -139,25 +139,38 @@ def twopence_init(target) node.extend(LavandaBasic) end +_out, code = $server.run('which uyunictl', check_errors: false) +if code.zero? + $server.init_has_uyunictl +end + # Initialize hostname $nodes.each do |node| next if node.nil? - hostname, local, remote, code = node.test_and_store_results_together('hostname', 'root', 500) - # special handling for nested VMs since they will only be crated later in the test suite - # we to a late hostname initialization in a special step for those - next if hostname.empty? || node == $salt_migration_minion - - raise "Cannot connect to get hostname for '#{$named_nodes[node.hash]}'. Response code: #{code}, local: #{local}, remote: #{remote}" if code.nonzero? || remote.nonzero? || local.nonzero? - raise "No hostname for '#{$named_nodes[node.hash]}'. Response code: #{code}" if hostname.empty? - node.init_hostname(hostname) - - fqdn, local, remote, code = node.test_and_store_results_together('hostname -f', 'root', 500) - raise "Cannot connect to get FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}, local: #{local}, remote: #{remote}" if code.nonzero? || remote.nonzero? || local.nonzero? - raise "No FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}" if fqdn.empty? - node.init_full_hostname(fqdn) + if node == $server + fqdn, code = node.run('sed -n \'s/^java.hostname *= *\(.\+\)$/\1/p\' /etc/rhn/rhn.conf') + raise "Cannot connect to get FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}, local: #{local}, remote: #{remote}" if code.nonzero? + raise "No FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}" if fqdn.empty? 
+ node.init_full_hostname(fqdn) + node.init_hostname(fqdn.split(".")[0]) + else + hostname, local, remote, code = node.test_and_store_results_together('hostname', 'root', 500) + # special handling for nested VMs since they will only be crated later in the test suite + # we to a late hostname initialization in a special step for those + next if hostname.empty? || node == $salt_migration_minion + + raise "Cannot connect to get hostname for '#{$named_nodes[node.hash]}'. Response code: #{code}, local: #{local}, remote: #{remote}" if code.nonzero? || remote.nonzero? || local.nonzero? + raise "No hostname for '#{$named_nodes[node.hash]}'. Response code: #{code}" if hostname.empty? + node.init_hostname(hostname) + + fqdn, local, remote, code = node.test_and_store_results_together('hostname -f', 'root', 500) + raise "Cannot connect to get FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}, local: #{local}, remote: #{remote}" if code.nonzero? || remote.nonzero? || local.nonzero? + raise "No FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}" if fqdn.empty? + node.init_full_hostname(fqdn) + end - STDOUT.puts "Host '#{$named_nodes[node.hash]}' is alive with determined hostname #{hostname.strip} and FQDN #{fqdn.strip}" unless $build_validation + STDOUT.puts "Host '#{$named_nodes[node.hash]}' is alive with determined hostname #{node.hostname} and FQDN #{node.full_hostname}" unless $build_validation os_version, os_family = get_os_version(node) node.init_os_family(os_family) node.init_os_version(os_version) @@ -259,11 +272,6 @@ def file_inject(node, local_file, remote_file) code end -_out, code = $server.run('which uyunictl', check_errors: false) -if code.zero? - $server.init_has_uyunictl -end - # Other global variables $product = product $product_version = product_version From ceedc3fce069a82303f28e349ab0a86693ee5384 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 5 Apr 2023 17:38:22 +0200 Subject: [PATCH 25/80] testsuite: use run_local to get the ip address We don't want to get the internal IP address of the server container, but the public one of the host, so use run_local. --- testsuite/features/support/twopence_init.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testsuite/features/support/twopence_init.rb b/testsuite/features/support/twopence_init.rb index f0c6c5f63fd9..15eefc14f427 100644 --- a/testsuite/features/support/twopence_init.rb +++ b/testsuite/features/support/twopence_init.rb @@ -372,7 +372,7 @@ def client_public_ip(host) raise "Cannot resolve node for host '#{host}'" if node.nil? %w[br0 eth0 eth1 ens0 ens1 ens2 ens3 ens4 ens5 ens6].each do |dev| - output, code = node.run("ip address show dev #{dev} | grep 'inet '", check_errors: false) + output, code = node.run_local("ip address show dev #{dev} | grep 'inet '", check_errors: false) next unless code.zero? 
node.init_public_interface(dev) From 3afb6d4a7299c1e057742803f15f7a867304d05c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 5 Apr 2023 21:13:17 +0200 Subject: [PATCH 26/80] testsuite: ignore errors when checking for k3s --- testsuite/features/step_definitions/command_steps.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testsuite/features/step_definitions/command_steps.rb b/testsuite/features/step_definitions/command_steps.rb index c03756d036f1..4574ccd53530 100644 --- a/testsuite/features/step_definitions/command_steps.rb +++ b/testsuite/features/step_definitions/command_steps.rb @@ -1002,7 +1002,7 @@ end When(/^I copy server\'s keys to the proxy$/) do - _out, code = $server.run_local("systemctl is-active k3s") + _out, code = $server.run_local("systemctl is-active k3s", check_errors: false) if code.zero? # Server running in Kubernetes doesn't know anything about SSL CA certificate = "apiVersion: cert-manager.io/v1\\n"\ @@ -1058,7 +1058,7 @@ end When(/^I configure the proxy$/) do - _out, code = $server.run_local("systemctl is-active k3s") + _out, code = $server.run_local("systemctl is-active k3s", check_errors: false) # prepare the settings file settings = "RHN_PARENT=#{$server.full_hostname}\n" \ From 1da3f95ade5e3d0a8f476616235a67df701c1b56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 6 Apr 2023 09:34:50 +0200 Subject: [PATCH 27/80] lavanda: fix extract() function --- testsuite/features/support/lavanda.rb | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/testsuite/features/support/lavanda.rb b/testsuite/features/support/lavanda.rb index 9183eae7459e..77f2faf00af9 100644 --- a/testsuite/features/support/lavanda.rb +++ b/testsuite/features/support/lavanda.rb @@ -249,12 +249,10 @@ def extract(remote_file, local_file, user = "root", dots = true) if @in_has_uyunictl tmp_folder, _code = run_local("mktemp -d") tmp_file = File.join(tmp_folder.strip, File.basename(remote_file)) - if code.zero? - _out, code = run_local("uyunictl cp --user #{user} server:#{remote_file} #{tmp_file}") - raise "Failed to extract #{remote_file} from container" unless code.zero? - code, _remote = extract_file(tmp_file, local_file, user, dots) - raise "Failed to extract #{tmp_file} from host" unless code.zero? - end + _out, code = run_local("uyunictl cp --user #{user} server:#{remote_file} #{tmp_file}") + raise "Failed to extract #{remote_file} from container" unless code.zero? + code, _remote = extract_file(tmp_file, local_file, user, dots) + raise "Failed to extract #{tmp_file} from host" unless code.zero? 
run_local("rm -r #{tmp_folder}") else code, _local = extract_file(remote_file, local_file, user, dots) From 3ae110740827cc090d7bba059a4ec4ea9ef0e89d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 6 Apr 2023 11:00:13 +0200 Subject: [PATCH 28/80] sanity checks: don't check for server hostname The server hostname cannot be used in a kubernetes container as it is set to the pod name, the FQDN and hostname need to be used from `rhn.conf` --- testsuite/features/core/allcli_sanity.feature | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testsuite/features/core/allcli_sanity.feature b/testsuite/features/core/allcli_sanity.feature index 4949116b4fca..e28d0750c9f5 100644 --- a/testsuite/features/core/allcli_sanity.feature +++ b/testsuite/features/core/allcli_sanity.feature @@ -6,8 +6,7 @@ Feature: Sanity checks I want to be sure to use a sane environment Scenario: The server is healthy - Then "server" should have a FQDN - And reverse resolution should work for "server" + Then reverse resolution should work for "server" And the clock from "server" should be exact And service "apache2" is enabled on "server" And service "apache2" is active on "server" From 31225b0f92f85b76e7f246fe814b4fded1b936d7 Mon Sep 17 00:00:00 2001 From: Artem Shiliaev Date: Thu, 6 Apr 2023 12:06:10 +0200 Subject: [PATCH 29/80] server container: enable monitoring in the image Enable monitoring in the container image to avoid the need of a few more volumes. --- .../server-helm/templates/deployment.yaml | 5 ++ .../templates/k3s-ingress-routes.yaml | 44 ++++++++++++++++ containers/server-helm/templates/service.yaml | 14 +++++ containers/server-helm/values.yaml | 3 ++ containers/server-image/Dockerfile | 16 +++++- containers/server-image/java_agent.yaml | 6 +++ containers/server-image/postgres-exporter | 19 +++++++ .../postgres_exporter_queries.yaml | 52 +++++++++++++++++++ containers/server-image/taskomatic_jmx.conf | 2 + containers/server-image/tomcat_jmx.conf | 2 + .../uyuni-server.service | 3 ++ 11 files changed, 165 insertions(+), 1 deletion(-) create mode 100644 containers/server-image/java_agent.yaml create mode 100644 containers/server-image/postgres-exporter create mode 100644 containers/server-image/postgres_exporter_queries.yaml create mode 100644 containers/server-image/taskomatic_jmx.conf create mode 100644 containers/server-image/tomcat_jmx.conf diff --git a/containers/server-helm/templates/deployment.yaml b/containers/server-helm/templates/deployment.yaml index bdf859b2c46d..4c3c7c349c4a 100644 --- a/containers/server-helm/templates/deployment.yaml +++ b/containers/server-helm/templates/deployment.yaml @@ -361,6 +361,11 @@ spec: - containerPort: 4505 - containerPort: 4506 - containerPort: 69 +{{- if .Values.enableMonitoring | default true }} + - containerPort: 9100 + - containerPort: 9187 + - containerPort: 9800 +{{- end }} protocol: UDP - containerPort: 25151 - containerPort: 5432 diff --git a/containers/server-helm/templates/k3s-ingress-routes.yaml b/containers/server-helm/templates/k3s-ingress-routes.yaml index 70ebaba9b3bd..dd5c355b2ba4 100644 --- a/containers/server-helm/templates/k3s-ingress-routes.yaml +++ b/containers/server-helm/templates/k3s-ingress-routes.yaml @@ -64,6 +64,50 @@ spec: services: - name: uyuni-tcp port: 25151 +{{- if .Values.enableMonitoring | default true }} +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: node-exporter-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - 
node-exporter + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 9100 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: postgres-exporter-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - postgres-exporter + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 9187 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: taskomatic-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - taskomatic + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 9800 +{{- end }} {{- if .Values.exposeJavaDebug }} --- apiVersion: traefik.containo.us/v1alpha1 diff --git a/containers/server-helm/templates/service.yaml b/containers/server-helm/templates/service.yaml index f74d4ba9e136..a0fa7301d8b7 100644 --- a/containers/server-helm/templates/service.yaml +++ b/containers/server-helm/templates/service.yaml @@ -31,6 +31,20 @@ spec: port: 5432 protocol: TCP targetPort: 5432 +{{- if .Values.enableMonitoring | default true }} + - name: node-exporter + port: 9100 + protocol: TCP + targetPort: 9100 + - name: postgres-exporter + port: 9187 + protocol: TCP + targetPort: 9187 + - name: taskomatic + port: 9800 + protocol: TCP + targetPort: 9800 +{{- end }} {{- if .Values.exposeJavaDebug | default false }} - name: tomcat-debug port: 8000 diff --git a/containers/server-helm/values.yaml b/containers/server-helm/values.yaml index 7c4bf2a69573..5a5e59f492c7 100644 --- a/containers/server-helm/values.yaml +++ b/containers/server-helm/values.yaml @@ -53,6 +53,9 @@ matchPvByLabel: false ## to tomcat and taskomatic respectively # exposeJavaDebug: true +## enableMonitoring will expose the 9100 9187 5556 5557 9500 9800 ports for prometheus to scrape +enableMonitoring: true + ## ingress defines the ingress that is used in the cluster. ## It can be either "nginx", "traefik" or any other value. 
ingress: "traefik" diff --git a/containers/server-image/Dockerfile b/containers/server-image/Dockerfile index 14bef32943c6..d72c20bd012a 100644 --- a/containers/server-image/Dockerfile +++ b/containers/server-image/Dockerfile @@ -54,7 +54,21 @@ RUN zypper --gpg-auto-import-keys --non-interactive install --auto-agree-with-li RUN sed -i 's/sysctl kernel.shmmax/#sysctl kernel.shmmax/g' /usr/bin/uyuni-setup-reportdb -RUN systemctl enable uyuni-setup +RUN mkdir -p /etc/postgres_exporter \ + /etc/prometheus-jmx_exporter/tomcat \ + /usr/lib/systemd/system/tomcat.service.d \ + /etc/prometheus-jmx_exporter/taskomatic \ + /usr/lib/systemd/system/taskomatic.service.d + +COPY postgres_exporter_queries.yaml /etc/postgres_exporter/postgres_exporter_queries.yaml +COPY postgres-exporter /etc/sysconfig/prometheus-postgres_exporter +COPY java_agent.yaml /etc/prometheus-jmx_exporter/tomcat/java_agent.yml +COPY java_agent.yaml /etc/prometheus-jmx_exporter/taskomatic/java_agent.yml +COPY tomcat_jmx.conf /usr/lib/systemd/system/tomcat.service.d/jmx.conf +COPY taskomatic_jmx.conf /usr/lib/systemd/system/taskomatic.service.d/jmx.conf + +RUN systemctl enable prometheus-node_exporter; \ + systemctl enable uyuni-setup # LABELs ARG PRODUCT=Uyuni diff --git a/containers/server-image/java_agent.yaml b/containers/server-image/java_agent.yaml new file mode 100644 index 000000000000..50cd72ebba9f --- /dev/null +++ b/containers/server-image/java_agent.yaml @@ -0,0 +1,6 @@ +whitelistObjectNames: + - java.lang:type=Threading,* + - java.lang:type=Memory,* + - Catalina:type=ThreadPool,name=* +rules: + - pattern: ".*" diff --git a/containers/server-image/postgres-exporter b/containers/server-image/postgres-exporter new file mode 100644 index 000000000000..4a8011acf428 --- /dev/null +++ b/containers/server-image/postgres-exporter @@ -0,0 +1,19 @@ +## Path: Applications/PostgreSQLExporter +## Description: Prometheus exporter for PostgreSQL +## Type: string() +## Default: "postgresql://user:passwd@localhost:5432/database?sslmode=disable" +## ServiceRestart: postgres-exporter +# +# Connection URL to postgresql instance +# +DATA_SOURCE_NAME="postgresql://spacewalk:spacewalk@localhost:5432/susemanager?sslmode=disable" + +## Path: Applications/PostgreSQLExporter +## Description: Prometheus exporter for PostgreSQL +## Type: string() +## Default: "" +## ServiceRestart: postgres-exporter +# +# Extra options for postgres-exporter +# +POSTGRES_EXPORTER_PARAMS="--extend.query-path /etc/postgres_exporter/postgres_exporter_queries.yaml" diff --git a/containers/server-image/postgres_exporter_queries.yaml b/containers/server-image/postgres_exporter_queries.yaml new file mode 100644 index 000000000000..f6b3d362880f --- /dev/null +++ b/containers/server-image/postgres_exporter_queries.yaml @@ -0,0 +1,52 @@ +mgr_serveractions: + query: | + SELECT ( + SELECT COUNT(*) + FROM rhnServerAction + WHERE status = ( + SELECT id FROM rhnActionStatus WHERE name = 'Queued' + ) + ) AS queued, + ( + SELECT COUNT(*) + FROM rhnServerAction + WHERE status = ( + SELECT id FROM rhnActionStatus WHERE name = 'Picked Up' + ) + ) AS picked_up, + ( + SELECT COUNT(*) + FROM rhnServerAction + WHERE status = ( + SELECT id FROM rhnActionStatus WHERE name IN ('Completed') + ) + ) AS completed, + ( + SELECT COUNT(*) + FROM rhnServerAction + WHERE status = ( + SELECT id FROM rhnActionStatus WHERE name IN ('Failed') + ) + ) AS failed; + metrics: + - queued: + usage: "GAUGE" + description: "Count of queued Actions" + - picked_up: + usage: "GAUGE" + description: "Count of picked 
up Actions" + - completed: + usage: "COUNTER" + description: "Count of completed Actions" + - failed: + usage: "COUNTER" + description: "Count of failed Actions" + salt_events: + query: | + SELECT COUNT(*) + FROM suseSaltEvent + AS salt_events_count; + metrics: + - salt_events_count: + usage: "GAUGE" + description: "Count of suse salt events" diff --git a/containers/server-image/taskomatic_jmx.conf b/containers/server-image/taskomatic_jmx.conf new file mode 100644 index 000000000000..7f19d11ddb83 --- /dev/null +++ b/containers/server-image/taskomatic_jmx.conf @@ -0,0 +1,2 @@ +[Service] +Environment="JAVA_AGENT=-javaagent:/usr/share/java/jmx_prometheus_javaagent.jar=5557:/etc/prometheus-jmx_exporter/taskomatic/java_agent.yml" diff --git a/containers/server-image/tomcat_jmx.conf b/containers/server-image/tomcat_jmx.conf new file mode 100644 index 000000000000..a31b816897fe --- /dev/null +++ b/containers/server-image/tomcat_jmx.conf @@ -0,0 +1,2 @@ +[Service] +Environment="CATALINA_OPTS=-javaagent:/usr/share/java/jmx_prometheus_javaagent.jar=5556:/etc/prometheus-jmx_exporter/tomcat/java_agent.yml" diff --git a/containers/server-systemd-services/uyuni-server.service b/containers/server-systemd-services/uyuni-server.service index 9993e7f63b35..cb05503fcaf0 100644 --- a/containers/server-systemd-services/uyuni-server.service +++ b/containers/server-systemd-services/uyuni-server.service @@ -30,6 +30,9 @@ ExecStart=/usr/bin/podman run \ -p 69:69 \ -p 25151:25151 \ -p 5432:5432 \ + -p 9100:9100 \ + -p 9187:9187 \ + -p 9800:9800 \ -v cgroup:/sys/fs/cgroup:rw \ -v pgsql:/var/lib/pgsql \ -v var-cache:/var/cache \ From c2e7e1a5d484f967701795a8785f9ea21ac67ab7 Mon Sep 17 00:00:00 2001 From: Ondrej Holecek Date: Wed, 19 Apr 2023 15:26:25 +0200 Subject: [PATCH 30/80] Use curl instead of wget to download bootstrap script - curl -O always stores file to its filename. 
When using wget if the file exists it will append .1 and so on to prevent overwriting local file --- testsuite/features/step_definitions/command_steps.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testsuite/features/step_definitions/command_steps.rb b/testsuite/features/step_definitions/command_steps.rb index 4574ccd53530..1b1c4403707d 100644 --- a/testsuite/features/step_definitions/command_steps.rb +++ b/testsuite/features/step_definitions/command_steps.rb @@ -455,7 +455,7 @@ When(/^I fetch "([^"]*)" to "([^"]*)"$/) do |file, host| node = get_target(host) - node.run("wget http://#{$server.full_hostname}/#{file}") + node.run("curl -s -O http://#{$server.full_hostname}/#{file}") end When(/^I wait until file "([^"]*)" contains "([^"]*)" on server$/) do |file, content| From b0cf9c48f24f41b3164048aede0607f9f2508944 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 15 Feb 2023 21:37:46 +0100 Subject: [PATCH 31/80] Cleanup the volume definitions --- .../server-helm/templates/deployment.yaml | 21 ++++++++++++++++ containers/server-helm/templates/volumes.yaml | 24 +++++++++++++++++++ .../uyuni-server.service | 20 +++++++--------- 3 files changed, 53 insertions(+), 12 deletions(-) diff --git a/containers/server-helm/templates/deployment.yaml b/containers/server-helm/templates/deployment.yaml index 4c3c7c349c4a..408f2f2d4347 100644 --- a/containers/server-helm/templates/deployment.yaml +++ b/containers/server-helm/templates/deployment.yaml @@ -36,6 +36,22 @@ spec: name: etc-tls - name: tls-key mountPath: /etc/pki/spacewalk-tls + - name: init-var-cobbler + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/var/lib/cobbler /mnt; + chmod --reference=/var/lib/cobbler /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/lib/cobbler/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-cobbler - name: init-var-pgsql image: {{- include "deployment.container.image" (dict "name" "server" "global" .) 
| indent 1}} imagePullPolicy: {{ .Values.pullPolicy }} @@ -388,6 +404,8 @@ spec: name: tmp - mountPath: /sys/fs/cgroup name: cgroup + - mountPath: /var/lib/cobbler + name: var-cobbler - mountPath: /var/lib/pgsql name: var-pgsql - mountPath: /var/cache @@ -451,6 +469,9 @@ spec: hostPath: path: /sys/fs/cgroup type: Directory + - name: var-cobbler + persistentVolumeClaim: + claimName: var-cobbler - name: var-pgsql persistentVolumeClaim: claimName: var-pgsql diff --git a/containers/server-helm/templates/volumes.yaml b/containers/server-helm/templates/volumes.yaml index d750805f1501..c909be28363e 100644 --- a/containers/server-helm/templates/volumes.yaml +++ b/containers/server-helm/templates/volumes.yaml @@ -1,5 +1,29 @@ apiVersion: v1 kind: PersistentVolumeClaim +metadata: + name: var-cobbler + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-cobbler +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim metadata: name: var-pgsql namespace: "{{ .Release.Namespace }}" diff --git a/containers/server-systemd-services/uyuni-server.service b/containers/server-systemd-services/uyuni-server.service index cb05503fcaf0..0779054efb9f 100644 --- a/containers/server-systemd-services/uyuni-server.service +++ b/containers/server-systemd-services/uyuni-server.service @@ -34,9 +34,11 @@ ExecStart=/usr/bin/podman run \ -p 9187:9187 \ -p 9800:9800 \ -v cgroup:/sys/fs/cgroup:rw \ - -v pgsql:/var/lib/pgsql \ + -v var-lib-cobbler:/var/lib/cobbler \ + -v var-pgsql:/var/lib/pgsql \ -v var-cache:/var/cache \ -v var-spacewalk:/var/spacewalk \ + -v var-log:/var/log \ -v srv-salt:/srv/salt \ -v srv-www-pub:/srv/www/htdocs/pub \ -v srv-www-cobbler:/srv/www/cobbler \ @@ -46,22 +48,16 @@ ExecStart=/usr/bin/podman run \ -v srv-pillar:/srv/pillar \ -v srv-susemanager:/srv/susemanager \ -v srv-spacewalk:/srv/spacewalk \ + -v root:/root \ + -v etc-apache2:/etc/apache2 \ -v etc-rhn:/etc/rhn \ -v etc-systemd:/etc/systemd/system/multi-user.target.wants \ - -v var-log-rhn:/var/log/rhn \ -v etc-salt:/etc/salt \ - -v apache2:/etc/apache2 \ - -v tomcat:/etc/tomcat \ - -v etc-tls:/etc/pki/tls \ - -v ca-cert:/etc/pki/trust/anchors/ \ - -v tls-key:/etc/pki/spacewalk-tls \ - -v uyuni-config:/root \ - -v tomcat-monitoring:/usr/lib/systemd/system/tomcat.service.d \ - -v taskomatic-monitoring:/usr/lib/systemd/system/taskomatic.service.d \ + -v etc-tomcat:/etc/tomcat \ -v etc-cobbler:/etc/cobbler \ - -v var-lib-cobbler:/var/lib/cobbler \ - -v home:/home \ -v etc-sysconfig:/etc/sysconfig \ + -v etc-tls:/etc/pki/tls \ + -v ca-cert:/etc/pki/trust/anchors/ \ --env-host \ --hostname ${UYUNI_FQDN} \ $EXTRA_POD_ARGS \ From db35e32936723c98d48e59b26b39249406529c64 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 1 Jun 2023 10:55:58 +0200 Subject: [PATCH 32/80] Migrate uyuni server to container (#7009) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix for migration --------- Co-authored-by: Cédric Bosdonnat --- .../uyuni-server-services.config | 42 ++++++++++--------- .../uyuni-server.service | 2 +- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/containers/server-systemd-services/uyuni-server-services.config 
b/containers/server-systemd-services/uyuni-server-services.config index f8fd4f2904ce..5c86694533da 100644 --- a/containers/server-systemd-services/uyuni-server-services.config +++ b/containers/server-systemd-services/uyuni-server-services.config @@ -15,24 +15,26 @@ TAG=latest # Add -p 8000:8000 -p 8001:8001 to enable java remote debugging EXTRA_POD_ARGS='' +#all these fields are required if it's a migration and should match with the current migrated instance +REPORT_DB_PASS=pythia_susemanager # Initial setup configuration options -MANAGER_USER="spacewalk" -MANAGER_PASS="spacewalk" -MANAGER_ADMIN_EMAIL="galaxy-noise@suse.de" -CERT_O="SUSE" -CERT_OU="SUSE" -CERT_CITY="Nuernberg" -CERT_STATE="Bayern" -CERT_COUNTRY="DE" -CERT_EMAIL="galaxy-noise@suse.de" -CERT_PASS="spacewalk" -USE_EXISTING_CERTS="N" -MANAGER_DB_NAME="susemanager" -MANAGER_DB_HOST="localhost" -MANAGER_DB_PORT="5432" -MANAGER_DB_PROTOCOL="TCP" -MANAGER_ENABLE_TFTP="Y" -SCC_USER="" -SCC_PASS="" -REPORT_DB_HOST="uyuni-server" -UYUNI_FQDN="uyuni-server" +MANAGER_USER=spacewalk +MANAGER_PASS=spacewalk +MANAGER_ADMIN_EMAIL=galaxy-noise@suse.de +CERT_O=SUSE +CERT_OU=SUSE +CERT_CITY=Nuernberg +CERT_STATE=Bayern +CERT_COUNTRY=DE +CERT_EMAIL=galaxy-noise@suse.de +CERT_PASS=spacewalk +USE_EXISTING_CERTS=N +MANAGER_DB_NAME=susemanager +MANAGER_DB_HOST=localhost +MANAGER_DB_PORT=5432 +MANAGER_DB_PROTOCOL=TCP +MANAGER_ENABLE_TFTP=Y +SCC_USER= +SCC_PASS= +REPORT_DB_HOST=uyuni-server +UYUNI_FQDN=uyuni-server diff --git a/containers/server-systemd-services/uyuni-server.service b/containers/server-systemd-services/uyuni-server.service index 0779054efb9f..083c4c79bdb0 100644 --- a/containers/server-systemd-services/uyuni-server.service +++ b/containers/server-systemd-services/uyuni-server.service @@ -20,8 +20,8 @@ ExecStart=/usr/bin/podman run \ --cgroups=no-conmon \ --rm \ --sdnotify=conmon \ + --cap-add NET_RAW \ -d \ - --replace \ --tmpfs /run \ -p 443:443 \ -p 80:80 \ From cd2d2320198a15e73b74f5e1c2e4edf2778bed49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 7 Jun 2023 10:34:19 +0200 Subject: [PATCH 33/80] Use uyunictl for testsuite file checks --- testsuite/features/support/lavanda.rb | 40 +++++++++++++++++++++ testsuite/features/support/twopence_init.rb | 18 ++++------ 2 files changed, 46 insertions(+), 12 deletions(-) diff --git a/testsuite/features/support/lavanda.rb b/testsuite/features/support/lavanda.rb index 77f2faf00af9..090fc235dafa 100644 --- a/testsuite/features/support/lavanda.rb +++ b/testsuite/features/support/lavanda.rb @@ -259,4 +259,44 @@ def extract(remote_file, local_file, user = "root", dots = true) end code end + + def file_exists(file) + if @in_has_uyunictl + _out, code = run_local("uyunictl exec -- 'test -f #{file}'", check_errors: false) + exists = code.zero? + else + _out, local, _remote, code = test_and_store_results_together("test -f #{file}", 'root', 500) + exists = code.zero? && local.zero? + end + exists + end + + def folder_exists(file) + if @in_has_uyunictl + _out, code = run_local("uyunictl exec -- 'test -d #{file}'", check_errors: false) + exists = code.zero? + else + _out, local, _remote, code = test_and_store_results_together("test -d #{file}", 'root', 500) + exists = code.zero? && local.zero? 
+ end + exists + end + + def file_delete(file) + if @in_has_uyunictl + _out, code = run_local("uyunictl exec -- 'rm #{file}'") + else + _out, _local, _remote, code = test_and_store_results_together("rm #{file}", 'root', 500) + end + code + end + + def folder_delete(folder) + if @in_has_uyunictl + _out, code = run_local("uyunictl exec -- 'rm -rf #{folder}'") + else + _out, _local, _remote, code = test_and_store_results_together("rm-rf #{folder}", 'root', 500) + end + code + end end diff --git a/testsuite/features/support/twopence_init.rb b/testsuite/features/support/twopence_init.rb index 15eefc14f427..128a698970d5 100644 --- a/testsuite/features/support/twopence_init.rb +++ b/testsuite/features/support/twopence_init.rb @@ -238,38 +238,32 @@ def net_prefix # This function tests whether a file exists on a node def file_exists?(node, file) - _out, local, _remote, code = node.test_and_store_results_together("test -f #{file}", 'root', 500) - code.zero? && local.zero? + node.file_exists(file) end # This function tests whether a folder exists on a node def folder_exists?(node, file) - _out, local, _remote, code = node.test_and_store_results_together("test -d #{file}", 'root', 500) - code.zero? && local.zero? + node.folder_exists(file) end # This function deletes a file from a node def file_delete(node, file) - _out, _local, _remote, code = node.test_and_store_results_together("rm #{file}", 'root', 500) - code + node.file_delete(file) end # This function deletes a file from a node def folder_delete(node, folder) - _out, _local, _remote, code = node.test_and_store_results_together("rm -rf #{folder}", 'root', 500) - code + node.folder_delete(folder) end # This function extracts a file from a node def file_extract(node, remote_file, local_file) - code, _remote = node.extract(remote_file, local_file, 'root', false) - code + node.extract(remote_file, local_file, 'root', false) end # This function injects a file into a node def file_inject(node, local_file, remote_file) - code = node.inject(local_file, remote_file, 'root', false) - code + node.inject(local_file, remote_file, 'root', false) end # Other global variables From 2b0f41275a4878a0840af68afc255c3b9b59e0cf Mon Sep 17 00:00:00 2001 From: Artem Shiliaev Date: Mon, 19 Jun 2023 18:42:07 +0200 Subject: [PATCH 34/80] testsuite ipmi fixes --- containers/server-image/Dockerfile | 3 ++- testsuite/features/step_definitions/command_steps.rb | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/containers/server-image/Dockerfile b/containers/server-image/Dockerfile index d72c20bd012a..1734b003a0d8 100644 --- a/containers/server-image/Dockerfile +++ b/containers/server-image/Dockerfile @@ -50,7 +50,8 @@ RUN zypper --gpg-auto-import-keys --non-interactive install --auto-agree-with-li virtual-host-gatherer-Libvirt \ virtual-host-gatherer-Nutanix \ virtual-host-gatherer-VMware \ - vim + vim \ + ipmitool RUN sed -i 's/sysctl kernel.shmmax/#sysctl kernel.shmmax/g' /usr/bin/uyuni-setup-reportdb diff --git a/testsuite/features/step_definitions/command_steps.rb b/testsuite/features/step_definitions/command_steps.rb index 1b1c4403707d..8232b2f39f07 100644 --- a/testsuite/features/step_definitions/command_steps.rb +++ b/testsuite/features/step_definitions/command_steps.rb @@ -581,8 +581,8 @@ return_code = file_inject($server, source, dest) raise 'File injection failed' unless return_code.zero? 
end - $server.run('curl --output DSP2043_2019.1.zip https://www.dmtf.org/sites/default/files/standards/documents/DSP2043_2019.1.zip') - $server.run('unzip DSP2043_2019.1.zip') + $server.run('curl --output /root/DSP2043_2019.1.zip https://www.dmtf.org/sites/default/files/standards/documents/DSP2043_2019.1.zip') + $server.run('unzip /root/DSP2043_2019.1.zip -d /root/') cmd = "/usr/bin/python3 /root/Redfish-Mockup-Server/redfishMockupServer.py " \ "-H #{$server.full_hostname} -p 8443 " \ "-S -D /root/DSP2043_2019.1/public-catfish/ " \ From 7e38f22e70e7c84b42d42b81894d38a3a006aca1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 21 Jun 2023 06:45:45 +0200 Subject: [PATCH 35/80] Use CA certificate from /etc/pki/trust/anchors in rhnpush --- client/tools/mgr-push/rhnpush_config.py | 2 +- client/tools/mgr-push/rhnpushrc | 2 +- testsuite/features/step_definitions/common_steps.rb | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/client/tools/mgr-push/rhnpush_config.py b/client/tools/mgr-push/rhnpush_config.py index 938ff841d75b..22c7c7f35a23 100644 --- a/client/tools/mgr-push/rhnpush_config.py +++ b/client/tools/mgr-push/rhnpush_config.py @@ -66,7 +66,7 @@ def __init__(self, filename=None, ensure_consistency=False): 'no_session_caching': '0', 'proxy': '', 'tolerant': '0', - 'ca_chain': '/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT', + 'ca_chain': '/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT', 'timeout': None } diff --git a/client/tools/mgr-push/rhnpushrc b/client/tools/mgr-push/rhnpushrc index 1d63522f73a9..670c09256911 100644 --- a/client/tools/mgr-push/rhnpushrc +++ b/client/tools/mgr-push/rhnpushrc @@ -74,7 +74,7 @@ no_session_caching = 0 tolerant = 0 #The CA cert used to verify the ssl server -ca_chain = /usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT +ca_chain = /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT #Default connection timeout, (no value for default) timeout = 300 diff --git a/testsuite/features/step_definitions/common_steps.rb b/testsuite/features/step_definitions/common_steps.rb index 26f0b089b218..c976e1234011 100644 --- a/testsuite/features/step_definitions/common_steps.rb +++ b/testsuite/features/step_definitions/common_steps.rb @@ -504,7 +504,7 @@ end When(/^I push package "([^"]*)" into "([^"]*)" channel$/) do |arg1, arg2| - srvurl = "http://#{ENV['SERVER']}/APP" + srvurl = "https://#{ENV['SERVER']}/APP" command = "rhnpush --server=#{srvurl} -u admin -p admin --nosig -c #{arg2} #{arg1} " $server.run(command, timeout: 500) $server.run('ls -lR /var/spacewalk/packages', timeout: 500) From 28265d9f56cde2ea4e45c6961528fb3c62b31250 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 22 Jun 2023 09:32:14 +0200 Subject: [PATCH 36/80] Initial Hub XML-RPC API container image --- containers/hub-xmlrpc-api-image/Dockerfile | 27 +++++++++++++++++++ containers/hub-xmlrpc-api-image/_service | 4 +++ .../hub-xmlrpc-api.changes | 4 +++ containers/hub-xmlrpc-api-image/tito.props | 2 ++ rel-eng/packages/hub-xmlrpc-api-image | 1 + 5 files changed, 38 insertions(+) create mode 100644 containers/hub-xmlrpc-api-image/Dockerfile create mode 100644 containers/hub-xmlrpc-api-image/_service create mode 100644 containers/hub-xmlrpc-api-image/hub-xmlrpc-api.changes create mode 100644 containers/hub-xmlrpc-api-image/tito.props create mode 100644 rel-eng/packages/hub-xmlrpc-api-image diff --git a/containers/hub-xmlrpc-api-image/Dockerfile b/containers/hub-xmlrpc-api-image/Dockerfile new file mode 100644 index 
000000000000..7f5103edf688 --- /dev/null +++ b/containers/hub-xmlrpc-api-image/Dockerfile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: MIT +#!BuildTag: uyuni/hub-xmlrpc-api:latest uyuni/hub-xmlrpc-api:4.4.0 uyuni/hub-xmlrpc-api:4.4.0.%RELEASE% + +ARG INIT_BASE=registry.opensuse.org/bci/bci-micro +FROM $INIT_BASE + +RUN zypper --gpg-auto-import-keys --non-interactive install hub-xmlrpc-api + +# LABELs +ARG PRODUCT=Uyuni +ARG VENDOR="Uyuni project" +ARG URL="https://www.uyuni-project.org/" +ARG REFERENCE_PREFIX="registry.opensuse.org/uyuni" + +# Build Service required labels +# labelprefix=org.opensuse.uyuni.hub-xmlrpc-api +LABEL org.opencontainers.image.title="${PRODUCT} Hub XML-RPC API container" +LABEL org.opencontainers.image.description="${PRODUCT} Hub XML-RPC API image" +LABEL org.opencontainers.image.created="%BUILDTIME%" +LABEL org.opencontainers.image.vendor="${VENDOR}" +LABEL org.opencontainers.image.url="${URL}" +LABEL org.opencontainers.image.version="4.4.0" +LABEL org.openbuildservice.disturl="%DISTURL%" +LABEL org.opensuse.reference="${REFERENCE_PREFIX}/server:4.4.0.%RELEASE%" +# endlabelprefix + +CMD ["/usr/bin/hub-xmlrpc-api"] diff --git a/containers/hub-xmlrpc-api-image/_service b/containers/hub-xmlrpc-api-image/_service new file mode 100644 index 000000000000..bde87fa5bc1f --- /dev/null +++ b/containers/hub-xmlrpc-api-image/_service @@ -0,0 +1,4 @@ + + + + diff --git a/containers/hub-xmlrpc-api-image/hub-xmlrpc-api.changes b/containers/hub-xmlrpc-api-image/hub-xmlrpc-api.changes new file mode 100644 index 000000000000..e2637480450b --- /dev/null +++ b/containers/hub-xmlrpc-api-image/hub-xmlrpc-api.changes @@ -0,0 +1,4 @@ +------------------------------------------------------------------- +Thu Jun 22 07:30:36 UTC 2023 - Cédric Bosdonnat + +- Initial image for Uyuni Hub XML-RPC API diff --git a/containers/hub-xmlrpc-api-image/tito.props b/containers/hub-xmlrpc-api-image/tito.props new file mode 100644 index 000000000000..f22069cb8efa --- /dev/null +++ b/containers/hub-xmlrpc-api-image/tito.props @@ -0,0 +1,2 @@ +[buildconfig] +tagger = tito.tagger.SUSEContainerTagger diff --git a/rel-eng/packages/hub-xmlrpc-api-image b/rel-eng/packages/hub-xmlrpc-api-image new file mode 100644 index 000000000000..45971d5a183c --- /dev/null +++ b/rel-eng/packages/hub-xmlrpc-api-image @@ -0,0 +1 @@ +4.4.0 containers/hub-xmlrpc-api-image/ From 9d7384631708158eb009caff2ef83239a4495d0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 22 Jun 2023 09:50:25 +0200 Subject: [PATCH 37/80] fixup! initial version uyuni server image --- containers/server-image/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/containers/server-image/Dockerfile b/containers/server-image/Dockerfile index 1734b003a0d8..3e3d24d16080 100644 --- a/containers/server-image/Dockerfile +++ b/containers/server-image/Dockerfile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: MIT -#!BuildTag: uyuni/server:latest uyuni/server:%PKG_VERSION% uyuni/server:%PKG_VERSION%.%RELEASE% +#!BuildTag: uyuni/server:latest uyuni/server:4.4.0 uyuni/server:4.4.0.%RELEASE% ARG INIT_BASE=registry.suse.com/bci/bci-base:15.4 FROM $INIT_BASE From 7ec3f5b8a431e6218ee8564fd31a5eaaaa6a748b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 4 Jul 2023 17:33:45 +0200 Subject: [PATCH 38/80] fixup! 
Add server helm chart --- .../templates/k3s-ingress-routes.yaml | 20 +++---------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/containers/server-helm/templates/k3s-ingress-routes.yaml b/containers/server-helm/templates/k3s-ingress-routes.yaml index dd5c355b2ba4..1a2ccad1e0d3 100644 --- a/containers/server-helm/templates/k3s-ingress-routes.yaml +++ b/containers/server-helm/templates/k3s-ingress-routes.yaml @@ -73,7 +73,7 @@ metadata: namespace: "{{ .Release.Namespace }}" spec: entryPoints: - - node-exporter + - node-metrics routes: - match: HostSNI(`*`) services: @@ -83,30 +83,16 @@ spec: apiVersion: traefik.containo.us/v1alpha1 kind: IngressRouteTCP metadata: - name: postgres-exporter-router + name: postgresql-exporter-router namespace: "{{ .Release.Namespace }}" spec: entryPoints: - - postgres-exporter + - psql-metrics routes: - match: HostSNI(`*`) services: - name: uyuni-tcp port: 9187 ---- -apiVersion: traefik.containo.us/v1alpha1 -kind: IngressRouteTCP -metadata: - name: taskomatic-router - namespace: "{{ .Release.Namespace }}" -spec: - entryPoints: - - taskomatic - routes: - - match: HostSNI(`*`) - services: - - name: uyuni-tcp - port: 9800 {{- end }} {{- if .Values.exposeJavaDebug }} --- From 94f1f5237778bc33ff7c215ada0d8c7db2a2842e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 4 Jul 2023 17:37:43 +0200 Subject: [PATCH 39/80] fixup! Add notes on how to use the server image on rke2 --- .../doc/server-kubernetes/k3s-traefik-config.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/containers/doc/server-kubernetes/k3s-traefik-config.yaml b/containers/doc/server-kubernetes/k3s-traefik-config.yaml index e27590046c67..fd78e0024d47 100644 --- a/containers/doc/server-kubernetes/k3s-traefik-config.yaml +++ b/containers/doc/server-kubernetes/k3s-traefik-config.yaml @@ -36,6 +36,16 @@ spec: expose: true exposedPort: 8001 protocol: TCP + psql-metrics: + port: 9187 + expose: true + exposedPort: 9187 + protocol: TCP + node-metrics: + port: 9101 + expose: true + exposedPort: 9101 + protocol: TCP tftp: port: 69 expose: true From 0f41b51d16d67fc6139f7e8f6038e17f25e3b6af Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 6 Jul 2023 17:45:34 +0200 Subject: [PATCH 40/80] configure podman timezone from host --- containers/server-systemd-services/uyuni-server.service | 1 + 1 file changed, 1 insertion(+) diff --git a/containers/server-systemd-services/uyuni-server.service b/containers/server-systemd-services/uyuni-server.service index 083c4c79bdb0..63c840cfbbb3 100644 --- a/containers/server-systemd-services/uyuni-server.service +++ b/containers/server-systemd-services/uyuni-server.service @@ -15,6 +15,7 @@ Restart=on-failure ExecStartPre=/bin/rm \ -f %t/uyuni-server.pid %t/%n.ctr-id ExecStart=/usr/bin/podman run \ + --tz=local \ --conmon-pidfile %t/uyuni-server.pid \ --cidfile=%t/%n.ctr-id \ --cgroups=no-conmon \ From 9c3add518bb247c3807b404b7e45440c20d6829d Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Fri, 7 Jul 2023 18:36:08 +0200 Subject: [PATCH 41/80] set timezone in the container (#7244) --- containers/server-helm/templates/config.yaml | 1 + containers/server-image/uyuni-setup.service | 1 + containers/server-systemd-services/setup_podman_timezone.sh | 3 +++ .../server-systemd-services/uyuni-server-services.config | 1 + .../server-systemd-services/uyuni-server-systemd-services.spec | 3 +++ containers/server-systemd-services/uyuni-server.service | 2 +- 6 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 
containers/server-systemd-services/setup_podman_timezone.sh diff --git a/containers/server-helm/templates/config.yaml b/containers/server-helm/templates/config.yaml index 3b1e3f48f0b9..5b7cb5ded706 100644 --- a/containers/server-helm/templates/config.yaml +++ b/containers/server-helm/templates/config.yaml @@ -23,6 +23,7 @@ data: NO_SSL: "Y" MANAGER_MAIL_FROM: {{ .Values.uyuniMailFrom }} UYUNI_FQDN: {{ .Values.fqdn }} + TZ: {{ .Values.timeZone | default "Etc/UTC" }} kind: ConfigMap metadata: name: uyuni-config diff --git a/containers/server-image/uyuni-setup.service b/containers/server-image/uyuni-setup.service index db98e02db7cc..15cfabe46c5c 100644 --- a/containers/server-image/uyuni-setup.service +++ b/containers/server-image/uyuni-setup.service @@ -11,6 +11,7 @@ PassEnvironment=LOCAL_DB MANAGER_DB_NAME MANAGER_DB_HOST MANAGER_DB_PORT MANAGER PassEnvironment=MANAGER_ENABLE_TFTP EXTERNALDB_ADMIN_USER EXTERNALDB_ADMIN_PASS EXTERNALDB_PROVIDER PassEnvironment=SCC_USER SCC_PASS ISS_PARENT ACTIVATE_SLP MANAGER_MAIL_FROM NO_SSL UYUNI_FQDN MIRROR_PATH PassEnvironment=REPORT_DB_NAME REPORT_DB_HOST REPORT_DB_PORT_USER REPORT_DB_PASS REPORT_DB_CA_CERT +PassEnvironment=TZ ExecStart=/usr/lib/susemanager/bin/mgr-setup -l /var/log/susemanager_setup.log -s -n ExecStartPost=systemctl disable --now uyuni-setup.service Type=oneshot diff --git a/containers/server-systemd-services/setup_podman_timezone.sh b/containers/server-systemd-services/setup_podman_timezone.sh new file mode 100644 index 000000000000..7b48c3f2c7b8 --- /dev/null +++ b/containers/server-systemd-services/setup_podman_timezone.sh @@ -0,0 +1,3 @@ +#!/bin/bash +HOST_TZ=$(timedatectl | awk '/Time zone:/{print $3}') +sed "s|^TZ=.*$|TZ=$HOST_TZ|" -i /etc/sysconfig/uyuni-server-systemd-services diff --git a/containers/server-systemd-services/uyuni-server-services.config b/containers/server-systemd-services/uyuni-server-services.config index 5c86694533da..323ee90c29c0 100644 --- a/containers/server-systemd-services/uyuni-server-services.config +++ b/containers/server-systemd-services/uyuni-server-services.config @@ -38,3 +38,4 @@ SCC_USER= SCC_PASS= REPORT_DB_HOST=uyuni-server UYUNI_FQDN=uyuni-server +TZ=Etc/UTC diff --git a/containers/server-systemd-services/uyuni-server-systemd-services.spec b/containers/server-systemd-services/uyuni-server-systemd-services.spec index 70c2800b2990..6bcd94470a25 100644 --- a/containers/server-systemd-services/uyuni-server-systemd-services.spec +++ b/containers/server-systemd-services/uyuni-server-systemd-services.spec @@ -61,6 +61,7 @@ install -D -m 644 uyuni-server.service %{buildroot}%{_unitdir}/uyuni-server.serv ln -s /usr/sbin/service %{buildroot}%{_sbindir}/rcuyuni-server install -m 755 uyuni-server.sh %{buildroot}%{_sbindir}/uyuni-server.sh +install -m 755 setup_podman_timezone.sh %{buildroot}%{_sbindir}/setup_podman_timezone.sh %check @@ -106,5 +107,7 @@ install -m 755 uyuni-server.sh %{buildroot}%{_sbindir}/uyuni-server.sh %endif %{_sysconfdir}/uyuni %{_sbindir}/uyuni-server.sh +%{_sbindir}/setup_podman_timezone.sh + %changelog diff --git a/containers/server-systemd-services/uyuni-server.service b/containers/server-systemd-services/uyuni-server.service index 63c840cfbbb3..e3430af2c3dc 100644 --- a/containers/server-systemd-services/uyuni-server.service +++ b/containers/server-systemd-services/uyuni-server.service @@ -14,8 +14,8 @@ EnvironmentFile=-/etc/sysconfig/uyuni-server-systemd-services Restart=on-failure ExecStartPre=/bin/rm \ -f %t/uyuni-server.pid %t/%n.ctr-id 
+ExecStartPre=setup_podman_timezone.sh ExecStart=/usr/bin/podman run \ - --tz=local \ --conmon-pidfile %t/uyuni-server.pid \ --cidfile=%t/%n.ctr-id \ --cgroups=no-conmon \ From 4a77b5150216cdbcddec56fe0b330e453b2f6bdf Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Fri, 7 Jul 2023 16:05:18 +0200 Subject: [PATCH 42/80] do not install local-formula if container --- .../features/secondary/allcli_system_group.feature | 10 +++++++--- testsuite/features/secondary/min_salt_formulas.feature | 6 +++++- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/testsuite/features/secondary/allcli_system_group.feature b/testsuite/features/secondary/allcli_system_group.feature index 78e748ded1ec..fca19b05ed0e 100644 --- a/testsuite/features/secondary/allcli_system_group.feature +++ b/testsuite/features/secondary/allcli_system_group.feature @@ -60,9 +60,13 @@ Feature: Manage a group of systems And I should see "rhlike_minion" as link And I should see "sle_minion" as link - Scenario: Install some formula on the server - When I manually install the "locale" formula on the server - And I synchronize all Salt dynamic modules on "sle_minion" + #container already has locale formula installed + @skip_if_container_server + Scenario: Install the locale formula package on the server + When I manually install the "locale" formula on the server + + Scenario: I synchronize all Salt dynamic modules on "sle_minion" + When I synchronize all Salt dynamic modules on "sle_minion" Scenario: New formula page is rendered for the system group When I follow the left menu "Systems > System Groups" diff --git a/testsuite/features/secondary/min_salt_formulas.feature b/testsuite/features/secondary/min_salt_formulas.feature index 646adcee0577..2528e3f7917a 100644 --- a/testsuite/features/secondary/min_salt_formulas.feature +++ b/testsuite/features/secondary/min_salt_formulas.feature @@ -11,9 +11,13 @@ Feature: Use salt formulas Scenario: Log in as admin user Given I am authorized for the "Admin" section + #container already has locale formula installed + @skip_if_container_server Scenario: Install the locale formula package on the server When I manually install the "locale" formula on the server - And I synchronize all Salt dynamic modules on "sle_minion" + + Scenario: I synchronize all Salt dynamic modules on "sle_minion" + When I synchronize all Salt dynamic modules on "sle_minion" Scenario: The new formula appears on the server When I follow the left menu "Salt > Formula Catalog" From cd0576244a79c67f2e237e7b3f89008d11dd4c1b Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Mon, 10 Jul 2023 08:33:34 +0200 Subject: [PATCH 43/80] Update values.yaml --- containers/server-helm/values.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/containers/server-helm/values.yaml b/containers/server-helm/values.yaml index 5a5e59f492c7..be96e1c2837c 100644 --- a/containers/server-helm/values.yaml +++ b/containers/server-helm/values.yaml @@ -104,6 +104,8 @@ uyuniMailFrom: "notifications@uyuni.local" ## fqdn is the user accessible fully qualified domain name of the uyuni server fqdn: "uyuni.local" +## TZ is the timezone +TZ: "Etc/UTC" ## sccUser is the SUSE Customer Center login # sccUser: "" From 05a21281b3375bf356fe69ff8df5920ed6148f00 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Fri, 14 Jul 2023 10:26:23 +0200 Subject: [PATCH 44/80] add debug acceptance test line --- testsuite/features/step_definitions/command_steps.rb | 2 +- testsuite/features/support/lavanda.rb | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git 
a/testsuite/features/step_definitions/command_steps.rb b/testsuite/features/step_definitions/command_steps.rb index 8232b2f39f07..30043bb211a0 100644 --- a/testsuite/features/step_definitions/command_steps.rb +++ b/testsuite/features/step_definitions/command_steps.rb @@ -1176,7 +1176,7 @@ next unless refresh_result.include? node node_refreshes += "^#{refresh_id}|" end - cmd = "spacecmd -u admin -p admin schedule_list #{current_time} #{timeout_time} | egrep '#{node_refreshes.delete_suffix('|')}'" + cmd = "spacecmd -u admin -p admin schedule_list #{current_time} #{timeout_time} | egrep '#{node_refreshes.delete_suffix('\|')}'" repeat_until_timeout(timeout: long_wait_delay, message: "'refresh package list' did not finish") do result, code = $server.run(cmd, check_errors: false) sleep 1 diff --git a/testsuite/features/support/lavanda.rb b/testsuite/features/support/lavanda.rb index 090fc235dafa..94dece26c33f 100644 --- a/testsuite/features/support/lavanda.rb +++ b/testsuite/features/support/lavanda.rb @@ -154,6 +154,7 @@ def run(cmd, separated_results: false, check_errors: true, timeout: DEFAULT_TIME cmd_prefixed = cmd if @in_has_uyunictl cmd_prefixed = "uyunictl exec -i '#{cmd.gsub(/'/, '\'"\'"\'')}'" + print "#{cmd_prefixed}\n" end run_local(cmd_prefixed, separated_results: separated_results, check_errors: check_errors, timeout: timeout, user: user, successcodes: successcodes, buffer_size: buffer_size, verbose: verbose) end From c3613ed8fa526ff29a6f8433668e619cb1b643a7 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Tue, 18 Jul 2023 10:18:28 +0200 Subject: [PATCH 45/80] testsuite: do not install and remove formula if in container --- testsuite/features/step_definitions/salt_steps.rb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testsuite/features/step_definitions/salt_steps.rb b/testsuite/features/step_definitions/salt_steps.rb index 1d683c6b91cf..3cbf15a0b3cc 100644 --- a/testsuite/features/step_definitions/salt_steps.rb +++ b/testsuite/features/step_definitions/salt_steps.rb @@ -198,11 +198,14 @@ end # Salt formulas + +@skip_if_container_server When(/^I manually install the "([^"]*)" formula on the server$/) do |package| $server.run("zypper --non-interactive refresh") $server.run("zypper --non-interactive install --force #{package}-formula") end +@skip_if_container_server When(/^I manually uninstall the "([^"]*)" formula from the server$/) do |package| $server.run("zypper --non-interactive remove #{package}-formula") # Remove automatically installed dependency if needed From f125821cd46a51d6dcd273b806bfc963cd04fca2 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Tue, 18 Jul 2023 15:48:02 +0200 Subject: [PATCH 46/80] parse virtual-host-gatherer null value (#7282) --- .../com/suse/manager/gatherer/GathererJsonIO.java | 12 ++++++++++-- .../manager/gatherer/test/GathererJsonIOTest.java | 8 +++++++- .../com/suse/manager/gatherer/test/modulelist.json | 6 ++++++ java/spacewalk-java.changes.mbussolotto.parse_null | 1 + 4 files changed, 24 insertions(+), 3 deletions(-) create mode 100644 java/spacewalk-java.changes.mbussolotto.parse_null diff --git a/java/code/src/com/suse/manager/gatherer/GathererJsonIO.java b/java/code/src/com/suse/manager/gatherer/GathererJsonIO.java index bf425bd87963..fc1a25c47ad6 100644 --- a/java/code/src/com/suse/manager/gatherer/GathererJsonIO.java +++ b/java/code/src/com/suse/manager/gatherer/GathererJsonIO.java @@ -26,6 +26,7 @@ import com.google.gson.TypeAdapter; import com.google.gson.reflect.TypeToken; import com.google.gson.stream.JsonReader; +import 
com.google.gson.stream.JsonToken; import com.google.gson.stream.JsonWriter; import java.io.IOException; @@ -100,11 +101,18 @@ public GathererModule read(JsonReader reader) throws IOException { reader.beginObject(); while (reader.hasNext()) { String key = reader.nextName(); + String value = null; + if (reader.peek() == JsonToken.NULL) { + reader.nextNull(); + } + else { + value = reader.nextString(); + } if (key.equals("module")) { - gm.setName(reader.nextString()); + gm.setName(value); } else { - gm.addParameter(key, reader.nextString()); + gm.addParameter(key, value); } } reader.endObject(); diff --git a/java/code/src/com/suse/manager/gatherer/test/GathererJsonIOTest.java b/java/code/src/com/suse/manager/gatherer/test/GathererJsonIOTest.java index e3f7869795b6..b0dffff72ed1 100644 --- a/java/code/src/com/suse/manager/gatherer/test/GathererJsonIOTest.java +++ b/java/code/src/com/suse/manager/gatherer/test/GathererJsonIOTest.java @@ -55,9 +55,10 @@ public void testReadGathererModules() throws Exception { FileUtils.readStringFromFile(TestUtils.findTestData(MODULELIST).getPath()); Map mods = new GathererJsonIO().readGathererModules(json); - assertEquals(2, mods.keySet().size()); + assertEquals(3, mods.keySet().size()); assertTrue(mods.keySet().contains("VMware")); assertTrue(mods.keySet().contains("SUSECloud")); + assertTrue(mods.keySet().contains("Libvirt")); for (GathererModule g : mods.values()) { if (g.getName().equals("VMware")) { @@ -76,6 +77,11 @@ else if (g.getName().equals("SUSECloud")) { assertTrue(g.getParameters().containsKey("protocol")); assertTrue(g.getParameters().containsKey("tenant")); } + else if (g.getName().equals("Libvirt")) { + assertTrue(g.getParameters().containsKey("uri")); + assertTrue(g.getParameters().containsKey("sasl_username")); + assertTrue(g.getParameters().containsKey("sasl_password")); + } else { fail("Unknown Module"); } diff --git a/java/code/src/com/suse/manager/gatherer/test/modulelist.json b/java/code/src/com/suse/manager/gatherer/test/modulelist.json index 12f3d18e73f1..64613aab280c 100644 --- a/java/code/src/com/suse/manager/gatherer/test/modulelist.json +++ b/java/code/src/com/suse/manager/gatherer/test/modulelist.json @@ -14,6 +14,12 @@ "port": 443, "username": "", "password": "" + }, + "Libvirt": { + "module": "Libvirt", + "uri": "", + "sasl_username": null, + "sasl_password": null } } diff --git a/java/spacewalk-java.changes.mbussolotto.parse_null b/java/spacewalk-java.changes.mbussolotto.parse_null new file mode 100644 index 000000000000..6e9a1cd25822 --- /dev/null +++ b/java/spacewalk-java.changes.mbussolotto.parse_null @@ -0,0 +1 @@ +- parse virtual-host-gatherer null value From b7e125712ba7addb3c39b69b9095117c5428ff73 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Tue, 18 Jul 2023 17:07:46 +0200 Subject: [PATCH 47/80] testsuite: increase waiting AJAX transition --- testsuite/features/support/commonlib.rb | 8 ++++---- testsuite/features/support/env.rb | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/testsuite/features/support/commonlib.rb b/testsuite/features/support/commonlib.rb index 7b135caf9d6a..3d1e15892803 100644 --- a/testsuite/features/support/commonlib.rb +++ b/testsuite/features/support/commonlib.rb @@ -129,7 +129,7 @@ def format_detail(message, last_result, report_result) def click_button_and_wait(locator = nil, **options) click_button(locator, options) begin - raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 5) + raise 'Timeout: Waiting AJAX transition (click 
link)' unless has_no_css?('.senna-loading', wait: 20) rescue StandardError, Capybara::ExpectationNotMet => e STDOUT.puts e.message # Skip errors related to .senna-loading element end @@ -138,7 +138,7 @@ def click_button_and_wait(locator = nil, **options) def click_link_and_wait(locator = nil, **options) click_link(locator, options) begin - raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 5) + raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 20) rescue StandardError, Capybara::ExpectationNotMet => e STDOUT.puts e.message # Skip errors related to .senna-loading element end @@ -147,7 +147,7 @@ def click_link_and_wait(locator = nil, **options) def click_link_or_button_and_wait(locator = nil, **options) click_link_or_button(locator, options) begin - raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 5) + raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 20) rescue StandardError, Capybara::ExpectationNotMet => e STDOUT.puts e.message # Skip errors related to .senna-loading element end @@ -158,7 +158,7 @@ module CapybaraNodeElementExtension def click super begin - raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 5) + raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 20) rescue StandardError, Capybara::ExpectationNotMet => e STDOUT.puts e.message # Skip errors related to .senna-loading element end diff --git a/testsuite/features/support/env.rb b/testsuite/features/support/env.rb index 4ce81dcd97a3..97d7989081ba 100644 --- a/testsuite/features/support/env.rb +++ b/testsuite/features/support/env.rb @@ -180,7 +180,7 @@ def process_code_coverage AfterStep do if has_css?('.senna-loading', wait: 0) log 'WARN: Step ends with an ajax transition not finished, let\'s wait a bit!' 
- log 'Timeout: Waiting AJAX transition' unless has_no_css?('.senna-loading', wait: 20) + log 'Timeout: Waiting AJAX transition' unless has_no_css?('.senna-loading', wait: 40) end end From a0ab6aab8f3259c0449c21e739edcce43900beaf Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 19 Jul 2023 14:30:04 +0200 Subject: [PATCH 48/80] skip uninstall of locale-formula --- testsuite/features/secondary/allcli_system_group.feature | 1 + testsuite/features/secondary/min_salt_formulas.feature | 1 + 2 files changed, 2 insertions(+) diff --git a/testsuite/features/secondary/allcli_system_group.feature b/testsuite/features/secondary/allcli_system_group.feature index fca19b05ed0e..109e26cf7f59 100644 --- a/testsuite/features/secondary/allcli_system_group.feature +++ b/testsuite/features/secondary/allcli_system_group.feature @@ -105,6 +105,7 @@ Feature: Manage a group of systems # Red Hat-like minion is intentionally not removed from group + @skip_if_container_server Scenario: Cleanup: uninstall formula from the server When I manually uninstall the "locale" formula from the server diff --git a/testsuite/features/secondary/min_salt_formulas.feature b/testsuite/features/secondary/min_salt_formulas.feature index 2528e3f7917a..d5177dcc453e 100644 --- a/testsuite/features/secondary/min_salt_formulas.feature +++ b/testsuite/features/secondary/min_salt_formulas.feature @@ -165,6 +165,7 @@ Feature: Use salt formulas And the keymap on "sle_minion" should be "us" And the language on "sle_minion" should be "en_US.UTF-8" + @skip_if_container_server Scenario: Cleanup: uninstall formula package from the server When I manually uninstall the "locale" formula from the server From ffc5074c596cb0269f7cecd9b6b3b8c41621573e Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 19 Jul 2023 17:11:30 +0200 Subject: [PATCH 49/80] keep aligned TZ and /etc/localtime --- susemanager/bin/mgr-setup | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 35468b9fefa5..9b555254b168 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -46,6 +46,10 @@ if [ ! $? -eq 0 ]; then exit 1 fi +if [[ ! -z "$TZ" ]]; then + timedatectl set-timezone $TZ +fi + TMPDIR="/var/spacewalk/tmp" DO_MIGRATION=0 DO_SETUP=0 From b1329a6a2365796a5979c6bf8c8c249f62fc7a8a Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 20 Jul 2023 11:35:23 +0200 Subject: [PATCH 50/80] timezone service --- containers/server-image/Dockerfile | 9 +++++++-- containers/server-image/timezone_alignment.service | 10 ++++++++++ containers/server-image/timezone_alignment.sh | 5 +++++ 3 files changed, 22 insertions(+), 2 deletions(-) create mode 100644 containers/server-image/timezone_alignment.service create mode 100755 containers/server-image/timezone_alignment.sh diff --git a/containers/server-image/Dockerfile b/containers/server-image/Dockerfile index 3e3d24d16080..327d8a4ed1e4 100644 --- a/containers/server-image/Dockerfile +++ b/containers/server-image/Dockerfile @@ -9,10 +9,12 @@ ARG PRODUCT_PATTERN_PREFIX="patterns-uyuni" # Add distro and product repos COPY add_repos.sh /usr/bin +COPY timezone_alignment.sh /usr/bin RUN sh add_repos.sh # Copy Uyuni setup script -COPY uyuni-setup.service /usr/lib/systemd/system/ +COPY uyuni-setup.service /usr/lib/systemd/system/ +COPY timezone_alignment.service /usr/lib/systemd/system/ COPY remove_unused.sh . 
RUN echo "rpm.install.excludedocs = yes" >>/etc/zypp/zypp.conf @@ -68,8 +70,11 @@ COPY java_agent.yaml /etc/prometheus-jmx_exporter/taskomatic/java_agent.yml COPY tomcat_jmx.conf /usr/lib/systemd/system/tomcat.service.d/jmx.conf COPY taskomatic_jmx.conf /usr/lib/systemd/system/taskomatic.service.d/jmx.conf +RUN chmod -R 755 /usr/bin/timezone_alignment.sh + RUN systemctl enable prometheus-node_exporter; \ - systemctl enable uyuni-setup + systemctl enable uyuni-setup; \ + systemctl enable timezone_alignment; # LABELs ARG PRODUCT=Uyuni diff --git a/containers/server-image/timezone_alignment.service b/containers/server-image/timezone_alignment.service new file mode 100644 index 000000000000..d091ff8329f9 --- /dev/null +++ b/containers/server-image/timezone_alignment.service @@ -0,0 +1,10 @@ +[Unit] +Description=Timezone alignment +After=postgresql.service + +[Service] +ExecStart=timezone_alignment.sh +Type=oneshot + +[Install] +WantedBy=multi-user.target diff --git a/containers/server-image/timezone_alignment.sh b/containers/server-image/timezone_alignment.sh new file mode 100755 index 000000000000..9f66b822c86a --- /dev/null +++ b/containers/server-image/timezone_alignment.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +if [[ ! -z "$TZ" ]]; then + timedatectl set-timezone $TZ +fi From 17107905a9fd003397e66bf1f5f2279fbd3cf170 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Tue, 25 Jul 2023 09:39:24 +0200 Subject: [PATCH 51/80] add PRODUCT REPO to add_repos.sh --- containers/server-image/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/containers/server-image/Dockerfile b/containers/server-image/Dockerfile index 327d8a4ed1e4..4ba3afcefea0 100644 --- a/containers/server-image/Dockerfile +++ b/containers/server-image/Dockerfile @@ -10,7 +10,7 @@ ARG PRODUCT_PATTERN_PREFIX="patterns-uyuni" # Add distro and product repos COPY add_repos.sh /usr/bin COPY timezone_alignment.sh /usr/bin -RUN sh add_repos.sh +RUN sh add_repos.sh ${PRODUCT_REPO} # Copy Uyuni setup script COPY uyuni-setup.service /usr/lib/systemd/system/ From f489ab3337d2926518a135eb359497e920f24df8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 11 Jul 2023 17:45:00 +0200 Subject: [PATCH 52/80] Fix uyuni-setup TO REMOVE --- containers/server-image/uyuni-setup.service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/containers/server-image/uyuni-setup.service b/containers/server-image/uyuni-setup.service index 15cfabe46c5c..c96dba67ff51 100644 --- a/containers/server-image/uyuni-setup.service +++ b/containers/server-image/uyuni-setup.service @@ -10,7 +10,7 @@ PassEnvironment=CERT_CNAMES CERT_O CERT_OU CERT_CITY CERT_STATE CERT_COUNTRY CER PassEnvironment=LOCAL_DB MANAGER_DB_NAME MANAGER_DB_HOST MANAGER_DB_PORT MANAGER_DB_CA_CERT MANAGER_DB_PROTOCOL PassEnvironment=MANAGER_ENABLE_TFTP EXTERNALDB_ADMIN_USER EXTERNALDB_ADMIN_PASS EXTERNALDB_PROVIDER PassEnvironment=SCC_USER SCC_PASS ISS_PARENT ACTIVATE_SLP MANAGER_MAIL_FROM NO_SSL UYUNI_FQDN MIRROR_PATH -PassEnvironment=REPORT_DB_NAME REPORT_DB_HOST REPORT_DB_PORT_USER REPORT_DB_PASS REPORT_DB_CA_CERT +PassEnvironment=REPORT_DB_NAME REPORT_DB_HOST REPORT_DB_PORT REPORT_DB_USER REPORT_DB_PASS REPORT_DB_CA_CERT PassEnvironment=TZ ExecStart=/usr/lib/susemanager/bin/mgr-setup -l /var/log/susemanager_setup.log -s -n ExecStartPost=systemctl disable --now uyuni-setup.service From ffc590c7e9e1a7b5e20a07e064da022c2836af9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 18 Jul 2023 18:10:43 +0200 Subject: 
[PATCH 53/80] Remove uyuni-setup service from server image --- containers/server-image/Dockerfile | 3 --- containers/server-image/uyuni-setup.service | 17 ----------------- 2 files changed, 20 deletions(-) delete mode 100644 containers/server-image/uyuni-setup.service diff --git a/containers/server-image/Dockerfile b/containers/server-image/Dockerfile index 4ba3afcefea0..958765818f84 100644 --- a/containers/server-image/Dockerfile +++ b/containers/server-image/Dockerfile @@ -12,8 +12,6 @@ COPY add_repos.sh /usr/bin COPY timezone_alignment.sh /usr/bin RUN sh add_repos.sh ${PRODUCT_REPO} -# Copy Uyuni setup script -COPY uyuni-setup.service /usr/lib/systemd/system/ COPY timezone_alignment.service /usr/lib/systemd/system/ COPY remove_unused.sh . @@ -73,7 +71,6 @@ COPY taskomatic_jmx.conf /usr/lib/systemd/system/taskomatic.service.d/jmx.conf RUN chmod -R 755 /usr/bin/timezone_alignment.sh RUN systemctl enable prometheus-node_exporter; \ - systemctl enable uyuni-setup; \ systemctl enable timezone_alignment; # LABELs diff --git a/containers/server-image/uyuni-setup.service b/containers/server-image/uyuni-setup.service deleted file mode 100644 index c96dba67ff51..000000000000 --- a/containers/server-image/uyuni-setup.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=Uyuni run setup - -[Install] -WantedBy=multi-user.target - -[Service] -PassEnvironment=MANAGER_USER MANAGER_PASS MANAGER_ADMIN_EMAIL -PassEnvironment=CERT_CNAMES CERT_O CERT_OU CERT_CITY CERT_STATE CERT_COUNTRY CERT_EMAIL CERT_PASS -PassEnvironment=LOCAL_DB MANAGER_DB_NAME MANAGER_DB_HOST MANAGER_DB_PORT MANAGER_DB_CA_CERT MANAGER_DB_PROTOCOL -PassEnvironment=MANAGER_ENABLE_TFTP EXTERNALDB_ADMIN_USER EXTERNALDB_ADMIN_PASS EXTERNALDB_PROVIDER -PassEnvironment=SCC_USER SCC_PASS ISS_PARENT ACTIVATE_SLP MANAGER_MAIL_FROM NO_SSL UYUNI_FQDN MIRROR_PATH -PassEnvironment=REPORT_DB_NAME REPORT_DB_HOST REPORT_DB_PORT REPORT_DB_USER REPORT_DB_PASS REPORT_DB_CA_CERT -PassEnvironment=TZ -ExecStart=/usr/lib/susemanager/bin/mgr-setup -l /var/log/susemanager_setup.log -s -n -ExecStartPost=systemctl disable --now uyuni-setup.service -Type=oneshot From 8944d9a043372be0efcadff1fc5fdbbca3d6c8d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Mon, 31 Jul 2023 18:07:06 +0200 Subject: [PATCH 54/80] Remove the server systemd service package uyuniadm is the one generating the systemd unit file for the server container, we don't need it as a package anymore. 
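Since the unit is no longer shipped as an RPM, it only exists on hosts where `uyuniadm` has set the server up. A minimal sketch for checking the generated service on a Podman host, assuming the generated unit keeps the `uyuni-server.service` name used by the removed package:

```
# Sketch only: assumes uyuniadm still names the generated unit uyuni-server.service.
systemctl cat uyuni-server.service        # show the unit content written by uyuniadm
systemctl status uyuni-server.service     # check that the container service is active
journalctl -xeu uyuni-server.service      # read the service logs
podman ps --filter name=uyuni-server      # confirm the backing container is running
```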
--- containers/server-systemd-services/README.md | 15 -- .../setup_podman_timezone.sh | 3 - .../uyuni-server-services.config | 41 ---- .../uyuni-server-systemd-services.changes | 1 - .../uyuni-server-systemd-services.spec | 113 ----------- .../uyuni-server.service | 80 -------- .../server-systemd-services/uyuni-server.sh | 175 ------------------ 7 files changed, 428 deletions(-) delete mode 100644 containers/server-systemd-services/README.md delete mode 100644 containers/server-systemd-services/setup_podman_timezone.sh delete mode 100644 containers/server-systemd-services/uyuni-server-services.config delete mode 100644 containers/server-systemd-services/uyuni-server-systemd-services.changes delete mode 100644 containers/server-systemd-services/uyuni-server-systemd-services.spec delete mode 100644 containers/server-systemd-services/uyuni-server.service delete mode 100644 containers/server-systemd-services/uyuni-server.sh diff --git a/containers/server-systemd-services/README.md b/containers/server-systemd-services/README.md deleted file mode 100644 index c1ae371556cd..000000000000 --- a/containers/server-systemd-services/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# General usage - -Start the services by running `systemctl start uyuni-server.service`. - -Edit the `/etc/sysconfig/uyuni-server-systemd-services` file if you need to add more options to the `podman` pod running command. - -# Advanced options - -In order to change the default images registry, namespace and tag, edit the `NAMESPACE` and `TAG` variables in `/etc/sysconfig/uyuni-server-systemd-services` file. -Restart the `uyuni-server` service is required to apply the change. - -# Getting logs - -You can get logs from the `journalctl -xeu uyuni-server.service` services using `journalctl`. -You can also use `podman logs` using the same names. diff --git a/containers/server-systemd-services/setup_podman_timezone.sh b/containers/server-systemd-services/setup_podman_timezone.sh deleted file mode 100644 index 7b48c3f2c7b8..000000000000 --- a/containers/server-systemd-services/setup_podman_timezone.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -HOST_TZ=$(timedatectl | awk '/Time zone:/{print $3}') -sed "s|^TZ=.*$|TZ=$HOST_TZ|" -i /etc/sysconfig/uyuni-server-systemd-services diff --git a/containers/server-systemd-services/uyuni-server-services.config b/containers/server-systemd-services/uyuni-server-services.config deleted file mode 100644 index 323ee90c29c0..000000000000 --- a/containers/server-systemd-services/uyuni-server-services.config +++ /dev/null @@ -1,41 +0,0 @@ -# This file is expected to be found in `/etc/sysconfig/container-server-services.config`, -# the EnvironmentFile services property is pointing there - -# Where to get the images from if not defined otherwise in a service-specific configuration -# It should contain the registry FQDN and path to the server-* images without trailing slash -NAMESPACE=registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni - -# Tag of the images to pull if not defined otherwise in a service-specific configuration -TAG=latest - -# Pass additional parameters to the pod start command. 
-# -# Example: -# EXTRA_POD_ARGS='--add-host=server.tf.local:192.168.122.254 --add-host=client.tf.local:192.168.122.89' -# Add -p 8000:8000 -p 8001:8001 to enable java remote debugging -EXTRA_POD_ARGS='' - -#all these fields are required if it's a migration and should match with the current migrated instance -REPORT_DB_PASS=pythia_susemanager -# Initial setup configuration options -MANAGER_USER=spacewalk -MANAGER_PASS=spacewalk -MANAGER_ADMIN_EMAIL=galaxy-noise@suse.de -CERT_O=SUSE -CERT_OU=SUSE -CERT_CITY=Nuernberg -CERT_STATE=Bayern -CERT_COUNTRY=DE -CERT_EMAIL=galaxy-noise@suse.de -CERT_PASS=spacewalk -USE_EXISTING_CERTS=N -MANAGER_DB_NAME=susemanager -MANAGER_DB_HOST=localhost -MANAGER_DB_PORT=5432 -MANAGER_DB_PROTOCOL=TCP -MANAGER_ENABLE_TFTP=Y -SCC_USER= -SCC_PASS= -REPORT_DB_HOST=uyuni-server -UYUNI_FQDN=uyuni-server -TZ=Etc/UTC diff --git a/containers/server-systemd-services/uyuni-server-systemd-services.changes b/containers/server-systemd-services/uyuni-server-systemd-services.changes deleted file mode 100644 index b89bb5cefb2b..000000000000 --- a/containers/server-systemd-services/uyuni-server-systemd-services.changes +++ /dev/null @@ -1 +0,0 @@ -- create first draft of uyuni-server-systemd-services diff --git a/containers/server-systemd-services/uyuni-server-systemd-services.spec b/containers/server-systemd-services/uyuni-server-systemd-services.spec deleted file mode 100644 index 6bcd94470a25..000000000000 --- a/containers/server-systemd-services/uyuni-server-systemd-services.spec +++ /dev/null @@ -1,113 +0,0 @@ -# -# spec file for package uyuni-server-systemd-services -# -# Copyright (c) 2022 SUSE LLC -# -# All modifications and additions to the file contributed by third parties -# remain the property of their copyright owners, unless otherwise agreed -# upon. The license for this file, and modifications and additions to the -# file, is the same license as for the pristine package itself (unless the -# license for the pristine package is not an Open Source License, in which -# case the license is the MIT License). An "Open Source License" is a -# license that conforms to the Open Source Definition (Version 1.9) -# published by the Open Source Initiative. - -# Please submit bugfixes or comments via https://bugs.opensuse.org/ -# - -Name: uyuni-server-systemd-services -Summary: Uyuni Server systemd services containers -License: GPL-2.0-only -Group: Applications/Internet -Version: 4.4.1 -Release: 1 -URL: https://github.com/uyuni-project/uyuni -Source0: %{name}-%{version}-1.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-build -BuildArch: noarch -Requires: podman -%if 0%{?suse_version} -Requires(post): %fillup_prereq -%endif -BuildRequires: systemd-rpm-macros - -%description -This package contains systemd services to run the Uyuni server containers using podman. 
- -%prep -%setup -q - -%build - -%install -install -d -m 755 %{buildroot}/%{_sysconfdir}/uyuni/server -install -d -m 755 %{buildroot}%{_sbindir} - -#TODO currently removed but it can be useful in future -#%if "%{?susemanager_container_images_path}" != "" -#sed 's|^NAMESPACE=.*$|NAMESPACE=%{susemanager_container_images_path}|' -i uyuni-server-services.config -#%endif - -%if !0%{?is_opensuse} -PRODUCT_VERSION=$(echo %{version} | sed 's/^\([0-9]\+\.[0-9]\+\).*$/\1/') -%endif -%if 0%{?rhel} -install -D -m 644 uyuni-server-services.config %{buildroot}%{_sysconfdir}/sysconfig/uyuni-server-systemd-services.config -%else -install -D -m 644 uyuni-server-services.config %{buildroot}%{_fillupdir}/sysconfig.%{name} -%endif - -install -D -m 644 uyuni-server.service %{buildroot}%{_unitdir}/uyuni-server.service -ln -s /usr/sbin/service %{buildroot}%{_sbindir}/rcuyuni-server - -install -m 755 uyuni-server.sh %{buildroot}%{_sbindir}/uyuni-server.sh -install -m 755 setup_podman_timezone.sh %{buildroot}%{_sbindir}/setup_podman_timezone.sh - -%check - -%pre -%if !0%{?rhel} - %service_add_pre uyuni-server.service -%endif - -%post -%if 0%{?suse_version} -%fillup_only -%endif - -%if 0%{?rhel} - %systemd_post uyuni-server.service -%else - %service_add_post uyuni-server -%endif - -%preun -%if 0%{?rhel} - %systemd_preun uyuni-server.service -%else - %service_del_preun uyuni-server -%endif - -%postun -%if 0%{?rhel} - %systemd_postun uyuni-server.service -%else - %service_del_postun uyuni-server -%endif - -%files -%defattr(-,root,root) -%doc README.md -%{_unitdir}/*.service -%{_sbindir}/rcuyuni-* -%if 0%{?rhel} -%{_sysconfdir}/sysconfig/uyuni-server-systemd-services.config -%else -%{_fillupdir}/sysconfig.%{name} -%endif -%{_sysconfdir}/uyuni -%{_sbindir}/uyuni-server.sh -%{_sbindir}/setup_podman_timezone.sh - - -%changelog diff --git a/containers/server-systemd-services/uyuni-server.service b/containers/server-systemd-services/uyuni-server.service deleted file mode 100644 index e3430af2c3dc..000000000000 --- a/containers/server-systemd-services/uyuni-server.service +++ /dev/null @@ -1,80 +0,0 @@ -# container-uyuni-server.service -# autogenerated by Podman 4.3.1 -# Tue Feb 28 17:20:52 CET 2023 - -[Unit] -Description=Uyuni server image container service -Wants=network.target -After=network-online.target -RequiresMountsFor=%t/containers - -[Service] -Environment=PODMAN_SYSTEMD_UNIT=%n -EnvironmentFile=-/etc/sysconfig/uyuni-server-systemd-services -Restart=on-failure -ExecStartPre=/bin/rm \ - -f %t/uyuni-server.pid %t/%n.ctr-id -ExecStartPre=setup_podman_timezone.sh -ExecStart=/usr/bin/podman run \ - --conmon-pidfile %t/uyuni-server.pid \ - --cidfile=%t/%n.ctr-id \ - --cgroups=no-conmon \ - --rm \ - --sdnotify=conmon \ - --cap-add NET_RAW \ - -d \ - --tmpfs /run \ - -p 443:443 \ - -p 80:80 \ - -p 4505:4505 \ - -p 4506:4506 \ - -p 69:69 \ - -p 25151:25151 \ - -p 5432:5432 \ - -p 9100:9100 \ - -p 9187:9187 \ - -p 9800:9800 \ - -v cgroup:/sys/fs/cgroup:rw \ - -v var-lib-cobbler:/var/lib/cobbler \ - -v var-pgsql:/var/lib/pgsql \ - -v var-cache:/var/cache \ - -v var-spacewalk:/var/spacewalk \ - -v var-log:/var/log \ - -v srv-salt:/srv/salt \ - -v srv-www-pub:/srv/www/htdocs/pub \ - -v srv-www-cobbler:/srv/www/cobbler \ - -v srv-www-osimages:/srv/www/os-images \ - -v srv-tftpboot:/srv/tftpboot \ - -v srv-formulametadata:/srv/formula_metadata \ - -v srv-pillar:/srv/pillar \ - -v srv-susemanager:/srv/susemanager \ - -v srv-spacewalk:/srv/spacewalk \ - -v root:/root \ - -v etc-apache2:/etc/apache2 \ - -v etc-rhn:/etc/rhn \ - -v 
etc-systemd:/etc/systemd/system/multi-user.target.wants \ - -v etc-salt:/etc/salt \ - -v etc-tomcat:/etc/tomcat \ - -v etc-cobbler:/etc/cobbler \ - -v etc-sysconfig:/etc/sysconfig \ - -v etc-tls:/etc/pki/tls \ - -v ca-cert:/etc/pki/trust/anchors/ \ - --env-host \ - --hostname ${UYUNI_FQDN} \ - $EXTRA_POD_ARGS \ - --name uyuni-server ${NAMESPACE}/server:${TAG} -ExecStop=/usr/bin/podman stop \ - --ignore -t 10 \ - --cidfile=%t/%n.ctr-id -ExecStopPost=/usr/bin/podman rm \ - -f \ - --ignore -t 10 \ - --cidfile=%t/%n.ctr-id - -PIDFile=%t/uyuni-server.pid -TimeoutStopSec=180 -TimeoutStartSec=900 -Type=forking - -[Install] -WantedBy=multi-user.target default.target diff --git a/containers/server-systemd-services/uyuni-server.sh b/containers/server-systemd-services/uyuni-server.sh deleted file mode 100644 index dae5f3b96842..000000000000 --- a/containers/server-systemd-services/uyuni-server.sh +++ /dev/null @@ -1,175 +0,0 @@ -#!/bin/bash - -############################# SETUP ############################# - -set -Eeuo pipefail -trap cleanup SIGINT SIGTERM ERR EXIT - -script_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd -P) - -cleanup() { - trap - SIGINT SIGTERM ERR EXIT -} - -msg() { - echo >&2 -e "${1-}" -} - -die() { - local msg=$1 - local code=${2-1} - msg "$msg" - exit "$code" -} - -############################# ROOT ############################# - -usage_root() { - cat </etc/sysconfig/uyuni-server-services.config < Date: Tue, 25 Jul 2023 15:37:42 +0200 Subject: [PATCH 55/80] Remove the setup config part of the server helm chart --- containers/server-helm/templates/config.yaml | 41 ---------------- .../server-helm/templates/deployment.yaml | 9 ++-- containers/server-helm/values.yaml | 47 +------------------ 3 files changed, 5 insertions(+), 92 deletions(-) delete mode 100644 containers/server-helm/templates/config.yaml diff --git a/containers/server-helm/templates/config.yaml b/containers/server-helm/templates/config.yaml deleted file mode 100644 index 5b7cb5ded706..000000000000 --- a/containers/server-helm/templates/config.yaml +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: v1 -data: - MANAGER_USER: {{ .Values.uyuniUser | default "spacewalk" }} - MANAGER_ADMIN_EMAIL: {{ .Values.uyuniAdminEmail }} - MANAGER_DB_NAME: "susemanager" - MANAGER_DB_HOST: "localhost" - MANAGER_DB_PORT: "5432" - MANAGER_DB_PROTOCOL: "TCP" - MANAGER_ENABLE_TFTP: '{{ .Values.uyuniEnableTftp | default true | ternary "Y" "N" }}' - REPORT_DB_HOST: {{ .Values.reportDbHost | default .Values.fqdn }} -{{- if .Values.reportDbPort }} - REPORT_DB_PORT: {{ .Values.reportDbPort }} -{{- end }} -{{- if .Values.reportDbUser }} - REPORT_DB_USER: {{ .Values.reportDbUser }} -{{- end }} -{{- if .Values.reportDbPass }} - REPORT_DB_PASS: {{ .Values.reportDbPass }} -{{- end }} -{{- if .Values.reportDbName }} - REPORT_DB_NAME: {{ .Values.reportDbName }} -{{- end }} - NO_SSL: "Y" - MANAGER_MAIL_FROM: {{ .Values.uyuniMailFrom }} - UYUNI_FQDN: {{ .Values.fqdn }} - TZ: {{ .Values.timeZone | default "Etc/UTC" }} -kind: ConfigMap -metadata: - name: uyuni-config - namespace: "{{ .Release.Namespace }}" ---- -apiVersion: v1 -data: - MANAGER_PASS: {{ .Values.uyuniPass | default "spacewalk" | b64enc }} - SCC_USER: {{ .Values.sccUser | default "" | b64enc }} - SCC_PASS: {{ .Values.sccPass | default "" | b64enc }} -kind: Secret -metadata: - name: uyuni-secret - namespace: "{{ .Release.Namespace }}" - diff --git a/containers/server-helm/templates/deployment.yaml b/containers/server-helm/templates/deployment.yaml index 408f2f2d4347..67e8a4bbd838 
100644 --- a/containers/server-helm/templates/deployment.yaml +++ b/containers/server-helm/templates/deployment.yaml @@ -389,16 +389,13 @@ spec: - containerPort: 8000 - containerPort: 8001 {{- end }} -{{- if and .Values.mirror (or .Values.mirror.claimName .Values.mirror.hostPath) }} env: + - name: TZ + value: {{ .Values.timezone | default "Etc/UTC" }} +{{- if and .Values.mirror (or .Values.mirror.claimName .Values.mirror.hostPath) }} - name: MIRROR_PATH value: /mirror {{- end }} - envFrom: - - configMapRef: - name: uyuni-config - - secretRef: - name: uyuni-secret volumeMounts: - mountPath: /run name: tmp diff --git a/containers/server-helm/values.yaml b/containers/server-helm/values.yaml index be96e1c2837c..cf85d631e853 100644 --- a/containers/server-helm/values.yaml +++ b/containers/server-helm/values.yaml @@ -66,48 +66,5 @@ ingress: "traefik" ## cert-manager.io/cluster-issuer: uyuniIssuer # ingressSslAnnotations: -## uyuniUser is the login of the user accessing the database -uyuniUser: "spacewalk" -## uyuniPass is the password of the user accessing the database -uyuniPass: "spacewalk" - -## uyuniEnableTftp toggles TFTP service -uyuniEnableTftp: true - -## reportDbHost is the report database FQDN. -## Only set when using an external report database -# reportDbHost: "uyuni.local" - -## reportDbPort is the report database port to connect to. -## Only set when using an external report database -# reportDbPort: "5432" - -## reportDbUser is the username to use to connect to the report database. -## Only set when using an external report database -# reportDbUser: "" - -## reportDbPass is the password to use to connect to the report database. -## Only set when using an external report database -# reportDbPass: " - -## reportDbName is the report database name. -## Only set when using an external report database -# reportDbName: "" - -# TODO Add reportDbCaCert value and handle it - -## uyuniAdminEmail is the email where all Uyuni notifications are sent -uyuniAdminEmail: "galaxy-noise@suse.de" -## uyuniMailFrom is the address in the from field of the emails sent by Uyuni -uyuniMailFrom: "notifications@uyuni.local" - -## fqdn is the user accessible fully qualified domain name of the uyuni server -fqdn: "uyuni.local" - -## TZ is the timezone -TZ: "Etc/UTC" -## sccUser is the SUSE Customer Center login -# sccUser: "" - -## sccPass is the SUSE Customer Center password -# sccPass: "" +# The time zone to set in the containers +timezone: "Etc/UTC" From 81802e26fc0d54a1bb2194c9b13822044c034366 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Mon, 31 Jul 2023 17:31:39 +0200 Subject: [PATCH 56/80] Update containerized server README with uyuniadm --- containers/doc/server-kubernetes/README.md | 295 +++++++++++++-------- 1 file changed, 179 insertions(+), 116 deletions(-) diff --git a/containers/doc/server-kubernetes/README.md b/containers/doc/server-kubernetes/README.md index 33fb428efb2e..65298462a83a 100644 --- a/containers/doc/server-kubernetes/README.md +++ b/containers/doc/server-kubernetes/README.md @@ -1,15 +1,17 @@ -# Running the server-image on kubernetes +# Prerequisites -## Prerequisites +The following assumes you have either a single-node RKE2 or K3s cluster ready or a server with Podman installed and enough resources for the Uyuni server. +When installing on a Kubernetes cluster, it also assumes that `kubectl` and `helm` are installed on the server and configured to connect to the cluster. 
-The following assumes you have a single-node rke2 or k3s cluster ready with enough resources for the Uyuni server. -It also assumes that `kubectl` and `helm` are installed on your machine and configured to connect to the cluster. +# Preparing the installation -## Setting up the resources +## Podman specific setup -### RKE2 specific setup +There is nothing to prepare for a Podman installation. -Copy the `rke2-ingress-nginx-config.yaml` file to `/var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml` on your rke2 node. +## RKE2 specific setup + +Copy the `rke2-ingress-nginx-config.yaml` file to `/var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml` on your RKE2 node. Wait for the ingress controller to restart. Run this command to watch it restart: @@ -17,10 +19,10 @@ Run this command to watch it restart: watch kubectl get -n kube-system pod -lapp.kubernetes.io/name=rke2-ingress-nginx ``` -### K3s specific setup +## K3s specific setup -Copy the `k3s-traefik-config.yaml` file to `/var/lib/rancher/k3s/server/manifests/` on your k3s node. +Copy the `k3s-traefik-config.yaml` file to `/var/lib/rancher/k3s/server/manifests/` on your K3s node. Wait for trafik to restart. Run this commant to watch it restart: @@ -28,200 +30,265 @@ Run this commant to watch it restart: watch kubectl get -n kube-system pod -lapp.kubernetes.io/name=traefik ``` -***Offline installation:*** with k3s it is possible to preload the container images and avoid it to be fetched from a registry. +# Offline installation + + +## For K3s + +With K3s it is possible to preload the container images and avoid it to be fetched from a registry. For this, on a machine with internet access, pull the image using `podman`, `docker` or `skopeo` and save it as a `tar` archive. For example: +⚠️ **TODO**: Verify instructions ``` +for image in cert-manager-cainjector cert-manager-controller cert-manager-ctl cert-manager-webhook; do + podman pull quay.io/jetstack/$image + podman save --output $image.tar quay.io/jetstack/$image:latest +done + podman pull registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest -podman save --output server-image.tar registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + +podman save --output server.tar registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest ``` or +⚠️ **TODO**: Verify instructions ``` -skopeo copy docker://registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest docker-archive:server-image.tar:registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest +for image in cert-manager-cainjector cert-manager-controller cert-manager-ctl cert-manager-webhook; do + skopeo copy docker://quay.io/jetstack/$image:latest docker-archive:$image.tar:quay.io/jetstack/$image:latest +done + +skopeo copy docker://registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest docker-archive:server.tar:registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest ``` -Transfer the resulting `server-image.tar` to the k3s node and load it using the following command: +Copy the `cert-manager` and `uyuni/server` helm charts locally: + +⚠️ **TODO**: verify instructions ``` -k3s ctr images import server-image.tar +helm pull --repo https://charts.jetstack.io --destination . 
cert-manager +helm pull --destination . oci://registry.opensuse.org/uyuni/server ``` -In order to tell k3s to not pull the image, add `imagePullPolicy: Never` to all `initContainer`s and `container` in the `server.yaml` file: +Transfer the resulting `*.tar` images to the K3s node and load them using the following command: ``` -sed 's/^\( \+\)image:\(.*\)$/\1image: \2\n\1imagePullPolicy: Never/' -i server.yaml +for archive in `ls *.tar`; do + k3s ctr images import $archive +done ``` -### Migrating from a regular server +In order to tell K3s to not pull the images, set the image pull policy needs to be set to `Never`. +This needs to be done for both Uyuni and cert-manager helm charts. -Stop the source services: +For the Uyuni helm chart, set the `pullPolicy` chart value to `Never` by passing a `--helm-uyuni-values=uyuni-values.yaml` parameter to `uyuniadm install` with the following `uyuni-values.yaml` file content: ``` -spacewalk-service stop -systemctl stop postgresql +pullPolicy: Never ``` -Create a password-less SSH key and create a kubernetes secret with it: +For the cert-manager helm chart, create a `cert-values.yaml` file with the following content and pass `--helm-certmanager-values=values.yaml` parameter to `uyuniadm install`: ``` -ssh-keygen -kubectl create secret generic migration-ssh-key --from-file=id_rsa=$HOME/.ssh/id_rsa --from-file=id_rsa.pub=$HOME/.ssh/id_rsa.pub +image: + pullPolicy: Never ``` -Add the generated public key to the server to migrate authorized keys. -Run the migration job: +⚠️ **TODO**: verify the file names +To use the downloaded helm charts instead of the default ones, pass `--helm-uyuni-chart=server.tgz` and `--helm-certmanager-chart=cert-manager.tgz` or add the following to the `uyuniadm` configuration file: ``` -kubectl apply -f migration-job.yaml +helm: + uyuni: + chart: server.tgz + values: uyuni-values.yaml + certmanager: + chart: cert-manager.tgz + values: cert.values.yaml ``` -To follow the progression of the process, check the generated container log: +## For RKE2 -``` -kubectl logs (kubectl get pod -ljob-name=uyuni-migration -o custom-columns=NAME:.metadata.name --no-hea -ders) -``` +RKE2 doesn't allow to preload images on the nodes. +Instead, use `skopeo` to import the images in a local registry and use this one to install. -Once done, both the job and its pod will remain until the user deletes them to allow checking logs. +Copy the `cert-manager` and `uyuni/server` helm charts locally: -Proceed with the next steps. +⚠️ **TODO**: verify instructions -***Hostname***: this procedure doesn't handle any hostname change. -Certificates migration also needs to be documented, but that can be guessed for now with the instructions to setup a server from scratch. +``` +helm pull --repo https://charts.jetstack.io --destination . cert-manager +helm pull --destination . oci://registry.opensuse.org/uyuni/server +``` +⚠️ **TODO** Prepare instructions +``` +# TODO Copy the cert-manager and uyuni images +# TODO Set the uyuniadm parameters +``` -### CA certificates using `rhn-ssl-tool` +## For Podman -On the cluster node, prepare the volume with the CA password in the `/var/uyuni/ssl-build/password` file: +With K3s it is possible to preload the container images and avoid it to be fetched from a registry. +For this, on a machine with internet access, pull the image using `podman`, `docker` or `skopeo` and save it as a `tar` archive. 
+For example: ``` -mkdir -p /var/uyuni/ssl-build -chmod 700 /var/uyuni -vim /var/uyuni/ssl-build/password -chmod 500 /var/uyuni/ssl-build/password +podman pull registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest +podman save --output server-image.tar registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest ``` -Edit the `rhn-ssl-tool.yaml` file to match your FQDN and subject. -Generate the CA certificate and server certificate and key using `rhn-ssl-tool` by running: +or ``` -kubectl apply -f rhn-ssl-tool.yaml +skopeo copy docker://registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest docker-archive:server-image.tar:registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest ``` -**Note** that it pulls the big server container image and thus takes quite some time to complete. -Wait for the generated pod to be in `COMPLETED` state before continuing. - -Create the TLS secret holding the server SSL certificates by running this on the cluster node: +Transfer the resulting `server-image.tar` to the server and load it using the following command: ``` -kubectl create secret tls uyuni-cert --key /var/uyuni/ssl-build//server.key --cert /var/uyuni/ssl-build//server.crt +podman load -i server-image.tar ``` -Create a `ConfigMap` with the CA certificate by running this on the cluster node: +# Migrating from a regular server -``` -kubectl create configmap uyuni-ca --from-file=ca.crt=/var/uyuni/ssl-build/RHN-ORG-TRUSTED-SSL-CERT -``` +In order to migrate a regular Uyuni server to containers, a new machine is required: it is not possible to perform an in-place migration. +The old server is designated as the source server and the new machine is the destination one. + +The migration procedure does not perform any hostname rename. +The fully qualified domain name will be the same on the new server than on the source one. +This means the DNS records need to be adjusted after the migration to use the new server. -### CA certificates using Cert-Manager +## Preparing -Install cert-manager on the cluster. -The [default static install](https://cert-manager.io/docs/installation/#default-static-install) is enoughfor the testing use case: +### Stop the source server + +Stop the source services: ``` -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml +spacewalk-service stop +systemctl stop postgresql ``` -`cert-manager` now needs to be configured to issue certificates. -The following instructions will document setting up a self signed CA and the corresponding issuers. -Check the [documentation](https://cert-manager.io/docs/configuration/acme/) on how to set up other issuers like Let's Encrypt. +### Preparing the SSH connection -Edit the `cert-manager-selfsigned-issuer.yaml` file to match the server FQDN and subject and then apply it: +The `SSH` configuration and agent should be ready on the host for a password less connection to the source server. +The migration script only uses the source server fully qualified domain name in the SSH command. +This means that every other configuration required to connect needs to be defined in the `~/.ssh/config` file. -``` -kubectl apply -f cert-manager-selfsigned-issuer.yaml -``` +For a password less connection, the migration script will use an SSH agent on the server. +If none is running yet, run `eval $(ssh-agent)`. 
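A minimal sketch of such a host entry; the user name and key path below are placeholders, and only the `uyuni.source.fqdn` placeholder is reused from the migration command shown later:

```
# Sketch only: user and key path are examples, adjust them to the source server.
cat >> ~/.ssh/config << 'EOF'
Host uyuni.source.fqdn
    User root
    IdentityFile ~/.ssh/id_migration
EOF
ssh uyuni.source.fqdn true   # should succeed without prompting once the agent holds the key
```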
+Add the SSH key to the running agent using `ssh-add /path/to/the/private/key`. +The private key password will be prompted. -For security reason, copy the CA certificate into a separate config map, to not mount the CA secret on the pod: +### Prepare for Kubernetes -``` -kubectl get secret uyuni-ca -o=jsonpath='{.data.ca\.crt}' | base64 -d >ca.crt -kubectl create configmap uyuni-ca --from-file=ca.crt -rm ca.crt -``` +Since the migration job will start the container from scratch the Persistent Volumes need to be defined before running the `uyuniadm migrate command`. +Refer to the installation section for more details on the volumes preparation. + +## Migrating -Run the following command to append the ingress annotation to use the new CA when applying the helm chart later: +Run the following command to install a new Uyuni server from the source one after replacing the `uyuni.source.fqdn` by the proper source server FQDN: +This command will synchronize all the data from the source server to the new one: this can take time! ``` -cat >values.yaml << EOF -ingressSslAnnotations: - cert-manager.io/issuer: uyuni-ca-issuer -EOF +uyuniadm migrate uyuni.source.fqdn ``` +## Notes for Kubernetes + +⚠️ **TODO** Revisit this section! + +Once done, both the job and its pod will remain until the user deletes them to allow checking logs. + +Certificates migration also needs to be documented, but that can be guessed for now with the instructions to setup a server from scratch. + + +# Installing Uyuni + +## Volumes preparation -### Deploy the pod and its resources +### For Kubernetes +⚠️ **TODO** Document this -Change the hostname associated to the persistent volumes to match the hostname of your node: +### For Podman + +⚠️ **TODO** Document this + +## Installing + +The installation using `uyuniadm install` will ask for the password if those are not provided using the command line parameters or the configuration file. +For security reason, using command line parameters to specify passwords should be avoided: use the configuration file with proper permissions instead. + +Prepare an `uyuniadm.yaml` file like the following: ``` -sed 's/uyuni-dev/youhostname/' -i pvs.yaml +db: + password: MySuperSecretDBPass +cert: + password: MySuperSecretCAPass ``` -Define the persistent volumes by running `kubectl apply -f pvs.yaml`. -The volumes are folders on the cluster node and need to be manually created: +To dismiss the email prompts add the `email` and `emailFrom` configurations to the above file or use the `--email` and `--emailFrom` parameters for `uyuniadm install`. 
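As an illustration, a configuration file that also pre-answers the email prompts could look like the following sketch; the passwords and addresses are placeholders, and placing `email` and `emailFrom` at the top level of the file is an assumption:

```
# Sketch only: values are placeholders; keep the file readable by root only.
cat > uyuniadm.yaml << 'EOF'
db:
  password: MySuperSecretDBPass
cert:
  password: MySuperSecretCAPass
email: admin@example.com
emailFrom: notifications@example.com
EOF
chmod 600 uyuniadm.yaml
```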
+ +Run the following command to install after replacing the `uyuni.example.com` by the FQDN of the server to install: ``` -mkdir -p `kubectl get pv -o jsonpath='{.items[*].spec.local.path}'` +uyuniadm -c uyuniadm.yaml install --image registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server uyuni.example.com ``` -Run the following to add the helm chart configuration values but replace the `uyuni-dev.world-co.com` by your server's FQDN: +### Podman specific configuration + +Additional parameters can be passed to Podman using `--podman-arg` parameters or configuration like the following in `uyuniadm.yaml`: ``` -CAT >>values.yaml << EOF -repository: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni -storageClass: local-storage -exposeJavaDebug: true -uyuniMailFrom: notifications@uyuni-dev.world-co.com -fqdn: uyuni-dev.world-co.com -EOF +podman: + arg: + - -p 8000:8000 + - -p 8001:8001 ``` -If deploying on `rke2`, add the `ingress: nginx` line to the `values.yaml` file. +is equivalent to passing `--podman-arg "-p 8000:8000" --podman-arg "-p 8001:8001"` to `uyuniadm install` -You can also set more variables like `sccUser` or `sccPass`. -Check the [server-helm/values.yaml](https://github.com/uyuni-project/uyuni/blob/server-container/containers/server-helm/values.yaml) file for the complete list. +This can be usefull to expose ports like the Java debugging ones or mount additional volumes. + +### Kubernetes specific configuration -Install the helm chart from the source's `containers` folder: +The `uyuniadm install` command comes with parameters and thus configuration values for advanced helm chart configuration. +To pass additional values to the Uyuni helm chart at installation time, use the `--helm-uyuni-values chart-values.yaml` parameter or a configuration like the following: ``` -helm install uyuni server-helm -f values +helm: + uyuni: + values: chart-values.yaml ``` -Note that the Helm chart installs a deployment with one replica. -The pod name is automatically generated by kubernetes and changes at every start. +The path set as value for this configuration is a YAML file passed to the Uyuni Helm chart. +Be aware that some of the values in this file will be overriden by the `uyuniadm install` parameters. + +For example, to expose the Java debugging ports, add the `exposeJavaDebug: true` line to the helm chart values file. +You can also set more variables like `sccUser` or `sccPass`. +Check the [server-helm/values.yaml](https://github.com/uyuni-project/uyuni/blob/server-container/containers/server-helm/values.yaml) file for the complete list. -The pod takes a while to start as it needs to initialize the mounts and run the setup. -Run `kubectl get pod -lapp=uyuni` and wait for it to be in `RUNNING` state. -Even after this, give it time to complete the setup during first boot. +If deploying on RKE2, add the `ingress: nginx` line to the Helm chart values file. + +Note that the Helm chart installs a deployment with one replica. +The pod name is automatically generated by Kubernetes and changes at every start. -You can monitor the progress of the setup with `kubectl exec $(kubectl get pod -lapp=uyuni -o jsonpath={.items[0].metadata.name}) -- tail -f /var/log/susemanager_setup.log` -## Using the pod +# Using Uyuni in containers -To getting a shell in the pod run `kubectl exec -ti $(kubectl get pod -lapp=uyuni -o jsonpath={.items[0].metadata.name}) -- sh`. 
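A few illustrative invocations, assuming the `uyunictl exec` and `uyunictl cp` syntax described here; the paths and the argument order for `cp` are examples, not a definitive reference:

```
# Sketch only: paths are examples; `server:` is assumed to denote the container side.
uyunictl exec -ti bash                                 # interactive shell inside the server container
uyunictl exec tail /var/log/rhn/rhn_web_ui.log         # run a single command without entering it
uyunictl cp ./custom.conf server:/tmp/custom.conf      # copy a local file into the container
uyunictl cp server:/var/log/susemanager_setup.log .    # copy a file back from the container
```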
-Note that the part after the `--` can be any command to run inside the server.
+To get a shell in the pod, run `uyunictl exec -ti bash`.
+Note that this command can be used to run any command inside the server, like `uyunictl exec tail /var/log/rhn/rhn_web_ui.log`.
 
-To copy files to the server, use the `kubectl cp $(kubectl get pod -lapp=uyuni -o jsonpath={.items[0].metadata.name}):` command.
-Run `kubectl cp --help` for more details on how to use it.
+To copy files to the server, use the `uyunictl cp server:` command.
+Conversely, to copy files from the server, use `uyunictl cp server: `.
 
-## Developping with the pod
+# Developing with the containers
 
-### Deploying code
+## Deploying code
 
 To deploy java code on the pod change to the `java` directory and run:
 
@@ -233,7 +300,7 @@ In case you changed the pod namespace, pass the corresponding `-Ddeploy.namespac
 
 **Note** To deploy TSX or Salt code, use the `deploy-static-resources-kube` and `deploy-salt-files-kube` tasks of the ant file.
 
-### Attaching a java debugger
+## Attaching a java debugger
 
 First enable the JDWP options in both tomcat and taskomatic using the following command:
 
@@ -249,16 +316,12 @@ ant -f manager-build.xml restart-tomcat-kube restart-taskomatic-kube
 
 The debugger can now be attached to the usual ports (8000 for tomcat and 8001 for taskomatic) on the host FQDN.
 
-## Throwing everything away
+# Uninstalling
 
-If you want to create from a fresh pod, run `helm uninstall uyuni`.
-
-Then run this command on the cluster node to cleanup the volumes:
+To remove everything including the volumes, run the following command:
 
 ```
-for v in `ls /var/uyuni/`; do
-  rm -r /var/uyuni/$v; mkdir /var/uyuni/$v
-done
+uyuniadm uninstall --purge-volumes
 ```
 
-To create the pod again, just run the Helm install again and wait.
+Note that `cert-manager` will not be uninstalled if it was not installed by `uyuniadm`.
From a996c6de92f6d1d5c6623bc7c01b27d3e5274f18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 2 Aug 2023 10:36:19 +0200 Subject: [PATCH 57/80] Remove uyuni-server-systemd-services releng definition --- rel-eng/packages/uyuni-server-systemd-services | 1 - 1 file changed, 1 deletion(-) delete mode 100644 rel-eng/packages/uyuni-server-systemd-services diff --git a/rel-eng/packages/uyuni-server-systemd-services b/rel-eng/packages/uyuni-server-systemd-services deleted file mode 100644 index a44987f68204..000000000000 --- a/rel-eng/packages/uyuni-server-systemd-services +++ /dev/null @@ -1 +0,0 @@ -4.4.1-1 containers/server-systemd-services/ From ec19b6eec1471fd6da6883fddab456ac609986ee Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 3 Aug 2023 12:12:02 +0200 Subject: [PATCH 58/80] move /srv/www/htdoc/pub/repositories to /usr/share/susemanager/gpg/repositories (#7352) --- susemanager/susemanager.spec | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/susemanager/susemanager.spec b/susemanager/susemanager.spec index 5ea49028fd0a..8a5afeb9dc1c 100644 --- a/susemanager/susemanager.spec +++ b/susemanager/susemanager.spec @@ -30,7 +30,6 @@ %global salt_group root %global serverdir %{_sharedstatedir} %global wwwroot %{_localstatedir}/www -%global wwwdocroot %{wwwroot}/html %endif %if 0%{?suse_version} @@ -41,9 +40,10 @@ %global salt_group salt %global serverdir /srv %global wwwroot %{serverdir}/www -%global wwwdocroot %{wwwroot}/htdocs %endif +%global reporoot %{_datarootdir}/susemanager/gpg/ + %global debug_package %{nil} Name: susemanager @@ -156,6 +156,7 @@ Requires: spacewalk-backend-sql Requires: spacewalk-common Requires: susemanager-build-keys Requires: susemanager-sync-data +Requires: uyuni-build-keys BuildRequires: docbook-utils %description tools @@ -191,11 +192,11 @@ make -C src install PREFIX=$RPM_BUILD_ROOT PYTHON_BIN=%{pythonX} MANDIR=%{_mandi install -d -m 755 %{buildroot}/%{wwwroot}/os-images/ # empty repo for rhel base channels -mkdir -p %{buildroot}%{wwwdocroot}/pub/repositories/ -cp -r pub/empty %{buildroot}%{wwwdocroot}/pub/repositories/ +mkdir -p %{buildroot}%{reporoot}/repositories/ +cp -r pub/empty %{buildroot}%{reporoot}/repositories/ # empty repo for Ubuntu base fake channel -cp -r pub/empty-deb %{buildroot}%{wwwdocroot}/pub/repositories/ +cp -r pub/empty-deb %{buildroot}%{reporoot}/repositories/ # YaST configuration mkdir -p %{buildroot}%{_datadir}/YaST2/clients @@ -324,11 +325,10 @@ sed -i '/You can access .* via https:\/\//d' /tmp/motd 2> /dev/null ||: %dir %{pythonsmroot}/susemanager %dir %{_prefix}/share/rhn/ %dir %{_datadir}/susemanager -%dir %{wwwdocroot}/pub -%dir %{wwwdocroot}/pub/repositories -%dir %{wwwdocroot}/pub/repositories/empty -%dir %{wwwdocroot}/pub/repositories/empty/repodata -%dir %{wwwdocroot}/pub/repositories/empty-deb +%dir %{reporoot}/repositories +%dir %{reporoot}/repositories/empty +%dir %{reporoot}/repositories/empty/repodata +%dir %{reporoot}/repositories/empty-deb %config(noreplace) %{_sysconfdir}/logrotate.d/susemanager-tools %{_prefix}/share/rhn/config-defaults/rhn_*.conf %attr(0755,root,root) %{_bindir}/mgr-salt-ssh @@ -351,8 +351,8 @@ sed -i '/You can access .* via https:\/\//d' /tmp/motd 2> /dev/null ||: %{_datadir}/susemanager/__pycache__/ %endif %{_mandir}/man8/mgr-sync.8* -%{wwwdocroot}/pub/repositories/empty/repodata/*.xml* -%{wwwdocroot}/pub/repositories/empty-deb/Packages -%{wwwdocroot}/pub/repositories/empty-deb/Release 
+%{reporoot}/repositories/empty/repodata/*.xml* +%{reporoot}/repositories/empty-deb/Packages +%{reporoot}/repositories/empty-deb/Release %changelog From ad7515916cda18e3d019c6fbd5c1e1ce43f4bcb7 Mon Sep 17 00:00:00 2001 From: Dominik Gedon Date: Mon, 7 Aug 2023 14:39:16 +0200 Subject: [PATCH 59/80] QE: Fix Uyuni reposync for openSUSE Leap 15.4 (#7374) --- testsuite/features/support/commonlib.rb | 7 ++++++- testsuite/features/support/constants.rb | 11 +++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/testsuite/features/support/commonlib.rb b/testsuite/features/support/commonlib.rb index 3d1e15892803..2285a56b98db 100644 --- a/testsuite/features/support/commonlib.rb +++ b/testsuite/features/support/commonlib.rb @@ -30,7 +30,12 @@ def generate_temp_file(name, content) # This is a safety net only, the best thing to do is to not start the reposync at all. def compute_channels_to_leave_running # keep the repos needed for the auto-installation tests - do_not_kill = CHANNEL_TO_SYNCH_BY_OS_VERSION['default'] + do_not_kill = + if $product == 'Uyuni' + CHANNEL_TO_SYNCH_BY_OS_VERSION['15.4'] + else + CHANNEL_TO_SYNCH_BY_OS_VERSION['default'] + end [$minion, $build_host, $ssh_minion, $rhlike_minion].each do |node| next unless node os_version = node.os_version diff --git a/testsuite/features/support/constants.rb b/testsuite/features/support/constants.rb index 68ea24bcc433..46b6c66b7b53 100644 --- a/testsuite/features/support/constants.rb +++ b/testsuite/features/support/constants.rb @@ -428,6 +428,17 @@ res8-manager-tools-pool-x86_64 res8-manager-tools-updates-x86_64 sll8-uyuni-client-x86_64 + ], + '15.4' => + %w[ + opensuse_leap15_4-x86_64 + opensuse_leap15_4-x86_64-non-oss + opensuse_leap15_4-x86_64-non-oss-updates + opensuse_leap15_4-x86_64-updates + opensuse_leap15_4-x86_64-backports-updates + opensuse_leap15_4-x86_64-sle-updates + uyuni-proxy-devel-leap-x86_64 + opensuse_leap15_4-uyuni-client-x86_64 ] }.freeze From 0eccf12f8d9a9a8497a6d24729f578c641ec2a26 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Mon, 7 Aug 2023 19:00:43 +0200 Subject: [PATCH 60/80] move /srv/www/htdoc/pub/repositories to /usr/share/susemanager/gpg/repositories missing folder --- susemanager/susemanager.spec | 1 + 1 file changed, 1 insertion(+) diff --git a/susemanager/susemanager.spec b/susemanager/susemanager.spec index 8a5afeb9dc1c..e05f9a43526a 100644 --- a/susemanager/susemanager.spec +++ b/susemanager/susemanager.spec @@ -325,6 +325,7 @@ sed -i '/You can access .* via https:\/\//d' /tmp/motd 2> /dev/null ||: %dir %{pythonsmroot}/susemanager %dir %{_prefix}/share/rhn/ %dir %{_datadir}/susemanager +%dir %{reporoot} %dir %{reporoot}/repositories %dir %{reporoot}/repositories/empty %dir %{reporoot}/repositories/empty/repodata From c1fda9ce99e04acce2bdb55afb06e3310cee6c09 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Tue, 8 Aug 2023 11:33:14 +0200 Subject: [PATCH 61/80] fix empty repo rewrite rule --- susemanager/empty-repo.conf | 2 ++ susemanager/susemanager.spec | 4 ++++ 2 files changed, 6 insertions(+) create mode 100644 susemanager/empty-repo.conf diff --git a/susemanager/empty-repo.conf b/susemanager/empty-repo.conf new file mode 100644 index 000000000000..82b468721cff --- /dev/null +++ b/susemanager/empty-repo.conf @@ -0,0 +1,2 @@ +RewriteRule ^/pub/repositories/empty/(.*)$ /gpg/repositories/empty/$1 [L,PT] +RewriteRule ^/pub/repositories/empty-deb/(.*)$ /gpg/repositories/empty-deb/$1 [L,PT] diff --git a/susemanager/susemanager.spec b/susemanager/susemanager.spec index 
e05f9a43526a..13644c1f166b 100644 --- a/susemanager/susemanager.spec +++ b/susemanager/susemanager.spec @@ -190,6 +190,7 @@ install -m 0644 etc/logrotate.d/susemanager-tools %{buildroot}/%{_sysconfdir}/lo install -m 0644 etc/slp.reg.d/susemanager.reg %{buildroot}/%{_sysconfdir}/slp.reg.d make -C src install PREFIX=$RPM_BUILD_ROOT PYTHON_BIN=%{pythonX} MANDIR=%{_mandir} install -d -m 755 %{buildroot}/%{wwwroot}/os-images/ +install empty-repo.conf $RPM_BUILD_ROOT/etc/apache2/conf.d/empty-repo.conf # empty repo for rhel base channels mkdir -p %{buildroot}%{reporoot}/repositories/ @@ -330,6 +331,8 @@ sed -i '/You can access .* via https:\/\//d' /tmp/motd 2> /dev/null ||: %dir %{reporoot}/repositories/empty %dir %{reporoot}/repositories/empty/repodata %dir %{reporoot}/repositories/empty-deb +%dir /etc/apache2 +%dir /etc/apache2/conf.d %config(noreplace) %{_sysconfdir}/logrotate.d/susemanager-tools %{_prefix}/share/rhn/config-defaults/rhn_*.conf %attr(0755,root,root) %{_bindir}/mgr-salt-ssh @@ -355,5 +358,6 @@ sed -i '/You can access .* via https:\/\//d' /tmp/motd 2> /dev/null ||: %{reporoot}/repositories/empty/repodata/*.xml* %{reporoot}/repositories/empty-deb/Packages %{reporoot}/repositories/empty-deb/Release +/etc/apache2/conf.d/empty-repo.conf %changelog From 11af2506b257a29e5452f1ee5cf4c617c414917f Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Tue, 8 Aug 2023 11:42:28 +0200 Subject: [PATCH 62/80] fix susemanager-tools --- susemanager/susemanager.spec | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/susemanager/susemanager.spec b/susemanager/susemanager.spec index 13644c1f166b..ec684d3c25cc 100644 --- a/susemanager/susemanager.spec +++ b/susemanager/susemanager.spec @@ -190,7 +190,8 @@ install -m 0644 etc/logrotate.d/susemanager-tools %{buildroot}/%{_sysconfdir}/lo install -m 0644 etc/slp.reg.d/susemanager.reg %{buildroot}/%{_sysconfdir}/slp.reg.d make -C src install PREFIX=$RPM_BUILD_ROOT PYTHON_BIN=%{pythonX} MANDIR=%{_mandir} install -d -m 755 %{buildroot}/%{wwwroot}/os-images/ -install empty-repo.conf $RPM_BUILD_ROOT/etc/apache2/conf.d/empty-repo.conf +mkdir -p %{buildroot}/etc/apache2/conf.d +install empty-repo.conf %{buildroot}/etc/apache2/conf.d/empty-repo.conf # empty repo for rhel base channels mkdir -p %{buildroot}%{reporoot}/repositories/ From 4f987599b31ef4523215ce32960b8292cd83efa0 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 2 Aug 2023 11:40:38 +0200 Subject: [PATCH 63/80] remove client_config_update --- spacewalk/certs-tools/Makefile.certs | 10 +- spacewalk/certs-tools/client_config_update.py | 216 ------------------ spacewalk/certs-tools/mgr-bootstrap.sgml | 1 - spacewalk/certs-tools/rhn_bootstrap.py | 44 +++- ...es.mbussolotto.remove_client_config_update | 1 + .../certs-tools/spacewalk-certs-tools.spec | 3 +- spacewalk/certs-tools/spacewalk-ssh-push-init | 4 +- 7 files changed, 46 insertions(+), 233 deletions(-) delete mode 100755 spacewalk/certs-tools/client_config_update.py create mode 100644 spacewalk/certs-tools/spacewalk-certs-tools.changes.mbussolotto.remove_client_config_update diff --git a/spacewalk/certs-tools/Makefile.certs b/spacewalk/certs-tools/Makefile.certs index 65513d5ad06a..8cc42ee4ca1b 100644 --- a/spacewalk/certs-tools/Makefile.certs +++ b/spacewalk/certs-tools/Makefile.certs @@ -18,7 +18,7 @@ SUBDIR = certs FILES = __init__ rhn_ssl_tool sslToolCli sslToolConfig sslToolLib \ - timeLib rhn_bootstrap rhn_bootstrap_strings client_config_update \ + timeLib rhn_bootstrap rhn_bootstrap_strings \ mgr_ssl_cert_setup 
INSTALL_ROOT_FILES = gen-rpm.sh sign.sh update-ca-cert-trust.sh @@ -51,17 +51,9 @@ install :: $(SBINFILES) $(BINFILES) $(PYBINFILES) $(MANS) $(PREFIX)/$(MANDIR) $(INSTALL_BIN) $(f) $(PREFIX)$(BINDIR)/$(f) ; ) $(foreach f,$(PYBINFILES), \ $(INSTALL_BIN) $(f) $(PREFIX)$(BINDIR)/$(f)-$(PYTHONVERSION) ; ) - -install :: instClientScript @$(foreach f,$(INSTALL_ROOT_FILES), \ $(INSTALL_DATA) $(f) $(PREFIX)$(ROOT)/$(SUBDIR)/$(f) ; ) -# note: this file is in two places. One in the RPM and one in pub/bootstrap/ -instClientScript: $(PUB_BOOTSTRAP_DIR)/client_config_update.py - -$(PUB_BOOTSTRAP_DIR)/client_config_update.py : $(PREFIX)/$(PUB_BOOTSTRAP_DIR) client_config_update.py - install -m 0755 client_config_update.py $(PREFIX)/$@ - %.$(MANSECT) : %.sgml /usr/bin/docbook2man $< diff --git a/spacewalk/certs-tools/client_config_update.py b/spacewalk/certs-tools/client_config_update.py deleted file mode 100755 index bf8bebf2ede3..000000000000 --- a/spacewalk/certs-tools/client_config_update.py +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/python -u -# -# Copyright (c) 2008--2013 Red Hat, Inc. -# -# This software is licensed to you under the GNU General Public License, -# version 2 (GPLv2). There is NO WARRANTY for this software, express or -# implied, including the implied warranties of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 -# along with this software; if not, see -# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. -# -# Red Hat trademarks are not licensed under GPLv2. No permission is -# granted to use or replicate Red Hat trademarks that are incorporated -# in this software or its documentation. -# -# key=value formatted "config file" mapping script -# -# NOT TO BE USED DIRECTLY -# This is called by a script generated by the rhn-bootstrap utility. -# -# Specifically engineered with the RHN Update Agent configuration files -# in mind though it is relatively generic in nature. -# -# Author: Todd Warner -# - -""" -Client configuration mapping script that writes to an SUSE Manager Update Agent-type -config file(s) - -I.e., maps a file with SUSE Manager Update Agent-like key=value pairs e.g., -serverURL=https://test-satellite.example.redhat.com/XMLRPC -enableProxy=0 -sslCACert=/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT - -And maps that to the client's configuration files. - -------------- -To map new settings to a file that uses the format key=value, where -key[comment]=value is a comment line you do this (e.g., mapping -key=value pairs to /etc/sysconfig/rhn/up2date): - - 1. edit a file (e.g., 'client-config-overrides.txt'), inputing new key=value pairs - to replace in config file (e.g., /etc/sysconfig/rhn/up2date). - Specifically: -serverURL=https://test-satellite.example.redhat.com/XMLRPC - - 2. ./client_config_update.py /etc/sysconfig/rhn/up2date client-config-overrides.txt - -That's all there is to it. - -If you are running an older RHN Update Agent, the rhn_register file can be -mapped as well: - - ./client_config_update.py /etc/sysconfig/rhn/rhn_register client-config-overrides.txt -""" - - -from __future__ import print_function -import os -import sys -import tempfile - -DEFAULT_CLIENT_CONFIG_OVERRIDES = 'client-config-overrides.txt' - -RHN_REGISTER = "/etc/sysconfig/rhn/rhn_register" -UP2DATE = "/etc/sysconfig/rhn/up2date" - - -def _parseConfigLine(line): - """parse a line from a config file. Format can be either "key=value\n" - or "whatever text\n" - - return either: - (key, value) - or - None - The '\n' is always stripped from the value. 
- """ - - kv = line.decode('utf8').split('=') - if len(kv) < 2: - # not a setting - return None - - if len(kv) > 2: - # '=' is part of the value, need to rejoin it. - kv = [kv[0], '='.join(kv[1:])] - - if kv[0].find('[comment]') > 0: - # comment; not a setting - return None - - # it's a setting, trim the '\n' and return the (key, value) pair. - kv[0] = kv[0].strip() - kv[1] = kv[1].strip() - return tuple(kv) - -def readConfigFile(configFile): - "read in config file, return dictionary of key/value pairs" - - fin = open(configFile, 'rb') - - d = {} - - for line in fin.readlines(): - kv = _parseConfigLine(line) - if kv: - d[kv[0]] = kv[1] - return d - - -def dumpConfigFile(configFile): - "print out dictionary of key/value pairs from configFile" - - import pprint - pprint.pprint(readConfigFile(configFile)) - - -def mapNewSettings(configFile, dnew): - fo = tempfile.TemporaryFile(prefix = '/tmp/client-config-overrides-', mode = 'r+b') - fin = open(configFile, 'rb') - - changedYN = 0 - - # write to temp file - for line in fin.readlines(): - kv = _parseConfigLine(line) - if not kv: - # not a setting, write the unaltered line - fo.write(line) - else: - # it's a setting, populate from the dictionary - if kv[0] in dnew: - if dnew[kv[0]] != kv[1]: - fo.write(('%s=%s\n' % (kv[0], dnew[kv[0]])).encode('utf8')) - changedYN = 1 - else: - fo.write(line) - # it's a setting but not being mapped - else: - fo.write(line) - fin.close() - - if changedYN: - # write from temp file to configFile - fout = open(configFile, 'wb') - fo.seek(0) - fout.write(fo.read()) - print('*', configFile, 'written') - - -def parseCommandline(): - """parse/process the commandline - - Commandline is dead simple for easiest portability. - """ - - # USAGE & HELP! - if '--usage' in sys.argv or '-h' in sys.argv or '--help' in sys.argv: - print("""\ -usage: python %s CONFIG_FILENAME NEW_MAPPINGS [options] -arguments: - CONFIG_FILENAME config file to alter - NEW_MAPPINGS file containing new settings that map onto the - config file -options: - -h, --help show this help message and exit - --usage show brief usage summary - -examples: - python %s %s %s - python %s %s %s -""" % (sys.argv[0], - sys.argv[0], RHN_REGISTER, DEFAULT_CLIENT_CONFIG_OVERRIDES, - sys.argv[0], UP2DATE, DEFAULT_CLIENT_CONFIG_OVERRIDES)) - - sys.exit(0) - - - if len(sys.argv) != 3: - msg = "ERROR: exactly two arguments are required, see --help" - raise TypeError(msg) - - configFilename = os.path.abspath(sys.argv[1]) - newMappings = os.path.abspath(sys.argv[2]) - - if not os.path.exists(configFilename): - msg = ("ERROR: filename to alter (1st argument), does not exist:\n" - " %s" - % configFilename) - raise IOError(msg) - - if not os.path.exists(newMappings): - msg = ("ERROR: filename that contains the mappings (2nd argument), " - "does not exist:\n" - " %s" % newMappings) - raise IOError(msg) - - return configFilename, newMappings - - -def main(): - "parse commandline, process config file key=value mappings" - - configFilename, newMappings = parseCommandline() - #dumpConfigFile(configFilename) - #mapNewSettings('test-up2date', readConfigFile(DEFAULT_CLIENT_CONFIG_OVERRIDES)) - - mapNewSettings(configFilename, readConfigFile(newMappings)) - -if __name__ == '__main__': - try: - sys.exit(main() or 0) - except Exception as err: - print(err) diff --git a/spacewalk/certs-tools/mgr-bootstrap.sgml b/spacewalk/certs-tools/mgr-bootstrap.sgml index 981ee85d66a7..17093f450488 100644 --- a/spacewalk/certs-tools/mgr-bootstrap.sgml +++ b/spacewalk/certs-tools/mgr-bootstrap.sgml @@ -185,7 
+185,6 @@ Files /usr/bin/mgr-bootstrap - /usr/bin/client_config_update.py /usr/bin/client-config-overrides.txt diff --git a/spacewalk/certs-tools/rhn_bootstrap.py b/spacewalk/certs-tools/rhn_bootstrap.py index b2011e93f7a5..8b098b186087 100755 --- a/spacewalk/certs-tools/rhn_bootstrap.py +++ b/spacewalk/certs-tools/rhn_bootstrap.py @@ -39,7 +39,6 @@ ## local imports from uyuni.common import rhn_rpm from spacewalk.common.rhnConfig import CFG, initCFG -from .client_config_update import readConfigFile from .rhn_bootstrap_strings import \ getHeader, getGPGKeyImportSh, \ getCorpCACertSh, getRegistrationStackSh, \ @@ -85,6 +84,47 @@ errnoCANotFound = 16 errnoGPGNotFound = 17 +def _parseConfigLine(line): + """parse a line from a config file. Format can be either "key=value\n" + or "whatever text\n" + + return either: + (key, value) + or + None + The '\n' is always stripped from the value. + """ + + kv = line.decode('utf8').split('=') + if len(kv) < 2: + # not a setting + return None + + if len(kv) > 2: + # '=' is part of the value, need to rejoin it. + kv = [kv[0], '='.join(kv[1:])] + + if kv[0].find('[comment]') > 0: + # comment; not a setting + return None + + # it's a setting, trim the '\n' and return the (key, value) pair. + kv[0] = kv[0].strip() + kv[1] = kv[1].strip() + return tuple(kv) + +def readConfigFile(configFile): + "read in config file, return dictionary of key/value pairs" + + fin = open(configFile, 'rb') + + d = {} + + for line in fin.readlines(): + kv = _parseConfigLine(line) + if kv: + d[kv[0]] = kv[1] + return d # should come out of common code when we move this code out of # rhns-certs-tools @@ -500,8 +540,6 @@ def writeClientConfigOverrides(options): fout.write("""\ # RHN Client (rhn_register/up2date) config-overrides file v4.0 # -# To be used only in conjuction with client_config_update.py -# # This file was autogenerated. 
# # The simple rules: diff --git a/spacewalk/certs-tools/spacewalk-certs-tools.changes.mbussolotto.remove_client_config_update b/spacewalk/certs-tools/spacewalk-certs-tools.changes.mbussolotto.remove_client_config_update new file mode 100644 index 000000000000..e5dff0de0831 --- /dev/null +++ b/spacewalk/certs-tools/spacewalk-certs-tools.changes.mbussolotto.remove_client_config_update @@ -0,0 +1 @@ +- Remove client_config_update.py diff --git a/spacewalk/certs-tools/spacewalk-certs-tools.spec b/spacewalk/certs-tools/spacewalk-certs-tools.spec index fbb69106fb9c..ff9c2dd391fd 100644 --- a/spacewalk/certs-tools/spacewalk-certs-tools.spec +++ b/spacewalk/certs-tools/spacewalk-certs-tools.spec @@ -87,7 +87,7 @@ sed -i 's|etc/httpd/conf|etc/apache2|g' ssl-howto.txt %install install -d -m 755 $RPM_BUILD_ROOT/%{rhnroot}/certs -sed -i '1s|python\b|python3|' rhn-ssl-tool mgr-package-rpm-certificate-osimage rhn-bootstrap client_config_update.py +sed -i '1s|python\b|python3|' rhn-ssl-tool mgr-package-rpm-certificate-osimage rhn-bootstrap make -f Makefile.certs install PREFIX=$RPM_BUILD_ROOT ROOT=%{rhnroot} \ PYTHONPATH=%{python3_sitelib} PYTHONVERSION=%{python3_version} \ MANDIR=%{_mandir} PUB_BOOTSTRAP_DIR=%{pub_bootstrap_dir} @@ -126,7 +126,6 @@ ln -s spacewalk-ssh-push-init $RPM_BUILD_ROOT/%{_sbindir}/mgr-ssh-push-init %doc %{_mandir}/man1/mgr-*.1* %doc ssl-howto-simple.txt ssl-howto.txt %license LICENSE -%{pub_bootstrap_dir}/client_config_update.py* %dir %{rhnroot} %dir %{pub_dir} %dir %{pub_bootstrap_dir} diff --git a/spacewalk/certs-tools/spacewalk-ssh-push-init b/spacewalk/certs-tools/spacewalk-ssh-push-init index d95629abf93a..08e505a505fc 100755 --- a/spacewalk/certs-tools/spacewalk-ssh-push-init +++ b/spacewalk/certs-tools/spacewalk-ssh-push-init @@ -268,7 +268,7 @@ if [ "${USE_TUNNEL}" = "Y" ]; then exit_in_case_of_error echo "* Cleaning up temporary files" - ssh -i ${SSH_IDENTITY} ${OPTIONS} ${USER}@${CLIENT} "rm -fv enable.sh bootstrap.sh client-config-overrides-tunnel.txt client_config_update.py" + ssh -i ${SSH_IDENTITY} ${OPTIONS} ${USER}@${CLIENT} "rm -fv enable.sh bootstrap.sh client-config-overrides-tunnel.txt" cleanup_temp_files elif [ -n "${BOOTSTRAP}" ]; then # Simple registration with given bootstrap script @@ -278,5 +278,5 @@ elif [ -n "${BOOTSTRAP}" ]; then exit_in_case_of_error echo "* Cleaning up temporary files remotely" - ssh -i ${SSH_IDENTITY} ${OPTIONS} ${USER}@${CLIENT} "rm -fv bootstrap.sh client-config-overrides.txt client_config_update.py" + ssh -i ${SSH_IDENTITY} ${OPTIONS} ${USER}@${CLIENT} "rm -fv bootstrap.sh client-config-overrides.txt" fi From e868b417c462724e14f88602006f2aa3f1431b11 Mon Sep 17 00:00:00 2001 From: elariekerboull Date: Wed, 2 Aug 2023 16:28:40 +0200 Subject: [PATCH 64/80] Move repository creation and sync to reposync (#7342) --- testsuite/features/core/srv_channels_add.feature | 2 +- .../features/{core => reposync}/srv_create_repository.feature | 0 testsuite/run_sets/core.yml | 1 - testsuite/run_sets/refhost.yml | 2 +- testsuite/run_sets/reposync.yml | 1 + 5 files changed, 3 insertions(+), 3 deletions(-) rename testsuite/features/{core => reposync}/srv_create_repository.feature (100%) diff --git a/testsuite/features/core/srv_channels_add.feature b/testsuite/features/core/srv_channels_add.feature index 20a94fc833b6..2baa076144c8 100644 --- a/testsuite/features/core/srv_channels_add.feature +++ b/testsuite/features/core/srv_channels_add.feature @@ -3,7 +3,7 @@ # # This feature can cause failures in: # - 
features/core/srv_create_activationkey.feature -# - features/core/srv_create_repository.feature +# - features/reposync/srv_create_repository.feature # - features/init_client/sle_minion.feature # - features/init_client/sle_ssh_minion.feature # - features/init_client/min_rhlike.feature diff --git a/testsuite/features/core/srv_create_repository.feature b/testsuite/features/reposync/srv_create_repository.feature similarity index 100% rename from testsuite/features/core/srv_create_repository.feature rename to testsuite/features/reposync/srv_create_repository.feature diff --git a/testsuite/run_sets/core.yml b/testsuite/run_sets/core.yml index 541c79e8dc03..0617abde816a 100644 --- a/testsuite/run_sets/core.yml +++ b/testsuite/run_sets/core.yml @@ -14,7 +14,6 @@ - features/core/srv_organization_credentials.feature - features/core/srv_user_preferences.feature - features/core/srv_channels_add.feature -- features/core/srv_create_repository.feature - features/core/srv_create_activationkey.feature - features/core/srv_osimage.feature - features/core/srv_docker.feature diff --git a/testsuite/run_sets/refhost.yml b/testsuite/run_sets/refhost.yml index cd09326b7421..02ee8f32dd25 100644 --- a/testsuite/run_sets/refhost.yml +++ b/testsuite/run_sets/refhost.yml @@ -12,7 +12,7 @@ - features/core/allcli_sanity.feature - features/core/srv_first_settings.feature - features/core/srv_channels_add.feature -- features/core/srv_create_repository.feature +- features/reposync/srv_create_repository.feature - features/core/srv_create_activationkey.feature - features/core/srv_docker.feature diff --git a/testsuite/run_sets/reposync.yml b/testsuite/run_sets/reposync.yml index 87664f936ab5..a6423908d0c9 100644 --- a/testsuite/run_sets/reposync.yml +++ b/testsuite/run_sets/reposync.yml @@ -13,5 +13,6 @@ - features/reposync/srv_sync_products.feature - features/reposync/srv_enable_sync_products.feature - features/reposync/srv_wait_for_reposync.feature +- features/reposync/srv_create_repository.feature ## Channels and Product synchronization features END ### From 2dd763bff2c1d5e221b93bf24ac00ff8e7e69d98 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 9 Aug 2023 09:22:32 +0200 Subject: [PATCH 65/80] Revert "QE: Fix Uyuni reposync for openSUSE Leap 15.4 (#7374)" This reverts commit ad7515916cda18e3d019c6fbd5c1e1ce43f4bcb7. --- testsuite/features/support/commonlib.rb | 7 +------ testsuite/features/support/constants.rb | 11 ----------- 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/testsuite/features/support/commonlib.rb b/testsuite/features/support/commonlib.rb index 2285a56b98db..3d1e15892803 100644 --- a/testsuite/features/support/commonlib.rb +++ b/testsuite/features/support/commonlib.rb @@ -30,12 +30,7 @@ def generate_temp_file(name, content) # This is a safety net only, the best thing to do is to not start the reposync at all. 
def compute_channels_to_leave_running # keep the repos needed for the auto-installation tests - do_not_kill = - if $product == 'Uyuni' - CHANNEL_TO_SYNCH_BY_OS_VERSION['15.4'] - else - CHANNEL_TO_SYNCH_BY_OS_VERSION['default'] - end + do_not_kill = CHANNEL_TO_SYNCH_BY_OS_VERSION['default'] [$minion, $build_host, $ssh_minion, $rhlike_minion].each do |node| next unless node os_version = node.os_version diff --git a/testsuite/features/support/constants.rb b/testsuite/features/support/constants.rb index 46b6c66b7b53..68ea24bcc433 100644 --- a/testsuite/features/support/constants.rb +++ b/testsuite/features/support/constants.rb @@ -428,17 +428,6 @@ res8-manager-tools-pool-x86_64 res8-manager-tools-updates-x86_64 sll8-uyuni-client-x86_64 - ], - '15.4' => - %w[ - opensuse_leap15_4-x86_64 - opensuse_leap15_4-x86_64-non-oss - opensuse_leap15_4-x86_64-non-oss-updates - opensuse_leap15_4-x86_64-updates - opensuse_leap15_4-x86_64-backports-updates - opensuse_leap15_4-x86_64-sle-updates - uyuni-proxy-devel-leap-x86_64 - opensuse_leap15_4-uyuni-client-x86_64 ] }.freeze From 929d78a6aa0253f09a691e5d19bbd18f5a6b33dd Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Fri, 11 Aug 2023 15:24:54 +0200 Subject: [PATCH 66/80] setup postfix hostname using conf file --- susemanager/bin/mgr-setup | 23 ++++--------------- .../susemanager.changes.mbussolotto.postfix | 1 + utils/spacewalk-hostname-rename | 4 ++++ 3 files changed, 9 insertions(+), 19 deletions(-) create mode 100644 susemanager/susemanager.changes.mbussolotto.postfix diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 9b555254b168..797a0dc45706 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -172,25 +172,10 @@ fi } setup_mail () { - -# fix hostname for postfix -REALHOSTNAME=`hostname -f` -if [ -z "$REALHOSTNAME" ]; then - for i in `ip -f inet -o addr show scope global | awk '{print $4}' | awk -F \/ '{print $1}'`; do - for j in `dig +noall +answer +time=2 +tries=1 -x $i | awk '{print $5}' | sed 's/\.$//'`; do - if [ -n "$j" ]; then - REALHOSTNAME=$j - break 2 - fi - done - done -fi -if [ -n "$REALHOSTNAME" ]; then - echo "$REALHOSTNAME" > /etc/hostname -fi -# bsc#979664 - SUSE Manager requires a working mail system -systemctl --quiet enable postfix 2>&1 -systemctl restart postfix + postconf -e myhostname=$HOSTNAME + # bsc#979664 - SUSE Manager requires a working mail system + systemctl --quiet enable postfix 2>&1 + systemctl restart postfix } setup_hostname() { diff --git a/susemanager/susemanager.changes.mbussolotto.postfix b/susemanager/susemanager.changes.mbussolotto.postfix new file mode 100644 index 000000000000..76abe2b83535 --- /dev/null +++ b/susemanager/susemanager.changes.mbussolotto.postfix @@ -0,0 +1 @@ +- setup postfix hostname using conf file diff --git a/utils/spacewalk-hostname-rename b/utils/spacewalk-hostname-rename index cde5ff6c0723..66931e5890c7 100755 --- a/utils/spacewalk-hostname-rename +++ b/utils/spacewalk-hostname-rename @@ -638,6 +638,10 @@ if [ -e $MGR_SYNC_CONF ]; then fi print_status 0 # just simulate end +echo -n "Changing postfix settings ... " | tee -a $LOG +postconf -e myhostname=$HOSTNAME +systemctl restart postfix + echo -n "Starting spacewalk services ... 
" | tee -a $LOG if [ "$DB_SERVICE" != "" ] then From a782d4ccb3998b5346563c1a6fec40a45b2b2205 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Tue, 22 Aug 2023 14:44:50 +0200 Subject: [PATCH 67/80] update helm chart to persist postfix configuration --- .../doc/server-kubernetes/migration-job.yaml | 6 +++++ containers/doc/server-kubernetes/pvs.yaml | 25 +++++++++++++++++++ .../server-helm/templates/deployment.yaml | 21 ++++++++++++++++ containers/server-helm/templates/volumes.yaml | 24 ++++++++++++++++++ .../susemanager.changes.mbussolotto.postfix | 1 - 5 files changed, 76 insertions(+), 1 deletion(-) delete mode 100644 susemanager/susemanager.changes.mbussolotto.postfix diff --git a/containers/doc/server-kubernetes/migration-job.yaml b/containers/doc/server-kubernetes/migration-job.yaml index f695ef8079cc..24c0c092b906 100644 --- a/containers/doc/server-kubernetes/migration-job.yaml +++ b/containers/doc/server-kubernetes/migration-job.yaml @@ -39,6 +39,7 @@ spec: /etc/salt \ /etc/tomcat \ /etc/cobbler \ + /etc/postfix \ /etc/sysconfig; do rsync -avz uyuni.world-co.com:$folder/ $folder; @@ -91,6 +92,8 @@ spec: name: etc-cobbler - mountPath: /etc/sysconfig name: etc-sysconfig + - mountPath: /etc/postfix + name: etc-postfix - mountPath: /root/keys name: ssh-key volumes: @@ -157,6 +160,9 @@ spec: - name: etc-sysconfig persistentVolumeClaim: claimName: etc-sysconfig + - name: etc-postfix + persistentVolumeClaim: + claimName: etc-postfix - name: ssh-key secret: secretName: migration-ssh-key diff --git a/containers/doc/server-kubernetes/pvs.yaml b/containers/doc/server-kubernetes/pvs.yaml index e75a64c99c42..a71e69596897 100644 --- a/containers/doc/server-kubernetes/pvs.yaml +++ b/containers/doc/server-kubernetes/pvs.yaml @@ -529,3 +529,28 @@ spec: operator: In values: - uyuni-dev +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: etc-postfix + labels: + data: etc-postfix +spec: + capacity: + storage: 1Mi + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + storageClassName: local-storage + local: + path: /var/uyuni/etc-postfix + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - uyuni-dev + diff --git a/containers/server-helm/templates/deployment.yaml b/containers/server-helm/templates/deployment.yaml index 67e8a4bbd838..524a934ac099 100644 --- a/containers/server-helm/templates/deployment.yaml +++ b/containers/server-helm/templates/deployment.yaml @@ -367,6 +367,22 @@ spec: volumeMounts: - mountPath: /mnt name: etc-sysconfig + - name: init-etc-postfix + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/postfix /mnt; + chmod --reference=/etc/postfix /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/postfix/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-postfix containers: - name: uyuni image: {{- include "deployment.container.image" (dict "name" "server" "global" .) 
| indent 1}} @@ -447,6 +463,8 @@ spec: name: etc-sysconfig - mountPath: /etc/pki/tls name: etc-tls + - mountPath: /etc/postfix + name: etc-postfix - name: ca-cert mountPath: /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT readOnly: true @@ -532,6 +550,9 @@ spec: - name: etc-sysconfig persistentVolumeClaim: claimName: etc-sysconfig + - name: etc-postfix + persistentVolumeClaim: + claimName: etc-postfix - name: ca-cert configMap: name: uyuni-ca diff --git a/containers/server-helm/templates/volumes.yaml b/containers/server-helm/templates/volumes.yaml index c909be28363e..8ad678d3842b 100644 --- a/containers/server-helm/templates/volumes.yaml +++ b/containers/server-helm/templates/volumes.yaml @@ -549,3 +549,27 @@ spec: matchLabels: data: etc-tls {{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-postfix + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 1Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-postfix +{{- end }} diff --git a/susemanager/susemanager.changes.mbussolotto.postfix b/susemanager/susemanager.changes.mbussolotto.postfix deleted file mode 100644 index 76abe2b83535..000000000000 --- a/susemanager/susemanager.changes.mbussolotto.postfix +++ /dev/null @@ -1 +0,0 @@ -- setup postfix hostname using conf file From 4667b8e2b8656b20e32161080ebcf62a598c8286 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 23 Aug 2023 09:58:37 +0200 Subject: [PATCH 68/80] remove since covered by helm charts and uyuniadm --- .../doc/server-kubernetes/migration-job.yaml | 175 ------ containers/doc/server-kubernetes/pvs.yaml | 556 ------------------ 2 files changed, 731 deletions(-) delete mode 100644 containers/doc/server-kubernetes/migration-job.yaml delete mode 100644 containers/doc/server-kubernetes/pvs.yaml diff --git a/containers/doc/server-kubernetes/migration-job.yaml b/containers/doc/server-kubernetes/migration-job.yaml deleted file mode 100644 index 24c0c092b906..000000000000 --- a/containers/doc/server-kubernetes/migration-job.yaml +++ /dev/null @@ -1,175 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: uyuni-migration -spec: - backoffLimit: 1 - template: - spec: - restartPolicy: Never - containers: - - name: rsync-var-pgsql - image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest - command: - - sh - - -x - - -c - - > - mkdir /root/.ssh; - ssh-keyscan -t rsa uyuni.world-co.com >>~/.ssh/known_hosts; - ln -s /root/keys/id_rsa /root/.ssh/id_rsa; - ln -s /root/keys/id_rsa.pub /root/.ssh/id_rsa.pub; - for folder in /var/lib/pgsql \ - /var/cache \ - /var/spacewalk \ - /var/log \ - /srv/salt \ - /srv/www/htdocs/pub \ - /srv/www/cobbler \ - /srv/www/os-images \ - /srv/tftpboot \ - /srv/formula_metadata \ - /srv/pillar \ - /srv/susemanager \ - /srv/spacewalk \ - /root \ - /etc/apache2 \ - /etc/rhn \ - /etc/systemd/system/multi-user.target.wants \ - /etc/salt \ - /etc/tomcat \ - /etc/cobbler \ - /etc/postfix \ - /etc/sysconfig; - do - rsync -avz uyuni.world-co.com:$folder/ $folder; - done; - rm -f /srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT; - ln -s /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT /srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT; - echo 'server.no_ssl = 1' >> /etc/rhn/rhn.conf; - sed 
's/address=[^:]*:/address=uyuni:/' -i /etc/rhn/taskomatic.conf; - sed 's/address=[^:]*:/address=uyuni:/' -i /etc/sysconfig/tomcat; - volumeMounts: - - mountPath: /var/lib/pgsql - name: var-pgsql - - mountPath: /var/cache - name: var-cache - - mountPath: /var/spacewalk - name: var-spacewalk - - mountPath: /var/log - name: var-log - - mountPath: /srv/salt - name: srv-salt - - mountPath: /srv/www/htdocs/pub - name: srv-www-pub - - mountPath: /srv/www/cobbler - name: srv-www-cobbler - - mountPath: /srv/www/os-images - name: srv-www-osimages - - mountPath: /srv/tftpboot - name: srv-tftpboot - - mountPath: /srv/formula_metadata - name: srv-formulametadata - - mountPath: /srv/pillar - name: srv-pillar - - mountPath: /srv/susemanager - name: srv-susemanager - - mountPath: /srv/spacewalk - name: srv-spacewalk - - mountPath: /root - name: root - - mountPath: /etc/apache2 - name: etc-apache2 - - mountPath: /etc/rhn - name: etc-rhn - - mountPath: /etc/systemd/system/multi-user.target.wants - name: etc-systemd - - mountPath: /etc/salt - name: etc-salt - - mountPath: /etc/tomcat - name: etc-tomcat - - mountPath: /etc/cobbler - name: etc-cobbler - - mountPath: /etc/sysconfig - name: etc-sysconfig - - mountPath: /etc/postfix - name: etc-postfix - - mountPath: /root/keys - name: ssh-key - volumes: - - name: var-pgsql - persistentVolumeClaim: - claimName: var-pgsql - - name: var-cache - persistentVolumeClaim: - claimName: var-cache - - name: var-spacewalk - persistentVolumeClaim: - claimName: var-spacewalk - - name: var-log - persistentVolumeClaim: - claimName: var-log - - name: srv-salt - persistentVolumeClaim: - claimName: srv-salt - - name: srv-www-pub - persistentVolumeClaim: - claimName: srv-www-pub - - name: srv-www-cobbler - persistentVolumeClaim: - claimName: srv-www-cobbler - - name: srv-www-osimages - persistentVolumeClaim: - claimName: srv-www-osimages - - name: srv-tftpboot - persistentVolumeClaim: - claimName: srv-tftpboot - - name: srv-formulametadata - persistentVolumeClaim: - claimName: srv-formulametadata - - name: srv-pillar - persistentVolumeClaim: - claimName: srv-pillar - - name: srv-susemanager - persistentVolumeClaim: - claimName: srv-susemanager - - name: srv-spacewalk - persistentVolumeClaim: - claimName: srv-spacewalk - - name: root - persistentVolumeClaim: - claimName: root - - name: etc-apache2 - persistentVolumeClaim: - claimName: etc-apache2 - - name: etc-rhn - persistentVolumeClaim: - claimName: etc-rhn - - name: etc-systemd - persistentVolumeClaim: - claimName: etc-systemd - - name: etc-salt - persistentVolumeClaim: - claimName: etc-salt - - name: etc-tomcat - persistentVolumeClaim: - claimName: etc-tomcat - - name: etc-cobbler - persistentVolumeClaim: - claimName: etc-cobbler - - name: etc-sysconfig - persistentVolumeClaim: - claimName: etc-sysconfig - - name: etc-postfix - persistentVolumeClaim: - claimName: etc-postfix - - name: ssh-key - secret: - secretName: migration-ssh-key - items: - - key: id_rsa - mode: 0600 - path: id_rsa - - key: id_rsa.pub - mode: 0644 - path: id_rsa.pub diff --git a/containers/doc/server-kubernetes/pvs.yaml b/containers/doc/server-kubernetes/pvs.yaml deleted file mode 100644 index a71e69596897..000000000000 --- a/containers/doc/server-kubernetes/pvs.yaml +++ /dev/null @@ -1,556 +0,0 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: local-storage -provisioner: kubernetes.io/no-provisioner -volumeBindingMode: Immediate -reclaimPolicy: Delete ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: var-pgsql - labels: 
- data: var-pgsql -spec: - capacity: - storage: 100Gi - accessModes: - - ReadWriteOnce - storageClassName: local-storage - local: - path: /var/uyuni/var-pgsql - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: var-cache - labels: - data: var-cache -spec: - capacity: - storage: 100Gi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/var-cache - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: var-spacewalk - labels: - data: var-spacewalk -spec: - capacity: - storage: 100Gi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/var-spacewalk - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: var-log - labels: - data: var-log -spec: - capacity: - storage: 2Gi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/var-log - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: srv-salt - labels: - data: srv-salt -spec: - capacity: - storage: 100Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/srv-salt - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: srv-www-pub - labels: - data: srv-www-pub -spec: - capacity: - storage: 100Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/srv-www-pub - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: srv-www-cobbler - labels: - data: srv-www-cobbler -spec: - capacity: - storage: 100Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/srv-www-cobbler - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: srv-www-osimages - labels: - data: srv-www-osimages -spec: - capacity: - storage: 100Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/srv-www-osimages - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: srv-tftpboot - labels: - data: srv-tftpboot -spec: - capacity: - storage: 100Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/srv-tftpboot - nodeAffinity: - required: - nodeSelectorTerms: - - 
matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: srv-formulametadata - labels: - data: srv-formulametadata -spec: - capacity: - storage: 100Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/srv-formulametadata - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: srv-pillar - labels: - data: srv-pillar -spec: - capacity: - storage: 100Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/srv-pillar - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: srv-susemanager - labels: - data: srv-susemanager -spec: - capacity: - storage: 100Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/srv-susemanager - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: srv-spacewalk - labels: - data: srv-spacewalk -spec: - capacity: - storage: 100Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/srv-spacewalk - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: root - labels: - data: root -spec: - capacity: - storage: 10Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/root - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: etc-apache2 - labels: - data: etc-apache2 -spec: - capacity: - storage: 10Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/etc-apache2 - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: etc-rhn - labels: - data: etc-rhn -spec: - capacity: - storage: 10Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/etc-rhn - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: etc-systemd - labels: - data: etc-systemd -spec: - capacity: - storage: 10Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/etc-systemd - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: etc-salt - labels: - data: etc-salt -spec: - capacity: - storage: 10Mi 
- accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/etc-salt - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: etc-tomcat - labels: - data: etc-tomcat -spec: - capacity: - storage: 10Mi - accessModes: - - ReadWriteOnce - storageClassName: local-storage - local: - path: /var/uyuni/etc-tomcat - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: etc-cobbler - labels: - data: etc-cobbler -spec: - capacity: - storage: 1Mi - accessModes: - - ReadWriteOnce - storageClassName: local-storage - local: - path: /var/uyuni/etc-cobbler - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: etc-sysconfig - labels: - data: etc-sysconfig -spec: - capacity: - storage: 1Mi - accessModes: - - ReadWriteOnce - storageClassName: local-storage - local: - path: /var/uyuni/etc-sysconfig - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: etc-tls - labels: - data: etc-tls -spec: - capacity: - storage: 1Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/etc-tls - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: etc-postfix - labels: - data: etc-postfix -spec: - capacity: - storage: 1Mi - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - storageClassName: local-storage - local: - path: /var/uyuni/etc-postfix - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - uyuni-dev - From 62aa96cb9f015df8fe1388af8d146858c1639a4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Fri, 7 Jul 2023 14:27:25 +0200 Subject: [PATCH 69/80] Allow missing rhn.conf file Now that rhnLog looks for the apache user and group in rhn.conf unit tests are failing if the file is not present. Use openSUSE values as default if rhn.conf is not available. 
--- python/spacewalk/common/rhnConfig.py | 6 +++++- python/spacewalk/common/rhnLog.py | 4 ++-- .../spacewalk-backend.changes.cbosdo.rhnconfig-nofile | 1 + python/uyuni/common/fileutils.py | 6 +++--- .../uyuni/uyuni-common-libs.changes.cbosdo.rhnconfig-nofile | 1 + 5 files changed, 12 insertions(+), 6 deletions(-) create mode 100644 python/spacewalk/spacewalk-backend.changes.cbosdo.rhnconfig-nofile create mode 100644 python/uyuni/uyuni-common-libs.changes.cbosdo.rhnconfig-nofile diff --git a/python/spacewalk/common/rhnConfig.py b/python/spacewalk/common/rhnConfig.py index 0fce7040b747..a05b4d452fda 100644 --- a/python/spacewalk/common/rhnConfig.py +++ b/python/spacewalk/common/rhnConfig.py @@ -100,6 +100,9 @@ def is_initialized(self): def modifiedYN(self): """returns last modified time diff if rhn.conf has changed.""" + if not os.path.exists(self.filename): + return 0 + try: si = os.stat(self.filename) except OSError: @@ -142,7 +145,8 @@ def parse(self): # Now that we parsed the defaults, we parse the multi-key # self.filename configuration (ie, /etc/rhn/rhn.conf) - self.__parsedConfig = parse_file(self.filename) + if os.path.exists(self.filename): + self.__parsedConfig = parse_file(self.filename) # And now generate and cache the current component self.__merge() diff --git a/python/spacewalk/common/rhnLog.py b/python/spacewalk/common/rhnLog.py index 6d189a8c36f1..d3ff21ea5ea3 100644 --- a/python/spacewalk/common/rhnLog.py +++ b/python/spacewalk/common/rhnLog.py @@ -100,7 +100,7 @@ def initLOG(log_file="stderr", level=0, component=""): # fetch uid, gid so we can do a "chown ..." with cfg_component(component=None) as CFG: - apache_uid, apache_gid = getUidGid(CFG.httpd_user, CFG.httpd_group) + apache_uid, apache_gid = getUidGid(CFG.get('httpd_user', 'wwwrun'), CFG.get('httpd_group', 'www')) try: os.makedirs(log_path) @@ -187,7 +187,7 @@ def __init__(self, log_file, level, component): set_close_on_exec(self.fd) if newfileYN: with cfg_component(component=None) as CFG: - apache_uid, apache_gid = getUidGid(CFG.httpd_user, CFG.httpd_group) + apache_uid, apache_gid = getUidGid(CFG.get('httpd_user', 'wwwrun'), CFG.get('httpd_group', 'www')) os.chown(self.file, apache_uid, apache_gid) os.chmod(self.file, int('0660', 8)) except: diff --git a/python/spacewalk/spacewalk-backend.changes.cbosdo.rhnconfig-nofile b/python/spacewalk/spacewalk-backend.changes.cbosdo.rhnconfig-nofile new file mode 100644 index 000000000000..71b461e69276 --- /dev/null +++ b/python/spacewalk/spacewalk-backend.changes.cbosdo.rhnconfig-nofile @@ -0,0 +1 @@ +- Accept missing rhn.conf file diff --git a/python/uyuni/common/fileutils.py b/python/uyuni/common/fileutils.py index 3c6cb06a013c..91396583680f 100644 --- a/python/uyuni/common/fileutils.py +++ b/python/uyuni/common/fileutils.py @@ -301,9 +301,9 @@ def createPath(path, user=None, group=None, chmod=int('0755', 8)): """ with cfg_component(component=None) as CFG: if user is None: - user = CFG.httpd_user + user = CFG.get('httpd_user', 'wwwrun') if group is None: - group = CFG.httpd_group + group = CFG.get('httpd_group', 'www') path = cleanupAbsPath(path) if not os.path.exists(path): @@ -324,7 +324,7 @@ def setPermsPath(path, user=None, group='root', chmod=int('0750', 8)): """chown user.group and set permissions to chmod""" if user is None: with cfg_component(component=None) as CFG: - user = CFG.httpd_user + user = CFG.get('httpd_user', 'wwwrun') if not os.path.exists(path): raise OSError("*** ERROR: Path doesn't exist (can't set permissions): %s" % path) diff --git 
a/python/uyuni/uyuni-common-libs.changes.cbosdo.rhnconfig-nofile b/python/uyuni/uyuni-common-libs.changes.cbosdo.rhnconfig-nofile new file mode 100644 index 000000000000..71b461e69276 --- /dev/null +++ b/python/uyuni/uyuni-common-libs.changes.cbosdo.rhnconfig-nofile @@ -0,0 +1 @@ +- Accept missing rhn.conf file From 5b20b0634333a160e1f3d940998b9a2ac22e807a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 24 Aug 2023 10:36:36 +0200 Subject: [PATCH 70/80] Don't force SSL verify-full mode for localhost report DB When the report database is installed on the server we don't need and can't use SSL to connect to it using localhost hostname. This comes in handy to avoid hairpin requests in the container setup. --- java/code/src/com/redhat/rhn/common/conf/ConfigDefaults.java | 2 +- java/spacewalk-java.changes.cbosdo.local-reportdb | 1 + spacewalk/setup/lib/Spacewalk/Setup.pm | 4 +++- spacewalk/setup/spacewalk-setup.changes.cbosdo.local-reportdb | 1 + 4 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 java/spacewalk-java.changes.cbosdo.local-reportdb create mode 100644 spacewalk/setup/spacewalk-setup.changes.cbosdo.local-reportdb diff --git a/java/code/src/com/redhat/rhn/common/conf/ConfigDefaults.java b/java/code/src/com/redhat/rhn/common/conf/ConfigDefaults.java index b773ca1fcae3..94e78316dfc6 100644 --- a/java/code/src/com/redhat/rhn/common/conf/ConfigDefaults.java +++ b/java/code/src/com/redhat/rhn/common/conf/ConfigDefaults.java @@ -846,7 +846,7 @@ private String buildConnectionString(String name, String backend, String host, S } connectionUrl.append(name); - if (useSsl) { + if (!"localhost".equals(host) && useSsl) { connectionUrl.append("?ssl=true&sslrootcert=" + sslrootcert + "&sslmode=" + sslmode); } diff --git a/java/spacewalk-java.changes.cbosdo.local-reportdb b/java/spacewalk-java.changes.cbosdo.local-reportdb new file mode 100644 index 000000000000..d7338755b62a --- /dev/null +++ b/java/spacewalk-java.changes.cbosdo.local-reportdb @@ -0,0 +1 @@ +- Don't force ssl verification on reportdb using localhost diff --git a/spacewalk/setup/lib/Spacewalk/Setup.pm b/spacewalk/setup/lib/Spacewalk/Setup.pm index c76ee783042a..ee42ec90b278 100644 --- a/spacewalk/setup/lib/Spacewalk/Setup.pm +++ b/spacewalk/setup/lib/Spacewalk/Setup.pm @@ -894,7 +894,9 @@ sub postgresql_reportdb_setup { } $ENV{PGSSLROOTCERT} = $answers->{'report-db-ca-cert'}; - $ENV{PGSSLMODE} = "verify-full"; + if ($answers->{'report-db-host'} ne 'localhost') { + $ENV{PGSSLMODE} = "verify-full"; + } write_rhn_conf($answers, 'externaldb-admin-user','externaldb-admin-password', 'report-db-backend', 'report-db-host', 'report-db-port', 'report-db-name', 'report-db-user', 'report-db-password', 'report-db-ssl-enabled'); diff --git a/spacewalk/setup/spacewalk-setup.changes.cbosdo.local-reportdb b/spacewalk/setup/spacewalk-setup.changes.cbosdo.local-reportdb new file mode 100644 index 000000000000..372a906b5d98 --- /dev/null +++ b/spacewalk/setup/spacewalk-setup.changes.cbosdo.local-reportdb @@ -0,0 +1 @@ +- Don't force ssl verification to setup reportdb using localhost From 1443531bf1f737a6d8ac907fee918317e096e840 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Tue, 22 Aug 2023 16:40:54 +0200 Subject: [PATCH 71/80] change rhn_server_satellite.log permission, no longer required --- python/spacewalk/spacewalk-backend.spec | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/python/spacewalk/spacewalk-backend.spec b/python/spacewalk/spacewalk-backend.spec index 
9ab31d2a699b..8915be87d87f 100644 --- a/python/spacewalk/spacewalk-backend.spec +++ b/python/spacewalk/spacewalk-backend.spec @@ -372,9 +372,6 @@ fi %else %service_add_post spacewalk-diskcheck.service spacewalk-diskcheck.timer %endif -if test -f /var/log/rhn/rhn_server_satellite.log; then - chown -f %{apache_user}:%{apache_group} /var/log/rhn/rhn_server_satellite.log -fi %preun tools %if 0%{?rhel} @@ -657,7 +654,7 @@ fi %{!?_licensedir:%global license %doc} %license LICENSE %doc README.ULN -%attr(644,root,%{apache_group}) %{rhnconfigdefaults}/rhn_server_satellite.conf +%attr(644,%{apache_user},%{apache_group}) %{rhnconfigdefaults}/rhn_server_satellite.conf %config(noreplace) %{_sysconfdir}/logrotate.d/spacewalk-backend-tools %config(noreplace) %{rhnconf}/signing.conf %attr(755,root,root) %{_bindir}/rhn-charsets From 891615cfff7f9d00459797465d01e76702927b2b Mon Sep 17 00:00:00 2001 From: Michael Calmer Date: Wed, 5 Jul 2023 18:40:12 +0200 Subject: [PATCH 72/80] drop traditional debian client code --- client/debian/DEBIAN-HOWTO | 43 --- client/debian/apt-spacewalk/50spacewalk | 15 - client/debian/apt-spacewalk/LICENSE | 339 ------------------ client/debian/apt-spacewalk/Makefile.python | 19 - .../debian/apt-spacewalk/apt-spacewalk.spec | 162 --------- client/debian/apt-spacewalk/packages.py | 187 ---------- client/debian/apt-spacewalk/post_invoke.py | 45 --- client/debian/apt-spacewalk/pre_invoke.py | 86 ----- client/debian/apt-spacewalk/pylintrc | 188 ---------- client/debian/apt-spacewalk/spacewalk | 301 ---------------- .../apt-spacewalk/src/apt-spacewalk.changes | 9 - .../Stop-linking-with-libxml2mod.patch | 72 ---- .../python-dmidecode/python-dmidecode.spec | 324 ----------------- .../debian/python-hwdata/python-hwdata.spec | 255 ------------- 14 files changed, 2045 deletions(-) delete mode 100644 client/debian/DEBIAN-HOWTO delete mode 100644 client/debian/apt-spacewalk/50spacewalk delete mode 100644 client/debian/apt-spacewalk/LICENSE delete mode 100644 client/debian/apt-spacewalk/Makefile.python delete mode 100644 client/debian/apt-spacewalk/apt-spacewalk.spec delete mode 100644 client/debian/apt-spacewalk/packages.py delete mode 100755 client/debian/apt-spacewalk/post_invoke.py delete mode 100755 client/debian/apt-spacewalk/pre_invoke.py delete mode 100644 client/debian/apt-spacewalk/pylintrc delete mode 100755 client/debian/apt-spacewalk/spacewalk delete mode 100644 client/debian/apt-spacewalk/src/apt-spacewalk.changes delete mode 100644 client/debian/python-dmidecode/Stop-linking-with-libxml2mod.patch delete mode 100644 client/debian/python-dmidecode/python-dmidecode.spec delete mode 100644 client/debian/python-hwdata/python-hwdata.spec diff --git a/client/debian/DEBIAN-HOWTO b/client/debian/DEBIAN-HOWTO deleted file mode 100644 index ba27aaffe8bd..000000000000 --- a/client/debian/DEBIAN-HOWTO +++ /dev/null @@ -1,43 +0,0 @@ -HOW TO BUILD DEBIAN PACKAGES - -The client packages are built using debbuild. - -The following packages make up the client stack: -* apt-spacewalk (in client/debian) -* rhn-client-tools (in client/tools) -* rhnlib (in client/rhel) -* rhnsd (in client/rhel) -* rhncfg (in client/tools) -* spacewalk-usix (in usix) - -The following external packages are also part of the client stack: -* python-dmidecode (in client/debian) -* python-hwdata (in client/debian) - -Preparation steps: - -1. Install debbuild from https://github.com/ascherer/debbuild/releases - -2. 
Create the debbuild package build tree - -mkdir -p ~/debbuild/{SPECS,SOURCES,SDEBS,DEBS,BUILD,BUILDROOT} - -To build the non-external packages, these are the following steps: - -1. Switch to the directory of the package source (ex. for apt-spacewalk, cd client/debian/apt-spacewalk) - -2. Use tito to build tarball (tito build --tgz) - -3. Copy the tarball to ~/debbuild/SOURCES and spec to ~/debbuild/SPECS - -4. Change to ~/debbuild/SPECS and run "debbuild -ba" on the spec. (ex. for apt-spacewalk, debbuild -ba apt-spacewalk.spec) - -For external packages, the only difference is step 2, where you use spectool to fetch the tarball instead. -For example, for python-hwdata, "spectool -g python-hwdata.spec" is sufficient to get the sources. - - -How to regenerate repo: ------------------------ -cd spacewalk/debian -dpkg-scanpackages dists/spacewalk-unstable/binary-amd64 |gzip >dists/spacewalk-unstable/binary-amd64/Packages.gz -dpkg-scanpackages dists/spacewalk-unstable/binary-i386 |gzip >dists/spacewalk-unstable/binary-i386/Packages.gz diff --git a/client/debian/apt-spacewalk/50spacewalk b/client/debian/apt-spacewalk/50spacewalk deleted file mode 100644 index a80904711033..000000000000 --- a/client/debian/apt-spacewalk/50spacewalk +++ /dev/null @@ -1,15 +0,0 @@ -# -# The configuration for apt-spacewalk -# - -APT { - Update { - List-Refresh "true"; - Pre-Invoke { - "if [ -x /usr/lib/apt-spacewalk/post_invoke.py ]; then /usr/lib/apt-spacewalk/post_invoke.py; fi"; - } - } -}; -DPkg::Post-Invoke { - "/usr/lib/apt-spacewalk/post_invoke.py"; -}; diff --git a/client/debian/apt-spacewalk/LICENSE b/client/debian/apt-spacewalk/LICENSE deleted file mode 100644 index d159169d1050..000000000000 --- a/client/debian/apt-spacewalk/LICENSE +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. 
You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. 
- - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. 
- -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. 
diff --git a/client/debian/apt-spacewalk/Makefile.python b/client/debian/apt-spacewalk/Makefile.python deleted file mode 100644 index 86f6fbcfa101..000000000000 --- a/client/debian/apt-spacewalk/Makefile.python +++ /dev/null @@ -1,19 +0,0 @@ -THIS_MAKEFILE := $(realpath $(lastword $(MAKEFILE_LIST))) -CURRENT_DIR := $(dir $(THIS_MAKEFILE)) -include $(CURRENT_DIR)../../../rel-eng/Makefile.python - -# Docker tests variables -DOCKER_CONTAINER_BASE = systemsmanagement/uyuni/master/docker/containers/uyuni-master -DOCKER_REGISTRY = registry.opensuse.org -DOCKER_RUN_EXPORT = "PYTHONPATH=$PYTHONPATH" -DOCKER_VOLUMES = -v "$(CURDIR)/../../../:/manager" - -__pylint :: - $(call update_pip_env) - pylint --rcfile=pylintrc $(shell find -name '*.py') > reports/pylint.log || true - -docker_pylint :: - docker run --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/sh -c "cd /manager/client/debian/apt-spacewalk; make -f Makefile.python __pylint" - -docker_shell :: - docker run -t -i --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/bash diff --git a/client/debian/apt-spacewalk/apt-spacewalk.spec b/client/debian/apt-spacewalk/apt-spacewalk.spec deleted file mode 100644 index e2ecd961e9ed..000000000000 --- a/client/debian/apt-spacewalk/apt-spacewalk.spec +++ /dev/null @@ -1,162 +0,0 @@ -%{!?__python2:%global __python2 /usr/bin/python2} - -%if %{undefined python2_version} -%global python2_version %(%{__python2} -Esc "import sys; sys.stdout.write('{0.major}.{0.minor}'.format(sys.version_info))") -%endif - -%if %{undefined python2_sitelib} -%global python2_sitelib %(%{__python2} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") -%endif - -Name: apt-spacewalk -Summary: Spacewalk plugin for Advanced Packaging Tool -%if %{_vendor} == "debbuild" -Packager: Uyuni Project -Group: admin -%endif -Version: 1.0.15 -Release: 1%{?dist} -License: GPLv2 -Source0: %{name}-%{version}.tar.gz -URL: https://github.com/uyuni-project/uyuni -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) -BuildArch: noarch -BuildRequires: python - -%description -apt-spacewalk is plugin used on Debian clients -to acquire content from Spacewalk server - -%package -n apt-transport-spacewalk -Summary: APT transport for communicating with Spacewalk servers -Requires: apt -Requires: python-apt -Requires: rhn-client-tools -Requires: python-six - -Recommends: mgr-daemon - -%description -n apt-transport-spacewalk - Supplies the APT method for fetching packages from Spacewalk. 
- Adds transaction hooks to: - 1) Update APT's sourcelist with subscribed spacewalk channels - before updating - 2) Register the machine's installed packages with the Spacewalk - server after any dpkg invocation - -%prep -%setup -q - -%build -# Nothing to build - -%install -mkdir -p $RPM_BUILD_ROOT/%{_prefix}/lib/apt-spacewalk -cp -a *_invoke.py $RPM_BUILD_ROOT/%{_prefix}/lib/apt-spacewalk -mkdir -p $RPM_BUILD_ROOT/%{_prefix}/lib/apt/methods -cp -a spacewalk $RPM_BUILD_ROOT/%{_prefix}/lib/apt/methods -mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/apt/apt.conf.d -cp -a 50spacewalk $RPM_BUILD_ROOT/%{_sysconfdir}/apt/apt.conf.d -mkdir -p $RPM_BUILD_ROOT/%{python2_sitelib}/rhn/actions -cp -a packages.py $RPM_BUILD_ROOT/%{python2_sitelib}/rhn/actions - -%files -n apt-transport-spacewalk -%license LICENSE -%{_prefix}/lib/apt-spacewalk/ -%{_prefix}/lib/apt/methods/spacewalk -%config(noreplace) %{_sysconfdir}/apt/apt.conf.d/50spacewalk -%{python2_sitelib}/rhn/actions/packages.py - -%if %{_vendor} == "debbuild" -%pre -n apt-transport-spacewalk -hook=/etc/apt/apt.conf.d/50spacewalk -if test -f $hook.disabled -then - mv $hook.disabled $hook -fi - -%postun -n apt-transport-spacewalk -hook=/etc/apt/apt.conf.d/50spacewalk -sourcelist=/etc/apt/sources.list.d/spacewalk.list - -case "$1" in - purge) - rm -f $hook.disabled - rm -f $sourcelist.disabled - ;; - - remove) - mv $hook $hook.disabled || : - mv $sourcelist $sourcelist.disabled || : - ;; - - abort-install) - if test "x$2" != "x" && test -f $hook - then - mv $hook $hook.disabled || : - mv $sourcelist $sourcelist.disabled || : - fi - ;; - - upgrade|failed-upgrade|abort-upgrade|disappear) - ;; - - *) - echo "postrm called with unknown argument \`$1'" >&2 - exit 1 -esac -%endif - -%changelog -* Thu Oct 25 2018 Tomas Kasparek 1.0.15-1 -- client, usix: Rework how client packaging is done for Debian/Ubuntu -- Move apt-spacewalk to the client/debian/ directory - -* Mon Jun 18 2018 Michael Mraka 1.0.14-1 -- client/debian: Port apt-spacewalk to be Python 3 ready - -* Mon Apr 16 2018 Tomas Kasparek 1.0.13-1 -- apt-transport-spacewalk: missed part of patch within pre_invoke -- further modifications on apt-transport-spacewalk -- modify apt-transport-spacewalk to support signed repos - -* Fri Feb 09 2018 Michael Mraka 1.0.12-1 -- removed BuildRoot from specfiles - -* Mon Jul 17 2017 Jan Dobes 1.0.11-1 -- Migrating Fedorahosted to GitHub - -* Tue Feb 24 2015 Matej Kollar 1.0.10-1 -- Getting rid of Tabs and trailing spaces in LICENSE, COPYING, and README files - -* Mon Sep 30 2013 Michael Mraka 1.0.9-1 -- removed trailing whitespaces - -* Thu Mar 21 2013 Jan Pazdziora 1.0.8-1 -- forward port debian bugs #703207, 700821 - -* Wed Feb 06 2013 Jan Pazdziora 1.0.7-1 -- update documentation on Debian packages - -* Sun Jun 17 2012 Miroslav Suchý 1.0.6-1 -- add copyright information to header of .py files -- ListRefresh is in APT:Update namespace - -* Sun Jun 17 2012 Miroslav Suchý 1.0.5-1 -- add LICENSE file for apt-spacewalk tar.gz -- %%defattr is not needed since rpm 4.4 - -* Thu Apr 28 2011 Simon Lukasik 1.0.4-1 -- The method can be killed by the keyboard interrupt (slukasik@redhat.com) - -* Sun Apr 17 2011 Simon Lukasik 1.0.3-1 -- Introducing actions.packages dispatcher (slukasik@redhat.com) -- Do not use rpmUtils on Debian (slukasik@redhat.com) -- Skip the extra lines sent by Apt (slukasik@redhat.com) - -* Wed Apr 13 2011 Jan Pazdziora 1.0.2-1 -- utilize config.getServerlURL() (msuchy@redhat.com) - -* Thu Mar 17 2011 Simon Lukasik 1.0.1-1 -- new package - diff --git 
a/client/debian/apt-spacewalk/packages.py b/client/debian/apt-spacewalk/packages.py deleted file mode 100644 index 11798f53a627..000000000000 --- a/client/debian/apt-spacewalk/packages.py +++ /dev/null @@ -1,187 +0,0 @@ -# -# actions.packages dispatcher for Debian clients -# -# Author: Simon Lukasik -# Lukas Durfina -# License: GPLv2 -# -# TODO: Be strict on architectures and package versions -# Staging content -# -# Copyright (c) 2012 Red Hat, Inc. -# -# This software is licensed to you under the GNU General Public License, -# version 2 (GPLv2). There is NO WARRANTY for this software, express or -# implied, including the implied warranties of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 -# along with this software; if not, see -# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. - -from __future__ import print_function - -import os -import sys -import time -import apt - -sys.path.append("/usr/share/rhn/") -from up2date_client import up2dateLog -from up2date_client import pkgUtils -from up2date_client import rhnPackageInfo - -log = up2dateLog.initLog() - -# file used to keep track of the next time rhn_check -# is allowed to update the package list on the server -LAST_UPDATE_FILE="/var/lib/up2date/dbtimestamp" - -__rhnexport__ = [ - 'update', - 'remove', - 'refresh_list', - 'fullUpdate', - 'checkNeedUpdate', - 'runTransaction', - 'verify' -] - -def remove(package_list, cache_only=None): - """We have been told that we should remove packages""" - if cache_only: - return (0, "no-ops for caching", {}) - if type(package_list) != type([]): - return (13, "Invalid arguments passed to function", {}) - log.log_debug("Called remove_packages", package_list) - - try: - cache = apt.Cache() - cache.update() - cache.open(None) - for pkg in package_list: - try: - package = cache[pkg[0]] - package.mark_delete() - except: - log.log_debug("Failed to remove package", pkg) - return (1, "remove_packages failed", {}) - cache.commit() - return (0, "remove_packages OK", {}) - except: - return (1, "remove_packages failed", {}) - -def update(package_list, cache_only=None): - """We have been told that we should retrieve/install packages""" - if type(package_list) != type([]): - return (13, "Invalid arguments passed to function", {}) - log.log_debug("Called update", package_list) - - try: - cache = apt.Cache() - cache.update() - cache.open(None) - for pkg in package_list: - try: - package = cache[pkg[0]] - if not package.is_installed: - package.mark_install() - else: - package.mark_upgrade() - except: - log.log_debug("Failed to update package", pkg) - return (1, "update failed", {}) - cache.commit() - return (0, "update OK", {}) - except: - return (1, "update failed", {}) - -def fullUpdate(force=0, cache_only=None): - """ Update all packages on the system. 
""" - log.log_debug("Called packages.fullUpdate") - try: - cache = apt.Cache() - cache.update() - cache.open(None) - cache.upgrade(True) - cache.commit() - except: - return (1, "packages.fullUpdate failed", {}) - return (0, "packages.fullUpdate OK", {}) - -def checkNeedUpdate(rhnsd=None, cache_only=None): - """ Check if the locally installed package list changed, if - needed the list is updated on the server - In case of error avoid pushing data to stay safe - """ - if cache_only: - return (0, "no-ops for caching", {}) - try: - last = os.stat(LAST_UPDATE_FILE)[8] - except: - last = 0 - - # Never update the package list more than once every 1/2 hour - if int(time.time()) - last <= 60: - return (0, "dpkg database not modified since last update (or package " - "list recently updated)", {}) - - if last == 0: - try: - file = open(LAST_UPDATE_FILE, "w+") - file.close() - except: - return (0, "unable to open the timestamp file", {}) - - # call the refresh_list action with a argument so we know it's - # from rhnsd - return refresh_list(rhnsd=1) - - -def refresh_list(rhnsd=None, cache_only=None): - """ push again the list of rpm packages to the server """ - if cache_only: - return (0, "no-ops for caching", {}) - log.log_debug("Called refresh_list") - - try: - rhnPackageInfo.updatePackageProfile() - except: - print("ERROR: refreshing remote package list for System Profile") - return (20, "Error refreshing package list", {}) - - touch_time_stamp() - return (0, "package list refreshed", {}) - -def touch_time_stamp(): - try: - file_d = open(LAST_UPDATE_FILE, "w+") - file_d.close() - except: - return (0, "unable to open the timestamp file", {}) - # Never update the package list more than once every hour. - t = time.time() - try: - os.utime(LAST_UPDATE_FILE, (t, t)) - except: - return (0, "unable to set the time stamp on the time stamp file %s" - % LAST_UPDATE_FILE, {}) - -def verify(packages, cache_only=None): - log.log_debug("Called packages.verify") - if cache_only: - return (0, "no-ops for caching", {}) - - data = {} - data['name'] = "packages.verify" - data['version'] = 0 - ret, missing_packages = pkgUtils.verifyPackages(packages) - - data['verify_info'] = ret - - if len(missing_packages): - data['name'] = "packages.verify.missing_packages" - data['version'] = 0 - data['missing_packages'] = missing_packages - return(43, "packages requested to be verified are missing " - "in the Apt cache", data) - - return (0, "packages verified", data) diff --git a/client/debian/apt-spacewalk/post_invoke.py b/client/debian/apt-spacewalk/post_invoke.py deleted file mode 100755 index 0cfce4d4f35e..000000000000 --- a/client/debian/apt-spacewalk/post_invoke.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/python -# -# DPkg::Post-Invoke hook for updating Debian package profile -# -# Author: Simon Lukasik -# Date: 2011-03-14 -# License: GPLv2 -# -# Copyright (c) 1999--2012 Red Hat, Inc. -# -# This software is licensed to you under the GNU General Public License, -# version 2 (GPLv2). There is NO WARRANTY for this software, express or -# implied, including the implied warranties of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 -# along with this software; if not, see -# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. - - -from __future__ import print_function - -import sys - -# Once we have the up2date stuff in a site-packages, -# we won't have to do path magic. 
-import warnings -warnings.filterwarnings("ignore", - message='the md5 module is deprecated; use hashlib instead') -sys.path.append("/usr/share/rhn/") -from up2date_client import up2dateAuth -from up2date_client import up2dateErrors -from up2date_client import rhnserver -from up2date_client import pkgUtils - - -if __name__ == '__main__': - systemid = up2dateAuth.getSystemId() - if systemid: - try: - print("Apt-Spacewalk: Updating package profile") - s = rhnserver.RhnServer() - s.registration.update_packages(systemid, - pkgUtils.getInstalledPackageList(getArch=1)) - except up2dateErrors.RhnServerException as e: - print("Package profile information could not be sent.") - print(str(e)) diff --git a/client/debian/apt-spacewalk/pre_invoke.py b/client/debian/apt-spacewalk/pre_invoke.py deleted file mode 100755 index 8d27f48825e3..000000000000 --- a/client/debian/apt-spacewalk/pre_invoke.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/python -# -# APT::Update::Pre-Invoke hook for updating sources.list -# -# Author: Simon Lukasik -# Date: 2011-03-14 -# License: GPLv2 -# -# Copyright (c) 1999--2012 Red Hat, Inc. -# -# This software is licensed to you under the GNU General Public License, -# version 2 (GPLv2). There is NO WARRANTY for this software, express or -# implied, including the implied warranties of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 -# along with this software; if not, see -# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. - - -from __future__ import print_function - -import sys -import os -from six.moves.urllib.parse import urlparse -from aptsources import sourceslist -import apt_pkg - -# Once we have the up2date stuff in a site-packages, -# we don't have to do path magic -import warnings -warnings.filterwarnings("ignore", - message='the md5 module is deprecated; use hashlib instead') -sys.path.append('/usr/share/rhn/') -from up2date_client import config -from up2date_client import rhnChannel -from up2date_client import up2dateAuth -from up2date_client import up2dateErrors - - -def get_channels(): - """Return channels associated with a machine""" - try: - channels = ['main'] - for channel in rhnChannel.getChannelDetails(): - if channel['parent_channel']: - channels.append(channel['label']) - return channels - except up2dateErrors.Error: - return [] - -def get_server(): - """Spacewalk server fqdn""" - return urlparse(config.getServerlURL()[0]).netloc - -def get_conf_file(): - """Path to spacewalk.list configuration file""" - apt_pkg.init_config() - directory = apt_pkg.config.get('Dir::Etc::sourceparts', - 'sources.list.d') - if not os.path.isabs(directory): - directory = os.path.join('/etc/apt', directory) - return os.path.join(directory, 'spacewalk.list') - -def update_sources_list(): - sources = sourceslist.SourcesList() - sw_source = [] - for source in sources.list: - if source.uri.startswith('spacewalk://'): - source.set_enabled(False) - sw_source.append(source) - - if up2dateAuth.getSystemId(): - channels = get_channels() - if len(channels): - for source in sw_source: - sources.remove(source) - sources.add(type='deb', - uri='spacewalk://' + get_server(), - dist='channels:', - orig_comps=channels, - file=get_conf_file() - ) - sources.save() - -if __name__ == '__main__': - print("Apt-Spacewalk: Updating sources.list") - update_sources_list() diff --git a/client/debian/apt-spacewalk/pylintrc b/client/debian/apt-spacewalk/pylintrc deleted file mode 100644 index 82ba454c4c46..000000000000 --- 
a/client/debian/apt-spacewalk/pylintrc +++ /dev/null @@ -1,188 +0,0 @@ -# apt-spacewalk package pylint configuration - -[MASTER] - -# Profiled execution. -profile=no - -# Pickle collected data for later comparisons. -persistent=no - - -[MESSAGES CONTROL] - -# Disable the message(s) with the given id(s). - - -disable=I0011, - C0302, - C0111, - R0801, - R0902, - R0903, - R0904, - R0912, - R0913, - R0914, - R0915, - R0921, - R0922, - W0142, - W0403, - W0603, - C1001, - W0121, - useless-else-on-loop, - bad-whitespace, - unpacking-non-sequence, - superfluous-parens, - cyclic-import, - redefined-variable-type, - no-else-return, - - # Uyuni disabled - E0203, - E0611, - E1101, - E1102 - -# list of disabled messages: -#I0011: 62: Locally disabling R0201 -#C0302: 1: Too many lines in module (2425) -#C0111: 1: Missing docstring -#R0902: 19:RequestedChannels: Too many instance attributes (9/7) -#R0903: Too few public methods -#R0904: 26:Transport: Too many public methods (22/20) -#R0912:171:set_slots_from_cert: Too many branches (59/20) -#R0913:101:GETServer.__init__: Too many arguments (11/10) -#R0914:171:set_slots_from_cert: Too many local variables (38/20) -#R0915:171:set_slots_from_cert: Too many statements (169/50) -#W0142:228:MPM_Package.write: Used * or ** magic -#W0403: 28: Relative import 'rhnLog', should be 'backend.common.rhnLog' -#W0603: 72:initLOG: Using the global statement -# for pylint-1.0 we also disable -#C1001: 46, 0: Old-style class defined. (old-style-class) -#W0121: 33,16: Use raise ErrorClass(args) instead of raise ErrorClass, args. (old-raise-syntax) -#W:243, 8: Else clause on loop without a break statement (useless-else-on-loop) -# pylint-1.1 checks -#C:334, 0: No space allowed after bracket (bad-whitespace) -#W:162, 8: Attempting to unpack a non-sequence defined at line 6 of (unpacking-non-sequence) -#C: 37, 0: Unnecessary parens after 'not' keyword (superfluous-parens) -#C:301, 0: Unnecessary parens after 'if' keyword (superfluous-parens) - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html -output-format=parseable - -# Include message's id in output -include-ids=yes - -# Tells whether to display a full report or only the messages -reports=yes - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}" - -[VARIABLES] - -# A regular expression matching names used for dummy variables (i.e. not used). 
-dummy-variables-rgx=_|dummy - - -[BASIC] - -# Regular expression which should only match correct module names -#module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ -module-rgx=([a-zA-Z_][a-zA-Z0-9_]+)$ - -# Regular expression which should only match correct module level names -const-rgx=(([a-zA-Z_][a-zA-Z0-9_]*)|(__.*__))$ - -# Regular expression which should only match correct class names -class-rgx=[a-zA-Z_][a-zA-Z0-9_]+$ - -# Regular expression which should only match correct function names -function-rgx=[a-z_][a-zA-Z0-9_]{,42}$ - -# Regular expression which should only match correct method names -method-rgx=[a-z_][a-zA-Z0-9_]{,42}$ - -# Regular expression which should only match correct instance attribute names -attr-rgx=[a-z_][a-zA-Z0-9_]{,30}$ - -# Regular expression which should only match correct argument names -argument-rgx=[a-z_][a-zA-Z0-9_]{,30}$ - -# Regular expression which should only match correct variable names -variable-rgx=[a-z_][a-zA-Z0-9_]{,30}$ - -# Regular expression which should only match correct list comprehension / -# generator expression variable names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression which should only match correct class sttribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,42}|(__.*__))$ - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# List of builtins function names that should not be used, separated by a comma -bad-functions=apply,input - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=10 - -# Maximum number of locals for function / method body -max-locals=20 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branchs=20 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=1 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - - -[CLASSES] - - -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=120 - -# Maximum number of lines in a module -max-module-lines=1000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes= diff --git a/client/debian/apt-spacewalk/spacewalk b/client/debian/apt-spacewalk/spacewalk deleted file mode 100755 index d07d1915c30e..000000000000 --- a/client/debian/apt-spacewalk/spacewalk +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -u -# -# The Spacewalk Acquire Method -# -# Author: Simon Lukasik -# Date: 2011-01-01 -# License: GPLv2 -# -# Copyright (c) 1999--2012 Red Hat, Inc. -# -# This software is licensed to you under the GNU General Public License, -# version 2 (GPLv2). There is NO WARRANTY for this software, express or -# implied, including the implied warranties of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 -# along with this software; if not, see -# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. 
- - -from __future__ import print_function - -import sys -import os -import re -import hashlib - -import warnings -warnings.filterwarnings("ignore", message="the md5 module is deprecated; use hashlib instead") -sys.path.append("/usr/share/rhn/") - -from six.moves.urllib.parse import urlparse -from rhn.connections import HTTPConnection, HTTPSConnection -from up2date_client import config -from up2date_client import rhnChannel -from up2date_client import up2dateAuth -from up2date_client import up2dateErrors -from rhn.stringutils import bstr - - - -class pkg_acquire_method: - """ - This is slightly modified python variant of apt-pkg/acquire-method. - It is a skeleton class that implements only very basic of apt methods - functionality. - """ - __eof = False - - def __init__(self): - print("100 Capabilities\nVersion: 1.0\nSingle-Instance: true\n\n", end='') - - def __get_next_msg(self): - """ - Apt uses for communication with its methods the text protocol similar - to http. This function parses the protocol messages from stdin. - """ - if self.__eof: - return None - result = {}; - line = sys.stdin.readline() - while line == '\n': - line = sys.stdin.readline() - if not line: - self.__eof = True - return None - s = line.split(" ", 1) - result['_number'] = int(s[0]) - result['_text'] = s[1].strip() - - while not self.__eof: - line = sys.stdin.readline() - if not line: - self.__eof = True - return result - if line == '\n': - return result - s = line.split(":", 1) - result[s[0]] = s[1].strip() - - def __dict2msg(self, msg): - """Convert dictionary to http like message""" - result = "" - for item in list(msg.keys()): - if msg[item] != None: - result += item + ": " + msg[item] + "\n" - return result - - def status(self, **kwargs): - print("102 Status\n%s\n" % self.__dict2msg(kwargs), end='') - - def uri_start(self, msg): - print("200 URI Start\n%s\n" % self.__dict2msg(msg), end='') - - def uri_done(self, msg): - print("201 URI Done\n%s\n" % self.__dict2msg(msg), end='') - - def uri_failure(self, msg): - print("400 URI Failure\n%s\n" % self.__dict2msg(msg), end='') - - def run(self): - """Loop through requests on stdin""" - while True: - msg = self.__get_next_msg() - if msg == None: - return 0 - if msg['_number'] == 600: - try: - self.fetch(msg) - except Exception as e: - self.fail(e.__class__.__name__ + ": " + str(e)) - except up2dateErrors.Error as e: - self.fail(e.__class__.__name__ + ": " + str(e)) - else: - return 100 - - - -def get_ssl_ca_cert(up2date_cfg): - if not ('sslCACert' in up2date_cfg and up2date_cfg['sslCACert']): - raise BadSslCaCertConfig - - ca_certs = up2date_cfg['sslCACert'] - if type(ca_certs) == list: - return ca_certs - return [ca_certs] - - - -class spacewalk_method(pkg_acquire_method): - """ - Spacewalk acquire method - """ - up2date_cfg = None - login_info = None - current_url = None - svr_channels = None - http_headers = None - base_channel = None - conn = None - not_registered_msg = 'This system is not registered with the spacewalk server' - - def fail(self, message = not_registered_msg): - self.uri_failure({'URI': self.uri, - 'Message': message}) - - - def __load_config(self): - if self.up2date_cfg == None: - self.up2date_cfg = config.initUp2dateConfig() - self.up2date_server = urlparse(config.getServerlURL()[0]) - # TODO: proxy settings - - - def __login(self): - if self.login_info == None: - self.status(URI = self.uri, Message = 'Logging into the spacewalk server') - self.login_info = up2dateAuth.getLoginInfo() - if not self.login_info: - raise 
up2date_client.AuthenticationError(self.not_registered_msg) - self.status(URI = self.uri, Message = 'Logged in') - - - def __init_channels(self): - if self.svr_channels == None: - self.svr_channels = rhnChannel.getChannelDetails() - for channel in self.svr_channels: - if channel['parent_channel'] == '': - self.base_channel = channel['label'] - - - def __init_headers(self): - if self.http_headers == None: - rhn_needed_headers = ['X-RHN-Server-Id', - 'X-RHN-Auth-User-Id', - 'X-RHN-Auth', - 'X-RHN-Auth-Server-Time', - 'X-RHN-Auth-Expire-Offset'] - self.http_headers = {}; - for header in rhn_needed_headers: - if header not in self.login_info: - raise up2date_client.AuthenticationError( - "Missing required login information %s" % (header)) - self.http_headers[header] = self.login_info[header] - self.http_headers['X-RHN-Transport-Capability'] = 'follow-redirects=3' - - - def __make_conn(self): - if self.conn == None: - if self.up2date_server.scheme == 'http' \ - or self.up2date_cfg['useNoSSLForPackages'] == 1: - self.conn = HTTPConnection(self.up2date_server.netloc) - else: - self.conn = HTTPSConnection(self.up2date_server.netloc, - trusted_certs=get_ssl_ca_cert(self.up2date_cfg)) - - - def __transform_document(self, document): - """Transform url given by apt to real spacewalk url""" - document = document.replace('dists/channels:/main/', - 'dists/channels:/' + self.base_channel + '/', 1) - document = re.sub('/binary-[\d\w]*/', '/repodata/', document, 1) - document = document.replace('dists/channels:/', '/XMLRPC/GET-REQ/', 1) - return document - - - def fetch(self, msg): - """ - Fetch the content from spacewalk server to the file. - - Acording to the apt protocol msg must contain: 'URI' and 'Filename'. - Other possible keys are: 'Last-Modified', 'Index-File', 'Fail-Ignore' - """ - self.uri = msg['URI'] - self.uri_parsed = urlparse(msg['URI']) - self.filename = msg['Filename'] - - self.__load_config() - if self.uri_parsed.netloc != self.up2date_server.netloc: - return self.fail() - self.__login() - self.__init_channels() - - document = self.__transform_document(self.uri_parsed.path) - - self.__init_headers() - self.__make_conn() - - hdrs = self.http_headers; - # check is partially downloaded file present - if os.path.isfile(self.filename): - fsize = os.stat(self.filename).st_size - if fsize > 0: - # resume aborted download by requesting tail of the file - # using Range HTTP header - hdrs['Range'] = 'bytes=' + str(fsize) + '-' - - self.conn.request("GET", "/" + document, headers = hdrs) - self.status(URI = self.uri, Message = 'Waiting for headers') - - res = self.conn.getresponse() - - if res.status == 200: - f = open(self.filename, "wb") - elif res.status == 206: - f = open(self.filename, "ab") - else: - self.uri_failure({'URI': self.uri, - 'Message': str(res.status) + ' ' + res.reason, - 'FailReason': 'HttpError' + str(res.status)}) - while True: - data = res.read(4096) - if not len(data): break - res.close() - return - - self.uri_start({'URI': self.uri, - 'Size': res.getheader('content-length'), - 'Last-Modified': res.getheader('last-modified')}) - - while True: - data = res.read(4096) - if not len(data): - break - f.write(data) - res.close() - f.close() - - f = open(self.filename, "r") - hash_sha256 = hashlib.sha256() - hash_md5 = hashlib.md5() - fsize = 0 - while True: - data = f.read(4096) - if not len(data): - break - fsize += len(data) - hash_sha256.update(data) - hash_md5.update(data) - f.close() - - self.uri_done({'URI': self.uri, - 'Filename': self.filename, - 'Size': str(fsize), - 
'Last-Modified': res.getheader('last-modified'), - 'MD5-Hash': hash_md5.hexdigest(), - 'MD5Sum-Hash': hash_md5.hexdigest(), - 'SHA256-Hash': hash_sha256.hexdigest()}) - - - def __del__(self): - if self.conn: - self.conn.close() - - - -if __name__ == '__main__': - try: - method = spacewalk_method() - ret = method.run() - sys.exit(ret) - except KeyboardInterrupt: - pass diff --git a/client/debian/apt-spacewalk/src/apt-spacewalk.changes b/client/debian/apt-spacewalk/src/apt-spacewalk.changes deleted file mode 100644 index f55d5c2f0c72..000000000000 --- a/client/debian/apt-spacewalk/src/apt-spacewalk.changes +++ /dev/null @@ -1,9 +0,0 @@ -------------------------------------------------------------------- -Fri Apr 20 14:24:42 CEST 2012 - mc@suse.de - -- version 1.0.4.1-1 -- The method can be killed by the keyboard interrupt -- Introducing actions.packages dispatcher -- Do not use rpmUtils on Debian -- Skip the extra lines sent by Apt - diff --git a/client/debian/python-dmidecode/Stop-linking-with-libxml2mod.patch b/client/debian/python-dmidecode/Stop-linking-with-libxml2mod.patch deleted file mode 100644 index 68a00e1a5ee0..000000000000 --- a/client/debian/python-dmidecode/Stop-linking-with-libxml2mod.patch +++ /dev/null @@ -1,72 +0,0 @@ -From 6698dabbd45a2d93199d2d3d5abb84f8c260667d Mon Sep 17 00:00:00 2001 -From: Sandro Tosi -Date: Tue, 5 Dec 2017 18:52:09 -0500 -Subject: Stop linking with libxml2mod - -Copy the two funcions used instead of linking with libxml2mod. - -Author: Adrian Bunk ---- - src/dmidecodemodule.c | 27 ++++++++++++++++++++++++++- - src/setup_common.py | 3 --- - 2 files changed, 26 insertions(+), 4 deletions(-) - -diff --git a/src/dmidecodemodule.c b/src/dmidecodemodule.c -index b31c002..007a892 100644 ---- a/src/dmidecodemodule.c -+++ b/src/dmidecodemodule.c -@@ -42,7 +42,6 @@ - #include - - #include --#include "libxml_wrap.h" - - #include "dmidecodemodule.h" - #include "dmixml.h" -@@ -64,6 +63,32 @@ char *PyUnicode_AsUTF8(PyObject *unicode) { - } - #endif - -+static PyObject * -+libxml_xmlDocPtrWrap(xmlDocPtr doc) -+{ -+ PyObject *ret; -+ -+ if (doc == NULL) { -+ Py_INCREF(Py_None); -+ return (Py_None); -+ } -+ ret = PyCapsule_New((void *) doc, (char *) "xmlDocPtr", NULL); -+ return (ret); -+} -+ -+static PyObject * -+libxml_xmlNodePtrWrap(xmlNodePtr node) -+{ -+ PyObject *ret; -+ -+ if (node == NULL) { -+ Py_INCREF(Py_None); -+ return (Py_None); -+ } -+ ret = PyCapsule_New((void *) node, (char *) "xmlNodePtr", NULL); -+ return (ret); -+} -+ - static void init(options *opt) - { - opt->devmem = DEFAULT_MEM_DEV; -diff --git a/src/setup_common.py b/src/setup_common.py -index aec1f9b..6b678ef 100644 ---- a/src/setup_common.py -+++ b/src/setup_common.py -@@ -68,9 +68,6 @@ def libxml2_lib(libdir, libs): - elif l.find('-l') == 0: - libs.append(l.replace("-l", "", 1)) - -- # this library is not reported and we need it anyway -- libs.append('xml2mod') -- - - - # Get version from src/version.h diff --git a/client/debian/python-dmidecode/python-dmidecode.spec b/client/debian/python-dmidecode/python-dmidecode.spec deleted file mode 100644 index 452b5f031198..000000000000 --- a/client/debian/python-dmidecode/python-dmidecode.spec +++ /dev/null @@ -1,324 +0,0 @@ -%if %{_vendor} == "debbuild" -# Debian points /bin/sh to /bin/dash by default. This breaks a lot of common -# scripts that rely on bash-specific behavior, so changing the shell preempts -# a lot of these breakages. 
-%global _buildshell /bin/bash -%endif - - -# Setuptools install flags -%if %{_vendor} == "debbuild" -%global pyinstflags --no-compile -O0 -%global pytargetflags --install-layout=deb -%else -%global pyinstflags -O1 -%global pytargetflags %{nil} -%endif - -# For systems (mostly debian) that don't define these things ------------------- -%{!?__python2:%global __python2 /usr/bin/python2} -%{!?__python3:%global __python3 /usr/bin/python3} - -%if %{undefined python2_sitearch} -%global python2_sitearch %(%{__python2} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))") -%endif - -%if %{undefined python3_sitearch} -%global python3_sitearch %(%{__python3} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))") -%endif - -%{!?py3dir: %global py3dir %{_builddir}/python3-%{name}-%{version}-%{release}} - -# ----------------------------------------------------------------------------- - - -Name: python-dmidecode -Summary: Python module to access DMI data -Version: 3.12.2 -Release: 10%{?dist} - -%if %{_vendor} == "debbuild" -Packager: Neal Gompa -License: GPL-2.0 -Group: python -%else -License: GPLv2 -Group: System Environment/Libraries -%endif -URL: https://github.com/nima/python-dmidecode -Source0: https://github.com/nima/%{name}/archive/v%{version}/%{name}-%{version}.tar.gz - -Patch666: Stop-linking-with-libxml2mod.patch - -%if %{_vendor} == "debbuild" -BuildRequires: libxml2-dev - -BuildRequires: python-dev -BuildRequires: python-libxml2 - -BuildRequires: python3-dev -BuildRequires: python3-libxml2 -%else -BuildRequires: libxml2-devel - -BuildRequires: python2-devel -BuildRequires: libxml2-python - -BuildRequires: python3-devel -BuildRequires: libxml2-python3 -%endif - -%description -python-dmidecode is a Python extension module that uses the -code-base of the 'dmidecode' utility, and presents the data -as python data structures or as XML data using libxml2. - -%package -n python2-dmidecode -Summary: Python 2 module to access DMI data -%if %{_vendor} == "debbuild" -Requires: python-libxml2 -# Replaces Debian's python-dmidecode -Provides: python-dmidecode -Obsoletes: python-dmidecode -# For scriptlets -Requires(preun): python-minimal -Requires(post): python-minimal -%else -Requires: libxml2-python -%endif -%{?python_provide:%python_provide python2-dmidecode} - -%description -n python2-dmidecode -python2-dmidecode is a Python 2 extension module that uses the -code-base of the 'dmidecode' utility, and presents the data -as python data structures or as XML data using libxml2. - -%package -n python3-dmidecode -Summary: Python 3 module to access DMI data -%if %{_vendor} == "debbuild" -Requires: python3-libxml2 -# For scriptlets -Requires(preun): python3-minimal -Requires(post): python3-minimal -%else -Requires: libxml2-python3 -%endif - -%description -n python3-dmidecode -python3-dmidecode is a Python 3 extension module that uses the -code-base of the 'dmidecode' utility, and presents the data -as Python 3 data structures or as XML data using libxml2. 
- - - -%prep -%setup -qc - -%if %{_vendor} == "debbuild" -# Apply patches for debian -pushd %{name}-%{version} -%patch666 -p1 -popd -%endif - -mv %{name}-%{version} python2 -cp -a python{2,3} - -pushd python3 -sed -i 's/python2/python3/g' Makefile unit-tests/Makefile -popd - - -%build -# Not to get undefined symbol: dmixml_GetContent -export CFLAGS="${CFLAGS-} -std=gnu89" - - -for PY in python2 python3; do - pushd $PY - make build - popd -done - -%install -pushd python2 -%{__python2} src/setup.py install %{?pyinstflags} --skip-build --root %{buildroot} %{?pytargetflags} --prefix=%{_prefix} -popd - -pushd python3 -%{__python3} src/setup.py install %{?pyinstflags} --skip-build --root %{buildroot} %{?pytargetflags} --prefix=%{_prefix} -popd - - -%if %{_vendor} != "debbuild" -%check -for PY in python2 python3; do - pushd $PY/unit-tests - make - popd -done -%endif - -%files -n python2-dmidecode -%license python2/doc/LICENSE python2/doc/AUTHORS python2/doc/AUTHORS.upstream -%doc python2/README python2/doc/README.upstream -%{python2_sitearch}/* -%{_datadir}/python-dmidecode/ - -%files -n python3-dmidecode -%license python3/doc/LICENSE python3/doc/AUTHORS python3/doc/AUTHORS.upstream -%doc python3/README python3/doc/README.upstream -%{python3_sitearch}/* -%{_datadir}/python-dmidecode/ - -%if %{_vendor} == "debbuild" - -%post -n python2-dmidecode -# Do late-stage bytecompilation, per debian policy -pycompile -p python2-dmidecode -V -3.0 - -%preun -n python2-dmidecode -# Ensure all *.py[co] files are deleted, per debian policy -pyclean -p python2-dmidecode - -%post -n python3-dmidecode -# Do late-stage bytecompilation, per debian policy -py3compile -p python3-dmidecode -V -4.0 - -%preun -n python3-dmidecode -# Ensure all *.py[co] files are deleted, per debian policy -py3clean -p python3-dmidecode - -%endif - - -%changelog -* Fri Jul 06 2018 Neal Gompa - 3.12.2-10 -- Add Debian/Ubuntu support - -* Sat Aug 19 2017 Zbigniew Jędrzejewski-Szmek - 3.12.2-9 -- Python 2 binary package renamed to python2-dmidecode - See https://fedoraproject.org/wiki/FinalizingFedoraSwitchtoPython3 - -* Thu Aug 03 2017 Fedora Release Engineering - 3.12.2-8 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild - -* Thu Jul 27 2017 Fedora Release Engineering - 3.12.2-7 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild - -* Sat Feb 11 2017 Fedora Release Engineering - 3.12.2-6 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild - -* Mon Dec 19 2016 Miro Hrončok - 3.12.2-5 -- Rebuild for Python 3.6 - -* Tue Jul 19 2016 Fedora Release Engineering - 3.12.2-4 -- https://fedoraproject.org/wiki/Changes/Automatic_Provides_for_Python_RPM_Packages - -* Thu Feb 04 2016 Fedora Release Engineering - 3.12.2-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild - -* Sat Nov 07 2015 Robert Kuska - 3.12.2-2 -- Rebuilt for Python3.5 rebuild - -* Fri Jul 10 2015 Miro Hrončok - 3.12.2-1 -- Update to 3.12.2 -- Add Python 3 subpackage (#1236000) -- Removed deprecated statements -- Moved some docs to license -- Removed pacthes -- Corrected bogus dates in %%changelog -- Build with -std=gnu89 - -* Thu Jun 18 2015 Fedora Release Engineering - 3.10.13-13 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild - -* Sun Aug 17 2014 Fedora Release Engineering - 3.10.13-12 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild - -* Sat Jun 07 2014 Fedora Release Engineering - 3.10.13-11 -- Rebuilt for 
https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild - -* Sun Aug 04 2013 Fedora Release Engineering - 3.10.13-10 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild - -* Thu Jun 20 2013 Ales Ledvinka - 3.10.13-9 -- Attribute installed may appear as duplicate and cause invalid XML. - -* Mon Jun 17 2013 Ales Ledvinka - 3.10.13-8 -- Attribute dmispec may cause invalid XML on some hardware. -- Signal handler for SIGILL. - -* Thu Feb 14 2013 Fedora Release Engineering - 3.10.13-7 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_19_Mass_Rebuild - -* Sat Jul 21 2012 Fedora Release Engineering - 3.10.13-6 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild - -* Thu Jul 19 2012 Ales Ledvinka 3.10.14-5 -- Upstream relocated. Document source tag and tarball generation. - -* Sat Jan 14 2012 Fedora Release Engineering - 3.10.13-4 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild - -* Tue Feb 08 2011 Fedora Release Engineering - 3.10.13-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild - -* Thu Jul 22 2010 David Malcolm - 3.10.13-2 -- Rebuilt for https://fedoraproject.org/wiki/Features/Python_2.7/MassRebuild - -* Tue Jun 15 2010 Roman Rakus - 3.10.13-1 -- Update to new release - -* Fri Mar 12 2010 Nima Talebi - 3.10.12-1 -- Update to new release - -* Tue Feb 16 2010 Nima Talebi - 3.10.11-1 -- Update to new release - -* Tue Jan 12 2010 Nima Talebi - 3.10.10-1 -- Update to new release - -* Thu Jan 07 2010 Nima Talebi - 3.10.9-1 -- Update to new release - - -* Tue Dec 15 2009 Nima Talebi - 3.10.8-1 -- New Upstream release. -- Big-endian and little-endian approved. -- Packaged unit-test to tarball. -- Rewritten unit-test to be able to run as non-root user, where it will not - try to read /dev/mem. -- Added two dmidump data files to the unit-test. - -* Thu Nov 26 2009 David Sommerseth - 3.10.7-3 -- Fixed even more .spec file issues and removed explicit mentioning - of /usr/share/python-dmidecode/pymap.xml - -* Wed Nov 25 2009 David Sommerseth - 3.10.7-2 -- Fixed some .spec file issues (proper Requires, use _datadir macro) - -* Wed Sep 23 2009 Nima Talebi - 3.10.7-1 -- Updated source0 to new 3.10.7 tar ball - -* Mon Jul 13 2009 David Sommerseth - 3.10.6-6 -- Only build the python-dmidecode module, not everything - -* Mon Jul 13 2009 David Sommerseth - 3.10.6-5 -- Added missing BuildRequres for libxml2-python - -* Mon Jul 13 2009 David Sommerseth - 3.10.6-4 -- Added missing BuildRequres for python-devel - -* Mon Jul 13 2009 David Sommerseth - 3.10.6-3 -- Added missing BuildRequres for libxml2-devel - -* Mon Jul 13 2009 David Sommerseth - 3.10.6-2 -- Updated release, to avoid build conflict - -* Wed Jun 10 2009 David Sommerseth - 3.10.6-1 -- Updated to work with the new XML based python-dmidecode - -* Sat Mar 7 2009 Clark Williams - 2.10.3-1 -- Initial build. - diff --git a/client/debian/python-hwdata/python-hwdata.spec b/client/debian/python-hwdata/python-hwdata.spec deleted file mode 100644 index 85f64be1a59e..000000000000 --- a/client/debian/python-hwdata/python-hwdata.spec +++ /dev/null @@ -1,255 +0,0 @@ -%if 0%{?fedora} || 0%{?rhel} > 7 -# Enable python3 build by default -%bcond_without python3 -%else -%bcond_with python3 -%endif - -%if 0%{?rhel} > 7 || 0%{?fedora} > 29 -# Disable python2 build by default -%bcond_with python2 -%else -%bcond_without python2 -%endif - -%if %{_vendor} == "debbuild" -# Debian points /bin/sh to /bin/dash by default. 
This breaks a lot of common -# scripts that rely on bash-specific behavior, so changing the shell preempts -# a lot of these breakages. -%global _buildshell /bin/bash -%endif - - -# Setuptools install flags -%if %{_vendor} == "debbuild" -%global pyinstflags --no-compile -O0 -%global pytargetflags --install-layout=deb -%else -%global pyinstflags -O1 -%global pytargetflags %{nil} -%endif - -# For systems (mostly debian) that don't define these things ------------------- -%{!?__python2:%global __python2 /usr/bin/python2} -%{!?__python3:%global __python3 /usr/bin/python3} - -%if %{undefined python2_sitelib} -%global python2_sitelib %(%{__python2} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") -%endif - -%if %{undefined python3_sitelib} -%global python3_sitelib %(%{__python3} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") -%endif - -%{!?py2_build: %global py2_build CFLAGS="%{optflags}" %{__python2} setup.py build} -%{!?py2_install: %global py2_install %{__python2} setup.py install %{?pyinstflags} --skip-build --root %{buildroot} %{?pytargetflags}} -%{!?py3_build: %global py3_build CFLAGS="%{optflags}" %{__python3} setup.py build} -%{!?py3_install: %global py3_install %{__python3} setup.py install %{?pyinstflags} --skip-build --root %{buildroot} %{?pytargetflags}} - -%{!?py3dir: %global py3dir %{_builddir}/python3-%{name}-%{version}-%{release}} - -# ----------------------------------------------------------------------------- - -# tito tags with version-release -%global origrel 1 - -Name: python-hwdata -Version: 2.3.7 -Release: 1%{?dist} -Summary: Python bindings to hwdata package -%if %{_vendor} == "debbuild" -Group: python -Packager: Miroslav Suchý -%else -Group: Development/Libraries -%endif -BuildArch: noarch -License: GPLv2 -URL: https://github.com/xsuchy/python-hwdata -Source0: https://github.com/xsuchy/%{name}/archive/%{name}-%{version}-%{origrel}.tar.gz - -%description -Provide python interface to database stored in hwdata package. -It allows you to get human readable description of USB and PCI devices. - -%if %{with python2} -%package -n python2-hwdata -Summary: Python bindings to hwdata package - -%if %{_vendor} == "debbuild" -BuildRequires: python-dev -Requires(preun): python-minimal -Requires(post): python-minimal -%else -BuildRequires: python2-devel -%endif - -Requires: hwdata -%{?python_provide:%python_provide python2-hwdata} -%if 0%{?rhel} < 8 -Provides: python-hwdata = %{version}-%{release} -%endif - -%description -n python2-hwdata -Provide python interface to database stored in hwdata package. -It allows you to get human readable description of USB and PCI devices. - -This is the Python 2 build of the module. - -%endif # with python2 - -%if %{with python3} -%package -n python3-hwdata -Summary: Python bindings to hwdata package - -%if %{_vendor} == "debbuild" -BuildRequires: python3-dev -BuildRequires: pylint3 -Requires(preun): python3-minimal -Requires(post): python3-minimal -%else -BuildRequires: python3-devel -BuildRequires: python3-pylint -%endif -Requires: hwdata -%{?python_provide:%python_provide python3-hwdata} - -%description -n python3-hwdata -Provide python interface to database stored in hwdata package. -It allows you to get human readable description of USB and PCI devices. - -This is the Python 3 build of the module. -%endif # with python3 - -%prep -%setup -q -n %{name}-%{name}-%{version}-%{origrel} - -%if %{with python3} -rm -rf %{py3dir} -cp -a . 
%{py3dir} -%endif # with python3 - -%build -%if %{with python2} -%py2_build -%endif # with python2 - -%if %{with python3} -pushd %{py3dir} -%py3_build -popd -%endif # with python3 - -%install -%if %{with python2} -%py2_install -%endif # with python2 - -%if %{with python3} -pushd %{py3dir} -%py3_install -popd -%endif # with python3 - -%if %{_vendor} != "debbuild" -%check -%if %{with python3} -pylint-3 hwdata.py example.py || : -%endif # with python3 -%endif - -%if %{with python2} -%files -n python2-hwdata -%license LICENSE -%doc README.md example.py -%doc html -%{python2_sitelib}/* -%endif # with python2 - -%if %{with python3} -%files -n python3-hwdata -%license LICENSE -%doc README.md example.py -%doc html -%{python3_sitelib}/* -%endif # with python3 - -%if %{_vendor} == "debbuild" - -%if %{with python2} -%post -n python2-hwdata -# Do late-stage bytecompilation, per debian policy -pycompile -p python2-hwdata -V -3.0 - -%preun -n python2-hwdata -# Ensure all *.py[co] files are deleted, per debian policy -pyclean -p python2-hwdata -%endif - -%if %{with python3} -%post -n python3-hwdata -# Do late-stage bytecompilation, per debian policy -py3compile -p python3-hwdata -V -4.0 - -%preun -n python3-hwdata -# Ensure all *.py[co] files are deleted, per debian policy -py3clean -p python3-hwdata -%endif - -%endif - -%changelog -* Tue Jun 12 2018 Dalton Miner 2.3.7-2 -- Updating packaging for debian systems - -* Fri Mar 23 2018 Miroslav Suchý 2.3.7-1 -- remove python2 subpackage for F30+ - -* Mon Feb 12 2018 Miroslav Suchý 2.3.6-1 -- Update Python 2 dependency declarations to new packaging standards - -* Wed Aug 09 2017 Miroslav Suchý 2.3.5-1 -- create python2-hwdata subpackage -- use dnf instead of yum in README -- remove rhel5 compatibilities from spec - -* Thu Sep 22 2016 Miroslav Suchý 2.3.4-1 -- run pylint in %%check -- require hwdata in python 3 package too (jdobes@redhat.com) -- implement PNP interface -- errors in usb.ids should not be fatal -- change upstream url in setup.py - -* Wed Jan 28 2015 Miroslav Suchý 2.3.3-1 -- upstream location changed - -* Wed Jan 28 2015 Miroslav Suchý -- move upstream location - -* Wed Dec 04 2013 Miroslav Suchý 1.10.1-1 -- create python3-hwdata subpackage -- Bumping package versions for 1.9 -- %%defattr is not needed since rpm 4.4 - -* Fri Mar 02 2012 Miroslav Suchý 1.7.3-1 -- 798375 - fix PCI device name translation (Joshua.Roys@gtri.gatech.edu) -- use setup from distutils - -* Fri Mar 02 2012 Jan Pazdziora 1.7.2-1 -- Update the copyright year info. - -* Fri Mar 02 2012 Jan Pazdziora 1.7.1-1 -- correct indentation (mzazrivec@redhat.com) - -* Mon Oct 31 2011 Miroslav Suchý 1.6.2-1 -- point URL to specific python-hwdata page - -* Fri Jul 22 2011 Jan Pazdziora 1.6.1-1 -- We only support version 14 and newer of Fedora, removing conditions for old - versions. - -* Mon Apr 26 2010 Miroslav Suchý 1.2-1 -- 585138 - change %%files section and patial support for python3 - -* Fri Apr 23 2010 Miroslav Suchý 1.1-1 -- initial release From cec4cd88e21e9ccdc2f2060e70d18b074e40d98d Mon Sep 17 00:00:00 2001 From: Ondrej Holecek Date: Mon, 7 Aug 2023 15:53:37 +0200 Subject: [PATCH 73/80] Check for configured rhn.conf instead of rpm run status Under some circumstances like updating older unconfigured SUMA system, tomcat.xml was not configured properly as source tomcat.xml was not configured. Instead of relying if we are doing rpm update or new install, check status or rhn.conf file. 
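In practice this means a populated /etc/rhn/rhn.conf now marks an already-configured (upgraded) server, while a missing or empty file marks a fresh installation, and /etc/tomcat/server.xml is regenerated with the matching XSL stylesheet in either case. A minimal sketch of that decision, using a plain test -s as a stand-in for the filesize helper the scriptlet relies on (an approximation, not the exact helper):

    # Sketch only: pick upgrade vs. fresh-install handling from rhn.conf content
    if [ -s /etc/rhn/rhn.conf ]; then
        # already configured -> only refresh connectionTimeout (server_update.xml.xsl)
        echo "rhn.conf is populated; treating this as an upgrade"
    else
        # unconfigured -> generate server.xml from scratch (server.xml.xsl)
        echo "rhn.conf is missing or empty; treating this as a new installation"
    fi
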
--- ...lk-setup.changes.oholecek.tomcat-install-check | 1 + spacewalk/setup/spacewalk-setup.spec | 15 +++++++-------- 2 files changed, 8 insertions(+), 8 deletions(-) create mode 100644 spacewalk/setup/spacewalk-setup.changes.oholecek.tomcat-install-check diff --git a/spacewalk/setup/spacewalk-setup.changes.oholecek.tomcat-install-check b/spacewalk/setup/spacewalk-setup.changes.oholecek.tomcat-install-check new file mode 100644 index 000000000000..808a0993a282 --- /dev/null +++ b/spacewalk/setup/spacewalk-setup.changes.oholecek.tomcat-install-check @@ -0,0 +1 @@ +- Do not rely on rpm runtime status, rather check rhn.conf if is configured (bsc#1210935) diff --git a/spacewalk/setup/spacewalk-setup.spec b/spacewalk/setup/spacewalk-setup.spec index ef4e91bbc2ea..6eb81b161634 100644 --- a/spacewalk/setup/spacewalk-setup.spec +++ b/spacewalk/setup/spacewalk-setup.spec @@ -194,18 +194,17 @@ install -Dd -m 0755 %{buildroot}%{_prefix}/share/salt-formulas/states install -Dd -m 0755 %{buildroot}%{_prefix}/share/salt-formulas/metadata %post -if [ $1 == 1 -a -e /etc/tomcat/server.xml ]; then -#just during new installation. during upgrade the changes are already applied +if [ -f /etc/rhn/rhn.conf -a $(filesize /etc/rhn/rhn.conf) -gt 1 ]; then + # rhn.conf is configured, this is an upgrade + # during upgrade, setup new connectionTimeout if the user didn't change it. Keeping it until SUMA 4.2 is maintained CURRENT_DATE=$(date +"%%Y-%%m-%%dT%%H:%%M:%%S.%%3N") cp /etc/tomcat/server.xml /etc/tomcat/server.xml.$CURRENT_DATE - xsltproc %{_datadir}/spacewalk/setup/server.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml -fi - -if [ $1 == 2 -a -e /etc/tomcat/server.xml ]; then -#during upgrade, setup new connectionTimeout if the user didn't change it. Keeping it until SUMA 4.2 is maintained + xsltproc %{_datadir}/spacewalk/setup/server_update.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml +else + # rhn.conf does not exists or is empty, this is new installation or update of new installation CURRENT_DATE=$(date +"%%Y-%%m-%%dT%%H:%%M:%%S.%%3N") cp /etc/tomcat/server.xml /etc/tomcat/server.xml.$CURRENT_DATE - xsltproc %{_datadir}/spacewalk/setup/server_update.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml + xsltproc %{_datadir}/spacewalk/setup/server.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml fi if [ -e /etc/zypp/credentials.d/SCCcredentials ]; then From 0bba07a992fbd1dc2427359dd7d2b7e851f3d09b Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 23 Aug 2023 16:58:17 +0200 Subject: [PATCH 74/80] move APACHE_MODULES configuration in mgr-setup --- python/spacewalk/spacewalk-backend.spec | 9 --------- susemanager/bin/mgr-setup | 5 +++++ 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/python/spacewalk/spacewalk-backend.spec b/python/spacewalk/spacewalk-backend.spec index 8915be87d87f..0cc2506ad923 100644 --- a/python/spacewalk/spacewalk-backend.spec +++ b/python/spacewalk/spacewalk-backend.spec @@ -351,15 +351,6 @@ install -Dd -m 0750 % $RPM_BUILD_ROOT%{_prefix}/lib/zypp/plugins/urlresolver %{__install} satellite_tools/spacewalk-uln-resolver $RPM_BUILD_ROOT%{_prefix}/lib/zypp/plugins/urlresolver/spacewalk-uln-resolver %{__install} satellite_tools/spacewalk-extra-http-headers $RPM_BUILD_ROOT%{_prefix}/lib/zypp/plugins/urlresolver/spacewalk-extra-http-headers - -%post server -%if 0%{?suse_version} -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES wsgi -%endif -if [ ! 
-e %{rhnconf}/rhn.conf ]; then - exit 0 -fi - %pre tools %if !0%{?rhel} %service_add_pre spacewalk-diskcheck.service spacewalk-diskcheck.timer diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 797a0dc45706..bf0e49e121cd 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -408,6 +408,9 @@ if [ -f $MANAGER_COMPLETE ]; then fi } +setup_apache() { + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES wsgi +} setup_spacewalk() { CERT_COUNTRY=`echo -n $CERT_COUNTRY|tr '[:lower:]' '[:upper:]'` @@ -908,6 +911,8 @@ do_setup() { setup_spacewalk + setup_apache + # In the container case, we have the MIRROR_PATH environment variable at setup if [ -n "$MIRROR_PATH" ]; then echo "server.susemanager.fromdir = $MIRROR_PATH" >> /etc/rhn/rhn.conf From b1618447abce9037d61f2a194a23fe1bc3731630 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 23 Aug 2023 17:00:39 +0200 Subject: [PATCH 75/80] move cobbler setup to mgr-setup --- susemanager/bin/mgr-setup | 13 +++++++++++++ .../susemanager-tftpsync/susemanager-tftpsync.spec | 9 --------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index bf0e49e121cd..ab11aacba1a5 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -411,6 +411,17 @@ fi setup_apache() { sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES wsgi } + +setup_cobbler() { + if [ -f "/etc/cobbler/settings" ]; then + if ! grep "tftpsync_timeout:" /etc/cobbler/settings >/dev/null; then + echo "" >> /etc/cobbler/settings + echo "tftpsync_timeout: 15" >> /etc/cobbler/settings + echo "" >> /etc/cobbler/settings + fi + fi +} + setup_spacewalk() { CERT_COUNTRY=`echo -n $CERT_COUNTRY|tr '[:lower:]' '[:upper:]'` @@ -912,6 +923,8 @@ do_setup() { setup_spacewalk setup_apache + + setup_cobbler # In the container case, we have the MIRROR_PATH environment variable at setup if [ -n "$MIRROR_PATH" ]; then diff --git a/tftpsync/susemanager-tftpsync/susemanager-tftpsync.spec b/tftpsync/susemanager-tftpsync/susemanager-tftpsync.spec index ccd3ee2a885d..c80f508aeb11 100644 --- a/tftpsync/susemanager-tftpsync/susemanager-tftpsync.spec +++ b/tftpsync/susemanager-tftpsync/susemanager-tftpsync.spec @@ -78,15 +78,6 @@ install -p -D -m 755 configure-tftpsync.sh %{buildroot}%{_sbindir}/configure-tf %endif %endif -%post -if [ -f "/etc/cobbler/settings" ]; then - if ! 
grep "tftpsync_timeout:" /etc/cobbler/settings >/dev/null; then - echo "" >> /etc/cobbler/settings - echo "tftpsync_timeout: 15" >> /etc/cobbler/settings - echo "" >> /etc/cobbler/settings - fi -fi - %files %defattr(-,root,root,-) %doc COPYING.LIB README From 5d1e7560170a21bfc3a2b5833a5c222b3a13d3d6 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 23 Aug 2023 17:34:04 +0200 Subject: [PATCH 76/80] move config file to mgr-setup --- spacewalk/setup/spacewalk-setup.spec | 15 -------------- susemanager/bin/mgr-setup | 29 ++++++++++++++++++++++++++-- 2 files changed, 27 insertions(+), 17 deletions(-) diff --git a/spacewalk/setup/spacewalk-setup.spec b/spacewalk/setup/spacewalk-setup.spec index 6eb81b161634..d60cec3ec2ef 100644 --- a/spacewalk/setup/spacewalk-setup.spec +++ b/spacewalk/setup/spacewalk-setup.spec @@ -200,21 +200,6 @@ if [ -f /etc/rhn/rhn.conf -a $(filesize /etc/rhn/rhn.conf) -gt 1 ]; then CURRENT_DATE=$(date +"%%Y-%%m-%%dT%%H:%%M:%%S.%%3N") cp /etc/tomcat/server.xml /etc/tomcat/server.xml.$CURRENT_DATE xsltproc %{_datadir}/spacewalk/setup/server_update.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml -else - # rhn.conf does not exists or is empty, this is new installation or update of new installation - CURRENT_DATE=$(date +"%%Y-%%m-%%dT%%H:%%M:%%S.%%3N") - cp /etc/tomcat/server.xml /etc/tomcat/server.xml.$CURRENT_DATE - xsltproc %{_datadir}/spacewalk/setup/server.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml -fi - -if [ -e /etc/zypp/credentials.d/SCCcredentials ]; then - chgrp www /etc/zypp/credentials.d/SCCcredentials - chmod g+r /etc/zypp/credentials.d/SCCcredentials -fi - -if [ -d /var/cache/salt/master/thin ]; then - # clean the thin cache - rm -rf /var/cache/salt/master/thin fi # sudoers file is now in /etc/sudoers.d/spacewalk diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index ab11aacba1a5..5e01f5292258 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -412,6 +412,28 @@ setup_apache() { sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES wsgi } +setup_tomcat { + if [ -f /etc/rhn/rhn.conf -a $(filesize /etc/rhn/rhn.conf) -gt 1 ]; then + # rhn.conf is configured, this is an upgrade + # during upgrade, setup new connectionTimeout if the user didn't change it. Keeping it until SUMA 4.2 is maintained + CURRENT_DATE=$(date +"%%Y-%%m-%%dT%%H:%%M:%%S.%%3N") + cp /etc/tomcat/server.xml /etc/tomcat/server.xml.$CURRENT_DATE + xsltproc /usr/share/spacewalk/setup/server_update.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml + else + # rhn.conf does not exists or is empty, this is new installation or update of new installation + CURRENT_DATE=$(date +"%%Y-%%m-%%dT%%H:%%M:%%S.%%3N") + cp /etc/tomcat/server.xml /etc/tomcat/server.xml.$CURRENT_DATE + xsltproc /usr/share/spacewalk/setup/server.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml + fi +} + +change_SSCcredentials_permission { + if [ -e /etc/zypp/credentials.d/SCCcredentials ]; then + chgrp www /etc/zypp/credentials.d/SCCcredentials + chmod g+r /etc/zypp/credentials.d/SCCcredentials + fi +} + setup_cobbler() { if [ -f "/etc/cobbler/settings" ]; then if ! 
grep "tftpsync_timeout:" /etc/cobbler/settings >/dev/null; then @@ -420,6 +442,9 @@ setup_cobbler() { echo "" >> /etc/cobbler/settings fi fi + if grep 'authn_spacewalk' /etc/cobbler/modules.conf > /dev/null 2>&1; then + sed -i 's/module = authn_spacewalk/module = authentication.spacewalk/' /etc/cobbler/modules.conf + fi } setup_spacewalk() { @@ -921,10 +946,10 @@ do_setup() { fi setup_spacewalk - setup_apache - + setup_tomcat setup_cobbler + change_SSCcredentials_permission # In the container case, we have the MIRROR_PATH environment variable at setup if [ -n "$MIRROR_PATH" ]; then From dc18c7193931f8c8053182218b66298fa89783c4 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Wed, 23 Aug 2023 17:44:02 +0200 Subject: [PATCH 77/80] move config file from spacewalk-config to mgr-setup --- spacewalk/config/spacewalk-config.spec | 15 -------------- susemanager/bin/mgr-setup | 28 +++++++++++++++++++++++++- 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/spacewalk/config/spacewalk-config.spec b/spacewalk/config/spacewalk-config.spec index 3b7f7dbce642..40bd6685bac1 100644 --- a/spacewalk/config/spacewalk-config.spec +++ b/spacewalk/config/spacewalk-config.spec @@ -166,21 +166,6 @@ if [ $1 -eq 2 ] ; then fi fi -%if 0%{?suse_version} -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES version -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES proxy -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES proxy_ajp -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES proxy_wstunnel -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES rewrite -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES headers -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES xsendfile -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES filter -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES deflate -sysconf_addword /etc/sysconfig/apache2 APACHE_SERVER_FLAGS SSL -sysconf_addword /etc/sysconfig/apache2 APACHE_SERVER_FLAGS ISSUSE -sysconf_addword -r /etc/sysconfig/apache2 APACHE_MODULES access_compat -%endif - # sudo is reading every file here! So ensure we do not have duplicate definitions! if [ -e /etc/sudoers.d/spacewalk.rpmsave ]; then mv /etc/sudoers.d/spacewalk.rpmsave /root/sudoers-spacewalk.save diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 5e01f5292258..b7d05c95ceef 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -408,8 +408,31 @@ if [ -f $MANAGER_COMPLETE ]; then fi } +backup_certificates() { + # we want to remove the cert from the package. 
+ # copy the cert to a backup place to restore them later + if [ -L /etc/pki/tls/certs/spacewalk.crt ]; then + cp /etc/pki/tls/certs/spacewalk.crt /etc/pki/tls/certs/uyuni.crt + fi + if [ -L /etc/pki/tls/private/spacewalk.key ]; then + cp /etc/pki/tls/private/spacewalk.key /etc/pki/tls/private/uyuni.key + fi +} + setup_apache() { sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES wsgi + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES version + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES proxy + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES proxy_ajp + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES proxy_wstunnel + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES rewrite + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES headers + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES xsendfile + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES filter + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES deflate + sysconf_addword /etc/sysconfig/apache2 APACHE_SERVER_FLAGS SSL + sysconf_addword /etc/sysconfig/apache2 APACHE_SERVER_FLAGS ISSUSE + sysconf_addword -r /etc/sysconfig/apache2 APACHE_MODULES access_compat } setup_tomcat { @@ -846,6 +869,7 @@ do_migration() { cleanup_hostname remove_ssh_key + if [ -d /root/.ssh.new ]; then mv /root/.ssh /root/.ssh.orig mv /root/.ssh.new /root/.ssh @@ -950,7 +974,9 @@ do_setup() { setup_tomcat setup_cobbler change_SSCcredentials_permission - + + backup_certificates + # In the container case, we have the MIRROR_PATH environment variable at setup if [ -n "$MIRROR_PATH" ]; then echo "server.susemanager.fromdir = $MIRROR_PATH" >> /etc/rhn/rhn.conf From 2ade35ddbda8b10943ae3cefd8805171d6b37f56 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 24 Aug 2023 09:13:04 +0200 Subject: [PATCH 78/80] move tftp permission configuration to mgr-setup --- susemanager/bin/mgr-setup | 10 ++++++++++ susemanager/susemanager.spec | 12 ------------ 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index b7d05c95ceef..5f84fee6b8b4 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -408,6 +408,14 @@ if [ -f $MANAGER_COMPLETE ]; then fi } +setup_tftp_permission() { + if [ ! -d /srv/tftpboot ]; then + mkdir -p /srv/tftpboot + chmod 750 /srv/tftpboot + chown wwwrun:tftp /srv/tftpboot + fi + } + backup_certificates() { # we want to remove the cert from the package. # copy the cert to a backup place to restore them later @@ -977,6 +985,8 @@ do_setup() { backup_certificates + setup_tftp_permission + # In the container case, we have the MIRROR_PATH environment variable at setup if [ -n "$MIRROR_PATH" ]; then echo "server.susemanager.fromdir = $MIRROR_PATH" >> /etc/rhn/rhn.conf diff --git a/susemanager/susemanager.spec b/susemanager/susemanager.spec index ec684d3c25cc..986a0a07798c 100644 --- a/susemanager/susemanager.spec +++ b/susemanager/susemanager.spec @@ -248,18 +248,6 @@ popd %post POST_ARG=$1 -if [ -f /etc/sysconfig/atftpd ]; then - . /etc/sysconfig/atftpd - if [ $ATFTPD_DIRECTORY = "/tftpboot" ]; then - sysconf_addword -r /etc/sysconfig/atftpd ATFTPD_DIRECTORY "/tftpboot" - sysconf_addword /etc/sysconfig/atftpd ATFTPD_DIRECTORY "%{serverdir}/tftpboot" - fi -fi -if [ ! 
-d %{serverdir}/tftpboot ]; then - mkdir -p %{serverdir}/tftpboot - chmod 750 %{serverdir}/tftpboot - chown %{apache_user}:%{tftp_group} %{serverdir}/tftpboot -fi # XE appliance overlay file created this with different user chown root.root /etc/sysconfig if [ $POST_ARG -eq 2 ] ; then From 7ed7609a9720db4b0f06b48e9ce2f94bcf43f501 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Thu, 24 Aug 2023 11:06:46 +0200 Subject: [PATCH 79/80] fix typo --- susemanager/bin/mgr-setup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 5f84fee6b8b4..709444648f83 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -443,7 +443,7 @@ setup_apache() { sysconf_addword -r /etc/sysconfig/apache2 APACHE_MODULES access_compat } -setup_tomcat { +setup_tomcat() { if [ -f /etc/rhn/rhn.conf -a $(filesize /etc/rhn/rhn.conf) -gt 1 ]; then # rhn.conf is configured, this is an upgrade # during upgrade, setup new connectionTimeout if the user didn't change it. Keeping it until SUMA 4.2 is maintained @@ -458,7 +458,7 @@ setup_tomcat { fi } -change_SSCcredentials_permission { +change_SSCcredentials_permission() { if [ -e /etc/zypp/credentials.d/SCCcredentials ]; then chgrp www /etc/zypp/credentials.d/SCCcredentials chmod g+r /etc/zypp/credentials.d/SCCcredentials From c04551bf1e13d1d0d563c3719b5a259ade4839b3 Mon Sep 17 00:00:00 2001 From: mbussolotto Date: Tue, 29 Aug 2023 16:20:50 +0200 Subject: [PATCH 80/80] cobbler setup no required --- susemanager/bin/mgr-setup | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 709444648f83..5ceda5e945f3 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -465,19 +465,6 @@ change_SSCcredentials_permission() { fi } -setup_cobbler() { - if [ -f "/etc/cobbler/settings" ]; then - if ! grep "tftpsync_timeout:" /etc/cobbler/settings >/dev/null; then - echo "" >> /etc/cobbler/settings - echo "tftpsync_timeout: 15" >> /etc/cobbler/settings - echo "" >> /etc/cobbler/settings - fi - fi - if grep 'authn_spacewalk' /etc/cobbler/modules.conf > /dev/null 2>&1; then - sed -i 's/module = authn_spacewalk/module = authentication.spacewalk/' /etc/cobbler/modules.conf - fi -} - setup_spacewalk() { CERT_COUNTRY=`echo -n $CERT_COUNTRY|tr '[:lower:]' '[:upper:]'` @@ -980,7 +967,6 @@ do_setup() { setup_spacewalk setup_apache setup_tomcat - setup_cobbler change_SSCcredentials_permission backup_certificates