diff --git a/client/debian/DEBIAN-HOWTO b/client/debian/DEBIAN-HOWTO deleted file mode 100644 index ba27aaffe8bd..000000000000 --- a/client/debian/DEBIAN-HOWTO +++ /dev/null @@ -1,43 +0,0 @@ -HOW TO BUILD DEBIAN PACKAGES - -The client packages are built using debbuild. - -The following packages make up the client stack: -* apt-spacewalk (in client/debian) -* rhn-client-tools (in client/tools) -* rhnlib (in client/rhel) -* rhnsd (in client/rhel) -* rhncfg (in client/tools) -* spacewalk-usix (in usix) - -The following external packages are also part of the client stack: -* python-dmidecode (in client/debian) -* python-hwdata (in client/debian) - -Preparation steps: - -1. Install debbuild from https://github.com/ascherer/debbuild/releases - -2. Create the debbuild package build tree - -mkdir -p ~/debbuild/{SPECS,SOURCES,SDEBS,DEBS,BUILD,BUILDROOT} - -To build the non-external packages, these are the following steps: - -1. Switch to the directory of the package source (ex. for apt-spacewalk, cd client/debian/apt-spacewalk) - -2. Use tito to build tarball (tito build --tgz) - -3. Copy the tarball to ~/debbuild/SOURCES and spec to ~/debbuild/SPECS - -4. Change to ~/debbuild/SPECS and run "debbuild -ba" on the spec. (ex. for apt-spacewalk, debbuild -ba apt-spacewalk.spec) - -For external packages, the only difference is step 2, where you use spectool to fetch the tarball instead. -For example, for python-hwdata, "spectool -g python-hwdata.spec" is sufficient to get the sources. 
- - -How to regenerate repo: ------------------------ -cd spacewalk/debian -dpkg-scanpackages dists/spacewalk-unstable/binary-amd64 |gzip >dists/spacewalk-unstable/binary-amd64/Packages.gz -dpkg-scanpackages dists/spacewalk-unstable/binary-i386 |gzip >dists/spacewalk-unstable/binary-i386/Packages.gz diff --git a/client/debian/apt-spacewalk/50spacewalk b/client/debian/apt-spacewalk/50spacewalk deleted file mode 100644 index a80904711033..000000000000 --- a/client/debian/apt-spacewalk/50spacewalk +++ /dev/null @@ -1,15 +0,0 @@ -# -# The configuration for apt-spacewalk -# - -APT { - Update { - List-Refresh "true"; - Pre-Invoke { - "if [ -x /usr/lib/apt-spacewalk/post_invoke.py ]; then /usr/lib/apt-spacewalk/post_invoke.py; fi"; - } - } -}; -DPkg::Post-Invoke { - "/usr/lib/apt-spacewalk/post_invoke.py"; -}; diff --git a/client/debian/apt-spacewalk/LICENSE b/client/debian/apt-spacewalk/LICENSE deleted file mode 100644 index d159169d1050..000000000000 --- a/client/debian/apt-spacewalk/LICENSE +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. 
- - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. 
You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. 
- -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. 
However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. 
-You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. 
diff --git a/client/debian/apt-spacewalk/Makefile.python b/client/debian/apt-spacewalk/Makefile.python deleted file mode 100644 index 86f6fbcfa101..000000000000 --- a/client/debian/apt-spacewalk/Makefile.python +++ /dev/null @@ -1,19 +0,0 @@ -THIS_MAKEFILE := $(realpath $(lastword $(MAKEFILE_LIST))) -CURRENT_DIR := $(dir $(THIS_MAKEFILE)) -include $(CURRENT_DIR)../../../rel-eng/Makefile.python - -# Docker tests variables -DOCKER_CONTAINER_BASE = systemsmanagement/uyuni/master/docker/containers/uyuni-master -DOCKER_REGISTRY = registry.opensuse.org -DOCKER_RUN_EXPORT = "PYTHONPATH=$PYTHONPATH" -DOCKER_VOLUMES = -v "$(CURDIR)/../../../:/manager" - -__pylint :: - $(call update_pip_env) - pylint --rcfile=pylintrc $(shell find -name '*.py') > reports/pylint.log || true - -docker_pylint :: - docker run --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/sh -c "cd /manager/client/debian/apt-spacewalk; make -f Makefile.python __pylint" - -docker_shell :: - docker run -t -i --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/bash diff --git a/client/debian/apt-spacewalk/apt-spacewalk.spec b/client/debian/apt-spacewalk/apt-spacewalk.spec deleted file mode 100644 index e2ecd961e9ed..000000000000 --- a/client/debian/apt-spacewalk/apt-spacewalk.spec +++ /dev/null @@ -1,162 +0,0 @@ -%{!?__python2:%global __python2 /usr/bin/python2} - -%if %{undefined python2_version} -%global python2_version %(%{__python2} -Esc "import sys; sys.stdout.write('{0.major}.{0.minor}'.format(sys.version_info))") -%endif - -%if %{undefined python2_sitelib} -%global python2_sitelib %(%{__python2} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") -%endif - -Name: apt-spacewalk -Summary: Spacewalk plugin for Advanced Packaging Tool -%if %{_vendor} == "debbuild" -Packager: Uyuni Project -Group: admin -%endif -Version: 1.0.15 -Release: 1%{?dist} -License: GPLv2 -Source0: 
%{name}-%{version}.tar.gz -URL: https://github.com/uyuni-project/uyuni -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) -BuildArch: noarch -BuildRequires: python - -%description -apt-spacewalk is plugin used on Debian clients -to acquire content from Spacewalk server - -%package -n apt-transport-spacewalk -Summary: APT transport for communicating with Spacewalk servers -Requires: apt -Requires: python-apt -Requires: rhn-client-tools -Requires: python-six - -Recommends: mgr-daemon - -%description -n apt-transport-spacewalk - Supplies the APT method for fetching packages from Spacewalk. - Adds transaction hooks to: - 1) Update APT's sourcelist with subscribed spacewalk channels - before updating - 2) Register the machine's installed packages with the Spacewalk - server after any dpkg invocation - -%prep -%setup -q - -%build -# Nothing to build - -%install -mkdir -p $RPM_BUILD_ROOT/%{_prefix}/lib/apt-spacewalk -cp -a *_invoke.py $RPM_BUILD_ROOT/%{_prefix}/lib/apt-spacewalk -mkdir -p $RPM_BUILD_ROOT/%{_prefix}/lib/apt/methods -cp -a spacewalk $RPM_BUILD_ROOT/%{_prefix}/lib/apt/methods -mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/apt/apt.conf.d -cp -a 50spacewalk $RPM_BUILD_ROOT/%{_sysconfdir}/apt/apt.conf.d -mkdir -p $RPM_BUILD_ROOT/%{python2_sitelib}/rhn/actions -cp -a packages.py $RPM_BUILD_ROOT/%{python2_sitelib}/rhn/actions - -%files -n apt-transport-spacewalk -%license LICENSE -%{_prefix}/lib/apt-spacewalk/ -%{_prefix}/lib/apt/methods/spacewalk -%config(noreplace) %{_sysconfdir}/apt/apt.conf.d/50spacewalk -%{python2_sitelib}/rhn/actions/packages.py - -%if %{_vendor} == "debbuild" -%pre -n apt-transport-spacewalk -hook=/etc/apt/apt.conf.d/50spacewalk -if test -f $hook.disabled -then - mv $hook.disabled $hook -fi - -%postun -n apt-transport-spacewalk -hook=/etc/apt/apt.conf.d/50spacewalk -sourcelist=/etc/apt/sources.list.d/spacewalk.list - -case "$1" in - purge) - rm -f $hook.disabled - rm -f $sourcelist.disabled - ;; - - remove) - mv $hook 
$hook.disabled || : - mv $sourcelist $sourcelist.disabled || : - ;; - - abort-install) - if test "x$2" != "x" && test -f $hook - then - mv $hook $hook.disabled || : - mv $sourcelist $sourcelist.disabled || : - fi - ;; - - upgrade|failed-upgrade|abort-upgrade|disappear) - ;; - - *) - echo "postrm called with unknown argument \`$1'" >&2 - exit 1 -esac -%endif - -%changelog -* Thu Oct 25 2018 Tomas Kasparek 1.0.15-1 -- client, usix: Rework how client packaging is done for Debian/Ubuntu -- Move apt-spacewalk to the client/debian/ directory - -* Mon Jun 18 2018 Michael Mraka 1.0.14-1 -- client/debian: Port apt-spacewalk to be Python 3 ready - -* Mon Apr 16 2018 Tomas Kasparek 1.0.13-1 -- apt-transport-spacewalk: missed part of patch within pre_invoke -- further modifications on apt-transport-spacewalk -- modify apt-transport-spacewalk to support signed repos - -* Fri Feb 09 2018 Michael Mraka 1.0.12-1 -- removed BuildRoot from specfiles - -* Mon Jul 17 2017 Jan Dobes 1.0.11-1 -- Migrating Fedorahosted to GitHub - -* Tue Feb 24 2015 Matej Kollar 1.0.10-1 -- Getting rid of Tabs and trailing spaces in LICENSE, COPYING, and README files - -* Mon Sep 30 2013 Michael Mraka 1.0.9-1 -- removed trailing whitespaces - -* Thu Mar 21 2013 Jan Pazdziora 1.0.8-1 -- forward port debian bugs #703207, 700821 - -* Wed Feb 06 2013 Jan Pazdziora 1.0.7-1 -- update documentation on Debian packages - -* Sun Jun 17 2012 Miroslav Suchý 1.0.6-1 -- add copyright information to header of .py files -- ListRefresh is in APT:Update namespace - -* Sun Jun 17 2012 Miroslav Suchý 1.0.5-1 -- add LICENSE file for apt-spacewalk tar.gz -- %%defattr is not needed since rpm 4.4 - -* Thu Apr 28 2011 Simon Lukasik 1.0.4-1 -- The method can be killed by the keyboard interrupt (slukasik@redhat.com) - -* Sun Apr 17 2011 Simon Lukasik 1.0.3-1 -- Introducing actions.packages dispatcher (slukasik@redhat.com) -- Do not use rpmUtils on Debian (slukasik@redhat.com) -- Skip the extra lines sent by Apt 
(slukasik@redhat.com) - -* Wed Apr 13 2011 Jan Pazdziora 1.0.2-1 -- utilize config.getServerlURL() (msuchy@redhat.com) - -* Thu Mar 17 2011 Simon Lukasik 1.0.1-1 -- new package - diff --git a/client/debian/apt-spacewalk/packages.py b/client/debian/apt-spacewalk/packages.py deleted file mode 100644 index 11798f53a627..000000000000 --- a/client/debian/apt-spacewalk/packages.py +++ /dev/null @@ -1,187 +0,0 @@ -# -# actions.packages dispatcher for Debian clients -# -# Author: Simon Lukasik -# Lukas Durfina -# License: GPLv2 -# -# TODO: Be strict on architectures and package versions -# Staging content -# -# Copyright (c) 2012 Red Hat, Inc. -# -# This software is licensed to you under the GNU General Public License, -# version 2 (GPLv2). There is NO WARRANTY for this software, express or -# implied, including the implied warranties of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 -# along with this software; if not, see -# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. 
- -from __future__ import print_function - -import os -import sys -import time -import apt - -sys.path.append("/usr/share/rhn/") -from up2date_client import up2dateLog -from up2date_client import pkgUtils -from up2date_client import rhnPackageInfo - -log = up2dateLog.initLog() - -# file used to keep track of the next time rhn_check -# is allowed to update the package list on the server -LAST_UPDATE_FILE="/var/lib/up2date/dbtimestamp" - -__rhnexport__ = [ - 'update', - 'remove', - 'refresh_list', - 'fullUpdate', - 'checkNeedUpdate', - 'runTransaction', - 'verify' -] - -def remove(package_list, cache_only=None): - """We have been told that we should remove packages""" - if cache_only: - return (0, "no-ops for caching", {}) - if type(package_list) != type([]): - return (13, "Invalid arguments passed to function", {}) - log.log_debug("Called remove_packages", package_list) - - try: - cache = apt.Cache() - cache.update() - cache.open(None) - for pkg in package_list: - try: - package = cache[pkg[0]] - package.mark_delete() - except: - log.log_debug("Failed to remove package", pkg) - return (1, "remove_packages failed", {}) - cache.commit() - return (0, "remove_packages OK", {}) - except: - return (1, "remove_packages failed", {}) - -def update(package_list, cache_only=None): - """We have been told that we should retrieve/install packages""" - if type(package_list) != type([]): - return (13, "Invalid arguments passed to function", {}) - log.log_debug("Called update", package_list) - - try: - cache = apt.Cache() - cache.update() - cache.open(None) - for pkg in package_list: - try: - package = cache[pkg[0]] - if not package.is_installed: - package.mark_install() - else: - package.mark_upgrade() - except: - log.log_debug("Failed to update package", pkg) - return (1, "update failed", {}) - cache.commit() - return (0, "update OK", {}) - except: - return (1, "update failed", {}) - -def fullUpdate(force=0, cache_only=None): - """ Update all packages on the system. 
""" - log.log_debug("Called packages.fullUpdate") - try: - cache = apt.Cache() - cache.update() - cache.open(None) - cache.upgrade(True) - cache.commit() - except: - return (1, "packages.fullUpdate failed", {}) - return (0, "packages.fullUpdate OK", {}) - -def checkNeedUpdate(rhnsd=None, cache_only=None): - """ Check if the locally installed package list changed, if - needed the list is updated on the server - In case of error avoid pushing data to stay safe - """ - if cache_only: - return (0, "no-ops for caching", {}) - try: - last = os.stat(LAST_UPDATE_FILE)[8] - except: - last = 0 - - # Never update the package list more than once every 1/2 hour - if int(time.time()) - last <= 60: - return (0, "dpkg database not modified since last update (or package " - "list recently updated)", {}) - - if last == 0: - try: - file = open(LAST_UPDATE_FILE, "w+") - file.close() - except: - return (0, "unable to open the timestamp file", {}) - - # call the refresh_list action with a argument so we know it's - # from rhnsd - return refresh_list(rhnsd=1) - - -def refresh_list(rhnsd=None, cache_only=None): - """ push again the list of rpm packages to the server """ - if cache_only: - return (0, "no-ops for caching", {}) - log.log_debug("Called refresh_list") - - try: - rhnPackageInfo.updatePackageProfile() - except: - print("ERROR: refreshing remote package list for System Profile") - return (20, "Error refreshing package list", {}) - - touch_time_stamp() - return (0, "package list refreshed", {}) - -def touch_time_stamp(): - try: - file_d = open(LAST_UPDATE_FILE, "w+") - file_d.close() - except: - return (0, "unable to open the timestamp file", {}) - # Never update the package list more than once every hour. 
- t = time.time() - try: - os.utime(LAST_UPDATE_FILE, (t, t)) - except: - return (0, "unable to set the time stamp on the time stamp file %s" - % LAST_UPDATE_FILE, {}) - -def verify(packages, cache_only=None): - log.log_debug("Called packages.verify") - if cache_only: - return (0, "no-ops for caching", {}) - - data = {} - data['name'] = "packages.verify" - data['version'] = 0 - ret, missing_packages = pkgUtils.verifyPackages(packages) - - data['verify_info'] = ret - - if len(missing_packages): - data['name'] = "packages.verify.missing_packages" - data['version'] = 0 - data['missing_packages'] = missing_packages - return(43, "packages requested to be verified are missing " - "in the Apt cache", data) - - return (0, "packages verified", data) diff --git a/client/debian/apt-spacewalk/post_invoke.py b/client/debian/apt-spacewalk/post_invoke.py deleted file mode 100755 index 0cfce4d4f35e..000000000000 --- a/client/debian/apt-spacewalk/post_invoke.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/python -# -# DPkg::Post-Invoke hook for updating Debian package profile -# -# Author: Simon Lukasik -# Date: 2011-03-14 -# License: GPLv2 -# -# Copyright (c) 1999--2012 Red Hat, Inc. -# -# This software is licensed to you under the GNU General Public License, -# version 2 (GPLv2). There is NO WARRANTY for this software, express or -# implied, including the implied warranties of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 -# along with this software; if not, see -# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. - - -from __future__ import print_function - -import sys - -# Once we have the up2date stuff in a site-packages, -# we won't have to do path magic. 
-import warnings -warnings.filterwarnings("ignore", - message='the md5 module is deprecated; use hashlib instead') -sys.path.append("/usr/share/rhn/") -from up2date_client import up2dateAuth -from up2date_client import up2dateErrors -from up2date_client import rhnserver -from up2date_client import pkgUtils - - -if __name__ == '__main__': - systemid = up2dateAuth.getSystemId() - if systemid: - try: - print("Apt-Spacewalk: Updating package profile") - s = rhnserver.RhnServer() - s.registration.update_packages(systemid, - pkgUtils.getInstalledPackageList(getArch=1)) - except up2dateErrors.RhnServerException as e: - print("Package profile information could not be sent.") - print(str(e)) diff --git a/client/debian/apt-spacewalk/pre_invoke.py b/client/debian/apt-spacewalk/pre_invoke.py deleted file mode 100755 index 8d27f48825e3..000000000000 --- a/client/debian/apt-spacewalk/pre_invoke.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/python -# -# APT::Update::Pre-Invoke hook for updating sources.list -# -# Author: Simon Lukasik -# Date: 2011-03-14 -# License: GPLv2 -# -# Copyright (c) 1999--2012 Red Hat, Inc. -# -# This software is licensed to you under the GNU General Public License, -# version 2 (GPLv2). There is NO WARRANTY for this software, express or -# implied, including the implied warranties of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 -# along with this software; if not, see -# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. 
- - -from __future__ import print_function - -import sys -import os -from six.moves.urllib.parse import urlparse -from aptsources import sourceslist -import apt_pkg - -# Once we have the up2date stuff in a site-packages, -# we don't have to do path magic -import warnings -warnings.filterwarnings("ignore", - message='the md5 module is deprecated; use hashlib instead') -sys.path.append('/usr/share/rhn/') -from up2date_client import config -from up2date_client import rhnChannel -from up2date_client import up2dateAuth -from up2date_client import up2dateErrors - - -def get_channels(): - """Return channels associated with a machine""" - try: - channels = ['main'] - for channel in rhnChannel.getChannelDetails(): - if channel['parent_channel']: - channels.append(channel['label']) - return channels - except up2dateErrors.Error: - return [] - -def get_server(): - """Spacewalk server fqdn""" - return urlparse(config.getServerlURL()[0]).netloc - -def get_conf_file(): - """Path to spacewalk.list configuration file""" - apt_pkg.init_config() - directory = apt_pkg.config.get('Dir::Etc::sourceparts', - 'sources.list.d') - if not os.path.isabs(directory): - directory = os.path.join('/etc/apt', directory) - return os.path.join(directory, 'spacewalk.list') - -def update_sources_list(): - sources = sourceslist.SourcesList() - sw_source = [] - for source in sources.list: - if source.uri.startswith('spacewalk://'): - source.set_enabled(False) - sw_source.append(source) - - if up2dateAuth.getSystemId(): - channels = get_channels() - if len(channels): - for source in sw_source: - sources.remove(source) - sources.add(type='deb', - uri='spacewalk://' + get_server(), - dist='channels:', - orig_comps=channels, - file=get_conf_file() - ) - sources.save() - -if __name__ == '__main__': - print("Apt-Spacewalk: Updating sources.list") - update_sources_list() diff --git a/client/debian/apt-spacewalk/pylintrc b/client/debian/apt-spacewalk/pylintrc deleted file mode 100644 index 
82ba454c4c46..000000000000 --- a/client/debian/apt-spacewalk/pylintrc +++ /dev/null @@ -1,188 +0,0 @@ -# apt-spacewalk package pylint configuration - -[MASTER] - -# Profiled execution. -profile=no - -# Pickle collected data for later comparisons. -persistent=no - - -[MESSAGES CONTROL] - -# Disable the message(s) with the given id(s). - - -disable=I0011, - C0302, - C0111, - R0801, - R0902, - R0903, - R0904, - R0912, - R0913, - R0914, - R0915, - R0921, - R0922, - W0142, - W0403, - W0603, - C1001, - W0121, - useless-else-on-loop, - bad-whitespace, - unpacking-non-sequence, - superfluous-parens, - cyclic-import, - redefined-variable-type, - no-else-return, - - # Uyuni disabled - E0203, - E0611, - E1101, - E1102 - -# list of disabled messages: -#I0011: 62: Locally disabling R0201 -#C0302: 1: Too many lines in module (2425) -#C0111: 1: Missing docstring -#R0902: 19:RequestedChannels: Too many instance attributes (9/7) -#R0903: Too few public methods -#R0904: 26:Transport: Too many public methods (22/20) -#R0912:171:set_slots_from_cert: Too many branches (59/20) -#R0913:101:GETServer.__init__: Too many arguments (11/10) -#R0914:171:set_slots_from_cert: Too many local variables (38/20) -#R0915:171:set_slots_from_cert: Too many statements (169/50) -#W0142:228:MPM_Package.write: Used * or ** magic -#W0403: 28: Relative import 'rhnLog', should be 'backend.common.rhnLog' -#W0603: 72:initLOG: Using the global statement -# for pylint-1.0 we also disable -#C1001: 46, 0: Old-style class defined. (old-style-class) -#W0121: 33,16: Use raise ErrorClass(args) instead of raise ErrorClass, args. 
(old-raise-syntax) -#W:243, 8: Else clause on loop without a break statement (useless-else-on-loop) -# pylint-1.1 checks -#C:334, 0: No space allowed after bracket (bad-whitespace) -#W:162, 8: Attempting to unpack a non-sequence defined at line 6 of (unpacking-non-sequence) -#C: 37, 0: Unnecessary parens after 'not' keyword (superfluous-parens) -#C:301, 0: Unnecessary parens after 'if' keyword (superfluous-parens) - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html -output-format=parseable - -# Include message's id in output -include-ids=yes - -# Tells whether to display a full report or only the messages -reports=yes - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}" - -[VARIABLES] - -# A regular expression matching names used for dummy variables (i.e. not used). 
-dummy-variables-rgx=_|dummy - - -[BASIC] - -# Regular expression which should only match correct module names -#module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ -module-rgx=([a-zA-Z_][a-zA-Z0-9_]+)$ - -# Regular expression which should only match correct module level names -const-rgx=(([a-zA-Z_][a-zA-Z0-9_]*)|(__.*__))$ - -# Regular expression which should only match correct class names -class-rgx=[a-zA-Z_][a-zA-Z0-9_]+$ - -# Regular expression which should only match correct function names -function-rgx=[a-z_][a-zA-Z0-9_]{,42}$ - -# Regular expression which should only match correct method names -method-rgx=[a-z_][a-zA-Z0-9_]{,42}$ - -# Regular expression which should only match correct instance attribute names -attr-rgx=[a-z_][a-zA-Z0-9_]{,30}$ - -# Regular expression which should only match correct argument names -argument-rgx=[a-z_][a-zA-Z0-9_]{,30}$ - -# Regular expression which should only match correct variable names -variable-rgx=[a-z_][a-zA-Z0-9_]{,30}$ - -# Regular expression which should only match correct list comprehension / -# generator expression variable names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression which should only match correct class sttribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,42}|(__.*__))$ - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# List of builtins function names that should not be used, separated by a comma -bad-functions=apply,input - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=10 - -# Maximum number of locals for function / method body -max-locals=20 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branchs=20 - -# Maximum number of statements in function / method body -max-statements=50 - -# 
Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=1 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - - -[CLASSES] - - -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=120 - -# Maximum number of lines in a module -max-module-lines=1000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes= diff --git a/client/debian/apt-spacewalk/spacewalk b/client/debian/apt-spacewalk/spacewalk deleted file mode 100755 index d07d1915c30e..000000000000 --- a/client/debian/apt-spacewalk/spacewalk +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -u -# -# The Spacewalk Acquire Method -# -# Author: Simon Lukasik -# Date: 2011-01-01 -# License: GPLv2 -# -# Copyright (c) 1999--2012 Red Hat, Inc. -# -# This software is licensed to you under the GNU General Public License, -# version 2 (GPLv2). There is NO WARRANTY for this software, express or -# implied, including the implied warranties of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 -# along with this software; if not, see -# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. 
- - -from __future__ import print_function - -import sys -import os -import re -import hashlib - -import warnings -warnings.filterwarnings("ignore", message="the md5 module is deprecated; use hashlib instead") -sys.path.append("/usr/share/rhn/") - -from six.moves.urllib.parse import urlparse -from rhn.connections import HTTPConnection, HTTPSConnection -from up2date_client import config -from up2date_client import rhnChannel -from up2date_client import up2dateAuth -from up2date_client import up2dateErrors -from rhn.stringutils import bstr - - - -class pkg_acquire_method: - """ - This is slightly modified python variant of apt-pkg/acquire-method. - It is a skeleton class that implements only very basic of apt methods - functionality. - """ - __eof = False - - def __init__(self): - print("100 Capabilities\nVersion: 1.0\nSingle-Instance: true\n\n", end='') - - def __get_next_msg(self): - """ - Apt uses for communication with its methods the text protocol similar - to http. This function parses the protocol messages from stdin. 
- """ - if self.__eof: - return None - result = {}; - line = sys.stdin.readline() - while line == '\n': - line = sys.stdin.readline() - if not line: - self.__eof = True - return None - s = line.split(" ", 1) - result['_number'] = int(s[0]) - result['_text'] = s[1].strip() - - while not self.__eof: - line = sys.stdin.readline() - if not line: - self.__eof = True - return result - if line == '\n': - return result - s = line.split(":", 1) - result[s[0]] = s[1].strip() - - def __dict2msg(self, msg): - """Convert dictionary to http like message""" - result = "" - for item in list(msg.keys()): - if msg[item] != None: - result += item + ": " + msg[item] + "\n" - return result - - def status(self, **kwargs): - print("102 Status\n%s\n" % self.__dict2msg(kwargs), end='') - - def uri_start(self, msg): - print("200 URI Start\n%s\n" % self.__dict2msg(msg), end='') - - def uri_done(self, msg): - print("201 URI Done\n%s\n" % self.__dict2msg(msg), end='') - - def uri_failure(self, msg): - print("400 URI Failure\n%s\n" % self.__dict2msg(msg), end='') - - def run(self): - """Loop through requests on stdin""" - while True: - msg = self.__get_next_msg() - if msg == None: - return 0 - if msg['_number'] == 600: - try: - self.fetch(msg) - except Exception as e: - self.fail(e.__class__.__name__ + ": " + str(e)) - except up2dateErrors.Error as e: - self.fail(e.__class__.__name__ + ": " + str(e)) - else: - return 100 - - - -def get_ssl_ca_cert(up2date_cfg): - if not ('sslCACert' in up2date_cfg and up2date_cfg['sslCACert']): - raise BadSslCaCertConfig - - ca_certs = up2date_cfg['sslCACert'] - if type(ca_certs) == list: - return ca_certs - return [ca_certs] - - - -class spacewalk_method(pkg_acquire_method): - """ - Spacewalk acquire method - """ - up2date_cfg = None - login_info = None - current_url = None - svr_channels = None - http_headers = None - base_channel = None - conn = None - not_registered_msg = 'This system is not registered with the spacewalk server' - - def fail(self, message = 
not_registered_msg): - self.uri_failure({'URI': self.uri, - 'Message': message}) - - - def __load_config(self): - if self.up2date_cfg == None: - self.up2date_cfg = config.initUp2dateConfig() - self.up2date_server = urlparse(config.getServerlURL()[0]) - # TODO: proxy settings - - - def __login(self): - if self.login_info == None: - self.status(URI = self.uri, Message = 'Logging into the spacewalk server') - self.login_info = up2dateAuth.getLoginInfo() - if not self.login_info: - raise up2date_client.AuthenticationError(self.not_registered_msg) - self.status(URI = self.uri, Message = 'Logged in') - - - def __init_channels(self): - if self.svr_channels == None: - self.svr_channels = rhnChannel.getChannelDetails() - for channel in self.svr_channels: - if channel['parent_channel'] == '': - self.base_channel = channel['label'] - - - def __init_headers(self): - if self.http_headers == None: - rhn_needed_headers = ['X-RHN-Server-Id', - 'X-RHN-Auth-User-Id', - 'X-RHN-Auth', - 'X-RHN-Auth-Server-Time', - 'X-RHN-Auth-Expire-Offset'] - self.http_headers = {}; - for header in rhn_needed_headers: - if header not in self.login_info: - raise up2date_client.AuthenticationError( - "Missing required login information %s" % (header)) - self.http_headers[header] = self.login_info[header] - self.http_headers['X-RHN-Transport-Capability'] = 'follow-redirects=3' - - - def __make_conn(self): - if self.conn == None: - if self.up2date_server.scheme == 'http' \ - or self.up2date_cfg['useNoSSLForPackages'] == 1: - self.conn = HTTPConnection(self.up2date_server.netloc) - else: - self.conn = HTTPSConnection(self.up2date_server.netloc, - trusted_certs=get_ssl_ca_cert(self.up2date_cfg)) - - - def __transform_document(self, document): - """Transform url given by apt to real spacewalk url""" - document = document.replace('dists/channels:/main/', - 'dists/channels:/' + self.base_channel + '/', 1) - document = re.sub('/binary-[\d\w]*/', '/repodata/', document, 1) - document = 
document.replace('dists/channels:/', '/XMLRPC/GET-REQ/', 1) - return document - - - def fetch(self, msg): - """ - Fetch the content from spacewalk server to the file. - - Acording to the apt protocol msg must contain: 'URI' and 'Filename'. - Other possible keys are: 'Last-Modified', 'Index-File', 'Fail-Ignore' - """ - self.uri = msg['URI'] - self.uri_parsed = urlparse(msg['URI']) - self.filename = msg['Filename'] - - self.__load_config() - if self.uri_parsed.netloc != self.up2date_server.netloc: - return self.fail() - self.__login() - self.__init_channels() - - document = self.__transform_document(self.uri_parsed.path) - - self.__init_headers() - self.__make_conn() - - hdrs = self.http_headers; - # check is partially downloaded file present - if os.path.isfile(self.filename): - fsize = os.stat(self.filename).st_size - if fsize > 0: - # resume aborted download by requesting tail of the file - # using Range HTTP header - hdrs['Range'] = 'bytes=' + str(fsize) + '-' - - self.conn.request("GET", "/" + document, headers = hdrs) - self.status(URI = self.uri, Message = 'Waiting for headers') - - res = self.conn.getresponse() - - if res.status == 200: - f = open(self.filename, "wb") - elif res.status == 206: - f = open(self.filename, "ab") - else: - self.uri_failure({'URI': self.uri, - 'Message': str(res.status) + ' ' + res.reason, - 'FailReason': 'HttpError' + str(res.status)}) - while True: - data = res.read(4096) - if not len(data): break - res.close() - return - - self.uri_start({'URI': self.uri, - 'Size': res.getheader('content-length'), - 'Last-Modified': res.getheader('last-modified')}) - - while True: - data = res.read(4096) - if not len(data): - break - f.write(data) - res.close() - f.close() - - f = open(self.filename, "r") - hash_sha256 = hashlib.sha256() - hash_md5 = hashlib.md5() - fsize = 0 - while True: - data = f.read(4096) - if not len(data): - break - fsize += len(data) - hash_sha256.update(data) - hash_md5.update(data) - f.close() - - 
self.uri_done({'URI': self.uri, - 'Filename': self.filename, - 'Size': str(fsize), - 'Last-Modified': res.getheader('last-modified'), - 'MD5-Hash': hash_md5.hexdigest(), - 'MD5Sum-Hash': hash_md5.hexdigest(), - 'SHA256-Hash': hash_sha256.hexdigest()}) - - - def __del__(self): - if self.conn: - self.conn.close() - - - -if __name__ == '__main__': - try: - method = spacewalk_method() - ret = method.run() - sys.exit(ret) - except KeyboardInterrupt: - pass diff --git a/client/debian/apt-spacewalk/src/apt-spacewalk.changes b/client/debian/apt-spacewalk/src/apt-spacewalk.changes deleted file mode 100644 index f55d5c2f0c72..000000000000 --- a/client/debian/apt-spacewalk/src/apt-spacewalk.changes +++ /dev/null @@ -1,9 +0,0 @@ -------------------------------------------------------------------- -Fri Apr 20 14:24:42 CEST 2012 - mc@suse.de - -- version 1.0.4.1-1 -- The method can be killed by the keyboard interrupt -- Introducing actions.packages dispatcher -- Do not use rpmUtils on Debian -- Skip the extra lines sent by Apt - diff --git a/client/debian/python-dmidecode/Stop-linking-with-libxml2mod.patch b/client/debian/python-dmidecode/Stop-linking-with-libxml2mod.patch deleted file mode 100644 index 68a00e1a5ee0..000000000000 --- a/client/debian/python-dmidecode/Stop-linking-with-libxml2mod.patch +++ /dev/null @@ -1,72 +0,0 @@ -From 6698dabbd45a2d93199d2d3d5abb84f8c260667d Mon Sep 17 00:00:00 2001 -From: Sandro Tosi -Date: Tue, 5 Dec 2017 18:52:09 -0500 -Subject: Stop linking with libxml2mod - -Copy the two funcions used instead of linking with libxml2mod. 
- -Author: Adrian Bunk ---- - src/dmidecodemodule.c | 27 ++++++++++++++++++++++++++- - src/setup_common.py | 3 --- - 2 files changed, 26 insertions(+), 4 deletions(-) - -diff --git a/src/dmidecodemodule.c b/src/dmidecodemodule.c -index b31c002..007a892 100644 ---- a/src/dmidecodemodule.c -+++ b/src/dmidecodemodule.c -@@ -42,7 +42,6 @@ - #include - - #include --#include "libxml_wrap.h" - - #include "dmidecodemodule.h" - #include "dmixml.h" -@@ -64,6 +63,32 @@ char *PyUnicode_AsUTF8(PyObject *unicode) { - } - #endif - -+static PyObject * -+libxml_xmlDocPtrWrap(xmlDocPtr doc) -+{ -+ PyObject *ret; -+ -+ if (doc == NULL) { -+ Py_INCREF(Py_None); -+ return (Py_None); -+ } -+ ret = PyCapsule_New((void *) doc, (char *) "xmlDocPtr", NULL); -+ return (ret); -+} -+ -+static PyObject * -+libxml_xmlNodePtrWrap(xmlNodePtr node) -+{ -+ PyObject *ret; -+ -+ if (node == NULL) { -+ Py_INCREF(Py_None); -+ return (Py_None); -+ } -+ ret = PyCapsule_New((void *) node, (char *) "xmlNodePtr", NULL); -+ return (ret); -+} -+ - static void init(options *opt) - { - opt->devmem = DEFAULT_MEM_DEV; -diff --git a/src/setup_common.py b/src/setup_common.py -index aec1f9b..6b678ef 100644 ---- a/src/setup_common.py -+++ b/src/setup_common.py -@@ -68,9 +68,6 @@ def libxml2_lib(libdir, libs): - elif l.find('-l') == 0: - libs.append(l.replace("-l", "", 1)) - -- # this library is not reported and we need it anyway -- libs.append('xml2mod') -- - - - # Get version from src/version.h diff --git a/client/debian/python-dmidecode/python-dmidecode.spec b/client/debian/python-dmidecode/python-dmidecode.spec deleted file mode 100644 index 452b5f031198..000000000000 --- a/client/debian/python-dmidecode/python-dmidecode.spec +++ /dev/null @@ -1,324 +0,0 @@ -%if %{_vendor} == "debbuild" -# Debian points /bin/sh to /bin/dash by default. This breaks a lot of common -# scripts that rely on bash-specific behavior, so changing the shell preempts -# a lot of these breakages. 
-%global _buildshell /bin/bash -%endif - - -# Setuptools install flags -%if %{_vendor} == "debbuild" -%global pyinstflags --no-compile -O0 -%global pytargetflags --install-layout=deb -%else -%global pyinstflags -O1 -%global pytargetflags %{nil} -%endif - -# For systems (mostly debian) that don't define these things ------------------- -%{!?__python2:%global __python2 /usr/bin/python2} -%{!?__python3:%global __python3 /usr/bin/python3} - -%if %{undefined python2_sitearch} -%global python2_sitearch %(%{__python2} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))") -%endif - -%if %{undefined python3_sitearch} -%global python3_sitearch %(%{__python3} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))") -%endif - -%{!?py3dir: %global py3dir %{_builddir}/python3-%{name}-%{version}-%{release}} - -# ----------------------------------------------------------------------------- - - -Name: python-dmidecode -Summary: Python module to access DMI data -Version: 3.12.2 -Release: 10%{?dist} - -%if %{_vendor} == "debbuild" -Packager: Neal Gompa -License: GPL-2.0 -Group: python -%else -License: GPLv2 -Group: System Environment/Libraries -%endif -URL: https://github.com/nima/python-dmidecode -Source0: https://github.com/nima/%{name}/archive/v%{version}/%{name}-%{version}.tar.gz - -Patch666: Stop-linking-with-libxml2mod.patch - -%if %{_vendor} == "debbuild" -BuildRequires: libxml2-dev - -BuildRequires: python-dev -BuildRequires: python-libxml2 - -BuildRequires: python3-dev -BuildRequires: python3-libxml2 -%else -BuildRequires: libxml2-devel - -BuildRequires: python2-devel -BuildRequires: libxml2-python - -BuildRequires: python3-devel -BuildRequires: libxml2-python3 -%endif - -%description -python-dmidecode is a Python extension module that uses the -code-base of the 'dmidecode' utility, and presents the data -as python data structures or as XML data using libxml2. 
- -%package -n python2-dmidecode -Summary: Python 2 module to access DMI data -%if %{_vendor} == "debbuild" -Requires: python-libxml2 -# Replaces Debian's python-dmidecode -Provides: python-dmidecode -Obsoletes: python-dmidecode -# For scriptlets -Requires(preun): python-minimal -Requires(post): python-minimal -%else -Requires: libxml2-python -%endif -%{?python_provide:%python_provide python2-dmidecode} - -%description -n python2-dmidecode -python2-dmidecode is a Python 2 extension module that uses the -code-base of the 'dmidecode' utility, and presents the data -as python data structures or as XML data using libxml2. - -%package -n python3-dmidecode -Summary: Python 3 module to access DMI data -%if %{_vendor} == "debbuild" -Requires: python3-libxml2 -# For scriptlets -Requires(preun): python3-minimal -Requires(post): python3-minimal -%else -Requires: libxml2-python3 -%endif - -%description -n python3-dmidecode -python3-dmidecode is a Python 3 extension module that uses the -code-base of the 'dmidecode' utility, and presents the data -as Python 3 data structures or as XML data using libxml2. 
- - - -%prep -%setup -qc - -%if %{_vendor} == "debbuild" -# Apply patches for debian -pushd %{name}-%{version} -%patch666 -p1 -popd -%endif - -mv %{name}-%{version} python2 -cp -a python{2,3} - -pushd python3 -sed -i 's/python2/python3/g' Makefile unit-tests/Makefile -popd - - -%build -# Not to get undefined symbol: dmixml_GetContent -export CFLAGS="${CFLAGS-} -std=gnu89" - - -for PY in python2 python3; do - pushd $PY - make build - popd -done - -%install -pushd python2 -%{__python2} src/setup.py install %{?pyinstflags} --skip-build --root %{buildroot} %{?pytargetflags} --prefix=%{_prefix} -popd - -pushd python3 -%{__python3} src/setup.py install %{?pyinstflags} --skip-build --root %{buildroot} %{?pytargetflags} --prefix=%{_prefix} -popd - - -%if %{_vendor} != "debbuild" -%check -for PY in python2 python3; do - pushd $PY/unit-tests - make - popd -done -%endif - -%files -n python2-dmidecode -%license python2/doc/LICENSE python2/doc/AUTHORS python2/doc/AUTHORS.upstream -%doc python2/README python2/doc/README.upstream -%{python2_sitearch}/* -%{_datadir}/python-dmidecode/ - -%files -n python3-dmidecode -%license python3/doc/LICENSE python3/doc/AUTHORS python3/doc/AUTHORS.upstream -%doc python3/README python3/doc/README.upstream -%{python3_sitearch}/* -%{_datadir}/python-dmidecode/ - -%if %{_vendor} == "debbuild" - -%post -n python2-dmidecode -# Do late-stage bytecompilation, per debian policy -pycompile -p python2-dmidecode -V -3.0 - -%preun -n python2-dmidecode -# Ensure all *.py[co] files are deleted, per debian policy -pyclean -p python2-dmidecode - -%post -n python3-dmidecode -# Do late-stage bytecompilation, per debian policy -py3compile -p python3-dmidecode -V -4.0 - -%preun -n python3-dmidecode -# Ensure all *.py[co] files are deleted, per debian policy -py3clean -p python3-dmidecode - -%endif - - -%changelog -* Fri Jul 06 2018 Neal Gompa - 3.12.2-10 -- Add Debian/Ubuntu support - -* Sat Aug 19 2017 Zbigniew Jędrzejewski-Szmek - 3.12.2-9 -- Python 2 binary 
package renamed to python2-dmidecode - See https://fedoraproject.org/wiki/FinalizingFedoraSwitchtoPython3 - -* Thu Aug 03 2017 Fedora Release Engineering - 3.12.2-8 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild - -* Thu Jul 27 2017 Fedora Release Engineering - 3.12.2-7 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild - -* Sat Feb 11 2017 Fedora Release Engineering - 3.12.2-6 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild - -* Mon Dec 19 2016 Miro Hrončok - 3.12.2-5 -- Rebuild for Python 3.6 - -* Tue Jul 19 2016 Fedora Release Engineering - 3.12.2-4 -- https://fedoraproject.org/wiki/Changes/Automatic_Provides_for_Python_RPM_Packages - -* Thu Feb 04 2016 Fedora Release Engineering - 3.12.2-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild - -* Sat Nov 07 2015 Robert Kuska - 3.12.2-2 -- Rebuilt for Python3.5 rebuild - -* Fri Jul 10 2015 Miro Hrončok - 3.12.2-1 -- Update to 3.12.2 -- Add Python 3 subpackage (#1236000) -- Removed deprecated statements -- Moved some docs to license -- Removed pacthes -- Corrected bogus dates in %%changelog -- Build with -std=gnu89 - -* Thu Jun 18 2015 Fedora Release Engineering - 3.10.13-13 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild - -* Sun Aug 17 2014 Fedora Release Engineering - 3.10.13-12 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild - -* Sat Jun 07 2014 Fedora Release Engineering - 3.10.13-11 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild - -* Sun Aug 04 2013 Fedora Release Engineering - 3.10.13-10 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild - -* Thu Jun 20 2013 Ales Ledvinka - 3.10.13-9 -- Attribute installed may appear as duplicate and cause invalid XML. - -* Mon Jun 17 2013 Ales Ledvinka - 3.10.13-8 -- Attribute dmispec may cause invalid XML on some hardware. -- Signal handler for SIGILL. 
- -* Thu Feb 14 2013 Fedora Release Engineering - 3.10.13-7 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_19_Mass_Rebuild - -* Sat Jul 21 2012 Fedora Release Engineering - 3.10.13-6 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild - -* Thu Jul 19 2012 Ales Ledvinka 3.10.14-5 -- Upstream relocated. Document source tag and tarball generation. - -* Sat Jan 14 2012 Fedora Release Engineering - 3.10.13-4 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild - -* Tue Feb 08 2011 Fedora Release Engineering - 3.10.13-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild - -* Thu Jul 22 2010 David Malcolm - 3.10.13-2 -- Rebuilt for https://fedoraproject.org/wiki/Features/Python_2.7/MassRebuild - -* Tue Jun 15 2010 Roman Rakus - 3.10.13-1 -- Update to new release - -* Fri Mar 12 2010 Nima Talebi - 3.10.12-1 -- Update to new release - -* Tue Feb 16 2010 Nima Talebi - 3.10.11-1 -- Update to new release - -* Tue Jan 12 2010 Nima Talebi - 3.10.10-1 -- Update to new release - -* Thu Jan 07 2010 Nima Talebi - 3.10.9-1 -- Update to new release - - -* Tue Dec 15 2009 Nima Talebi - 3.10.8-1 -- New Upstream release. -- Big-endian and little-endian approved. -- Packaged unit-test to tarball. -- Rewritten unit-test to be able to run as non-root user, where it will not - try to read /dev/mem. -- Added two dmidump data files to the unit-test. 
- -* Thu Nov 26 2009 David Sommerseth - 3.10.7-3 -- Fixed even more .spec file issues and removed explicit mentioning - of /usr/share/python-dmidecode/pymap.xml - -* Wed Nov 25 2009 David Sommerseth - 3.10.7-2 -- Fixed some .spec file issues (proper Requires, use _datadir macro) - -* Wed Sep 23 2009 Nima Talebi - 3.10.7-1 -- Updated source0 to new 3.10.7 tar ball - -* Mon Jul 13 2009 David Sommerseth - 3.10.6-6 -- Only build the python-dmidecode module, not everything - -* Mon Jul 13 2009 David Sommerseth - 3.10.6-5 -- Added missing BuildRequres for libxml2-python - -* Mon Jul 13 2009 David Sommerseth - 3.10.6-4 -- Added missing BuildRequres for python-devel - -* Mon Jul 13 2009 David Sommerseth - 3.10.6-3 -- Added missing BuildRequres for libxml2-devel - -* Mon Jul 13 2009 David Sommerseth - 3.10.6-2 -- Updated release, to avoid build conflict - -* Wed Jun 10 2009 David Sommerseth - 3.10.6-1 -- Updated to work with the new XML based python-dmidecode - -* Sat Mar 7 2009 Clark Williams - 2.10.3-1 -- Initial build. - diff --git a/client/debian/python-hwdata/python-hwdata.spec b/client/debian/python-hwdata/python-hwdata.spec deleted file mode 100644 index 85f64be1a59e..000000000000 --- a/client/debian/python-hwdata/python-hwdata.spec +++ /dev/null @@ -1,255 +0,0 @@ -%if 0%{?fedora} || 0%{?rhel} > 7 -# Enable python3 build by default -%bcond_without python3 -%else -%bcond_with python3 -%endif - -%if 0%{?rhel} > 7 || 0%{?fedora} > 29 -# Disable python2 build by default -%bcond_with python2 -%else -%bcond_without python2 -%endif - -%if %{_vendor} == "debbuild" -# Debian points /bin/sh to /bin/dash by default. This breaks a lot of common -# scripts that rely on bash-specific behavior, so changing the shell preempts -# a lot of these breakages. 
-%global _buildshell /bin/bash -%endif - - -# Setuptools install flags -%if %{_vendor} == "debbuild" -%global pyinstflags --no-compile -O0 -%global pytargetflags --install-layout=deb -%else -%global pyinstflags -O1 -%global pytargetflags %{nil} -%endif - -# For systems (mostly debian) that don't define these things ------------------- -%{!?__python2:%global __python2 /usr/bin/python2} -%{!?__python3:%global __python3 /usr/bin/python3} - -%if %{undefined python2_sitelib} -%global python2_sitelib %(%{__python2} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") -%endif - -%if %{undefined python3_sitelib} -%global python3_sitelib %(%{__python3} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") -%endif - -%{!?py2_build: %global py2_build CFLAGS="%{optflags}" %{__python2} setup.py build} -%{!?py2_install: %global py2_install %{__python2} setup.py install %{?pyinstflags} --skip-build --root %{buildroot} %{?pytargetflags}} -%{!?py3_build: %global py3_build CFLAGS="%{optflags}" %{__python3} setup.py build} -%{!?py3_install: %global py3_install %{__python3} setup.py install %{?pyinstflags} --skip-build --root %{buildroot} %{?pytargetflags}} - -%{!?py3dir: %global py3dir %{_builddir}/python3-%{name}-%{version}-%{release}} - -# ----------------------------------------------------------------------------- - -# tito tags with version-release -%global origrel 1 - -Name: python-hwdata -Version: 2.3.7 -Release: 1%{?dist} -Summary: Python bindings to hwdata package -%if %{_vendor} == "debbuild" -Group: python -Packager: Miroslav Suchý -%else -Group: Development/Libraries -%endif -BuildArch: noarch -License: GPLv2 -URL: https://github.com/xsuchy/python-hwdata -Source0: https://github.com/xsuchy/%{name}/archive/%{name}-%{version}-%{origrel}.tar.gz - -%description -Provide python interface to database stored in hwdata package. -It allows you to get human readable description of USB and PCI devices. 
- -%if %{with python2} -%package -n python2-hwdata -Summary: Python bindings to hwdata package - -%if %{_vendor} == "debbuild" -BuildRequires: python-dev -Requires(preun): python-minimal -Requires(post): python-minimal -%else -BuildRequires: python2-devel -%endif - -Requires: hwdata -%{?python_provide:%python_provide python2-hwdata} -%if 0%{?rhel} < 8 -Provides: python-hwdata = %{version}-%{release} -%endif - -%description -n python2-hwdata -Provide python interface to database stored in hwdata package. -It allows you to get human readable description of USB and PCI devices. - -This is the Python 2 build of the module. - -%endif # with python2 - -%if %{with python3} -%package -n python3-hwdata -Summary: Python bindings to hwdata package - -%if %{_vendor} == "debbuild" -BuildRequires: python3-dev -BuildRequires: pylint3 -Requires(preun): python3-minimal -Requires(post): python3-minimal -%else -BuildRequires: python3-devel -BuildRequires: python3-pylint -%endif -Requires: hwdata -%{?python_provide:%python_provide python3-hwdata} - -%description -n python3-hwdata -Provide python interface to database stored in hwdata package. -It allows you to get human readable description of USB and PCI devices. - -This is the Python 3 build of the module. -%endif # with python3 - -%prep -%setup -q -n %{name}-%{name}-%{version}-%{origrel} - -%if %{with python3} -rm -rf %{py3dir} -cp -a . 
%{py3dir} -%endif # with python3 - -%build -%if %{with python2} -%py2_build -%endif # with python2 - -%if %{with python3} -pushd %{py3dir} -%py3_build -popd -%endif # with python3 - -%install -%if %{with python2} -%py2_install -%endif # with python2 - -%if %{with python3} -pushd %{py3dir} -%py3_install -popd -%endif # with python3 - -%if %{_vendor} != "debbuild" -%check -%if %{with python3} -pylint-3 hwdata.py example.py || : -%endif # with python3 -%endif - -%if %{with python2} -%files -n python2-hwdata -%license LICENSE -%doc README.md example.py -%doc html -%{python2_sitelib}/* -%endif # with python2 - -%if %{with python3} -%files -n python3-hwdata -%license LICENSE -%doc README.md example.py -%doc html -%{python3_sitelib}/* -%endif # with python3 - -%if %{_vendor} == "debbuild" - -%if %{with python2} -%post -n python2-hwdata -# Do late-stage bytecompilation, per debian policy -pycompile -p python2-hwdata -V -3.0 - -%preun -n python2-hwdata -# Ensure all *.py[co] files are deleted, per debian policy -pyclean -p python2-hwdata -%endif - -%if %{with python3} -%post -n python3-hwdata -# Do late-stage bytecompilation, per debian policy -py3compile -p python3-hwdata -V -4.0 - -%preun -n python3-hwdata -# Ensure all *.py[co] files are deleted, per debian policy -py3clean -p python3-hwdata -%endif - -%endif - -%changelog -* Tue Jun 12 2018 Dalton Miner 2.3.7-2 -- Updating packaging for debian systems - -* Fri Mar 23 2018 Miroslav Suchý 2.3.7-1 -- remove python2 subpackage for F30+ - -* Mon Feb 12 2018 Miroslav Suchý 2.3.6-1 -- Update Python 2 dependency declarations to new packaging standards - -* Wed Aug 09 2017 Miroslav Suchý 2.3.5-1 -- create python2-hwdata subpackage -- use dnf instead of yum in README -- remove rhel5 compatibilities from spec - -* Thu Sep 22 2016 Miroslav Suchý 2.3.4-1 -- run pylint in %%check -- require hwdata in python 3 package too (jdobes@redhat.com) -- implement PNP interface -- errors in usb.ids should not be fatal -- change upstream url in 
setup.py - -* Wed Jan 28 2015 Miroslav Suchý 2.3.3-1 -- upstream location changed - -* Wed Jan 28 2015 Miroslav Suchý -- move upstream location - -* Wed Dec 04 2013 Miroslav Suchý 1.10.1-1 -- create python3-hwdata subpackage -- Bumping package versions for 1.9 -- %%defattr is not needed since rpm 4.4 - -* Fri Mar 02 2012 Miroslav Suchý 1.7.3-1 -- 798375 - fix PCI device name translation (Joshua.Roys@gtri.gatech.edu) -- use setup from distutils - -* Fri Mar 02 2012 Jan Pazdziora 1.7.2-1 -- Update the copyright year info. - -* Fri Mar 02 2012 Jan Pazdziora 1.7.1-1 -- correct indentation (mzazrivec@redhat.com) - -* Mon Oct 31 2011 Miroslav Suchý 1.6.2-1 -- point URL to specific python-hwdata page - -* Fri Jul 22 2011 Jan Pazdziora 1.6.1-1 -- We only support version 14 and newer of Fedora, removing conditions for old - versions. - -* Mon Apr 26 2010 Miroslav Suchý 1.2-1 -- 585138 - change %%files section and patial support for python3 - -* Fri Apr 23 2010 Miroslav Suchý 1.1-1 -- initial release diff --git a/client/tools/mgr-push/rhnpush_config.py b/client/tools/mgr-push/rhnpush_config.py index 938ff841d75b..22c7c7f35a23 100644 --- a/client/tools/mgr-push/rhnpush_config.py +++ b/client/tools/mgr-push/rhnpush_config.py @@ -66,7 +66,7 @@ def __init__(self, filename=None, ensure_consistency=False): 'no_session_caching': '0', 'proxy': '', 'tolerant': '0', - 'ca_chain': '/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT', + 'ca_chain': '/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT', 'timeout': None } diff --git a/client/tools/mgr-push/rhnpushrc b/client/tools/mgr-push/rhnpushrc index 1d63522f73a9..670c09256911 100644 --- a/client/tools/mgr-push/rhnpushrc +++ b/client/tools/mgr-push/rhnpushrc @@ -74,7 +74,7 @@ no_session_caching = 0 tolerant = 0 #The CA cert used to verify the ssl server -ca_chain = /usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT +ca_chain = /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT #Default connection timeout, (no value for default) timeout = 300 diff 
--git a/containers/doc/server-kubernetes/README.md new file mode 100644 index 000000000000..65298462a83a --- /dev/null +++ b/containers/doc/server-kubernetes/README.md @@ -0,0 +1,327 @@ +# Prerequisites + +The following assumes you have either a single-node RKE2 or K3s cluster ready or a server with Podman installed and enough resources for the Uyuni server. +When installing on a Kubernetes cluster, it also assumes that `kubectl` and `helm` are installed on the server and configured to connect to the cluster. + +# Preparing the installation + +## Podman specific setup + +There is nothing to prepare for a Podman installation. + +## RKE2 specific setup + +Copy the `rke2-ingress-nginx-config.yaml` file to `/var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml` on your RKE2 node. +Wait for the ingress controller to restart. +Run this command to watch it restart: + +``` +watch kubectl get -n kube-system pod -lapp.kubernetes.io/name=rke2-ingress-nginx +``` + +## K3s specific setup + + +Copy the `k3s-traefik-config.yaml` file to `/var/lib/rancher/k3s/server/manifests/` on your K3s node. +Wait for traefik to restart. +Run this command to watch it restart: + +``` +watch kubectl get -n kube-system pod -lapp.kubernetes.io/name=traefik +``` + +# Offline installation + + +## For K3s + +With K3s it is possible to preload the container images and avoid having them fetched from a registry. +For this, on a machine with internet access, pull the image using `podman`, `docker` or `skopeo` and save it as a `tar` archive. 
+For example: + +⚠️ **TODO**: Verify instructions +``` +for image in cert-manager-cainjector cert-manager-controller cert-manager-ctl cert-manager-webhook; do + podman pull quay.io/jetstack/$image + podman save --output $image.tar quay.io/jetstack/$image:latest +done + +podman pull registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest + +podman save --output server.tar registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest +``` + +or + +⚠️ **TODO**: Verify instructions +``` +for image in cert-manager-cainjector cert-manager-controller cert-manager-ctl cert-manager-webhook; do + skopeo copy docker://quay.io/jetstack/$image:latest docker-archive:$image.tar:quay.io/jetstack/$image:latest +done + +skopeo copy docker://registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest docker-archive:server.tar:registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest +``` + +Copy the `cert-manager` and `uyuni/server` helm charts locally: + +⚠️ **TODO**: verify instructions + +``` +helm pull --repo https://charts.jetstack.io --destination . cert-manager +helm pull --destination . oci://registry.opensuse.org/uyuni/server +``` + +Transfer the resulting `*.tar` images to the K3s node and load them using the following command: + +``` +for archive in *.tar; do + k3s ctr images import $archive +done +``` + +In order to tell K3s not to pull the images, the image pull policy needs to be set to `Never`. +This needs to be done for both the Uyuni and cert-manager helm charts. 
+ +For the Uyuni helm chart, set the `pullPolicy` chart value to `Never` by passing a `--helm-uyuni-values=uyuni-values.yaml` parameter to `uyuniadm install` with the following `uyuni-values.yaml` file content: + +``` +pullPolicy: Never +``` + +For the cert-manager helm chart, create a `cert-values.yaml` file with the following content and pass the `--helm-certmanager-values=cert-values.yaml` parameter to `uyuniadm install`: + +``` +image: + pullPolicy: Never +``` + +⚠️ **TODO**: verify the file names +To use the downloaded helm charts instead of the default ones, pass `--helm-uyuni-chart=server.tgz` and `--helm-certmanager-chart=cert-manager.tgz` or add the following to the `uyuniadm` configuration file: + +``` +helm: + uyuni: + chart: server.tgz + values: uyuni-values.yaml + certmanager: + chart: cert-manager.tgz + values: cert-values.yaml +``` + +## For RKE2 + +RKE2 doesn't allow preloading images on the nodes. +Instead, use `skopeo` to import the images into a local registry and use this one to install. + +Copy the `cert-manager` and `uyuni/server` helm charts locally: + +⚠️ **TODO**: verify instructions + +``` +helm pull --repo https://charts.jetstack.io --destination . cert-manager +helm pull --destination . oci://registry.opensuse.org/uyuni/server +``` + +⚠️ **TODO** Prepare instructions +``` +# TODO Copy the cert-manager and uyuni images +# TODO Set the uyuniadm parameters +``` + +## For Podman + +With Podman it is possible to preload the container image and avoid having it fetched from a registry. +For this, on a machine with internet access, pull the image using `podman`, `docker` or `skopeo` and save it as a `tar` archive. 
+For example: + +``` +podman pull registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest +podman save --output server-image.tar registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest +``` + +or + +``` +skopeo copy docker://registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest docker-archive:server-image.tar:registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server:latest +``` + +Transfer the resulting `server-image.tar` to the server and load it using the following command: + +``` +podman load -i server-image.tar +``` + +# Migrating from a regular server + +In order to migrate a regular Uyuni server to containers, a new machine is required: it is not possible to perform an in-place migration. +The old server is designated as the source server and the new machine is the destination one. + +The migration procedure does not perform any hostname rename. +The fully qualified domain name will be the same on the new server as on the source one. +This means the DNS records need to be adjusted after the migration to use the new server. + +## Preparing + +### Stop the source server + +Stop the source services: + +``` +spacewalk-service stop +systemctl stop postgresql +``` + +### Preparing the SSH connection + +The `SSH` configuration and agent should be ready on the host for a passwordless connection to the source server. +The migration script only uses the source server fully qualified domain name in the SSH command. +This means that every other configuration required to connect needs to be defined in the `~/.ssh/config` file. + +For a passwordless connection, the migration script will use an SSH agent on the server. +If none is running yet, run `eval $(ssh-agent)`. +Add the SSH key to the running agent using `ssh-add /path/to/the/private/key`. +You will be prompted for the private key password. 
+ +### Prepare for Kubernetes + +Since the migration job will start the container from scratch the Persistent Volumes need to be defined before running the `uyuniadm migrate` command. +Refer to the installation section for more details on the volumes preparation. + +## Migrating + +Run the following command to install a new Uyuni server from the source one after replacing the `uyuni.source.fqdn` by the proper source server FQDN. +This command will synchronize all the data from the source server to the new one: this can take time! + +``` +uyuniadm migrate uyuni.source.fqdn +``` + +## Notes for Kubernetes + +⚠️ **TODO** Revisit this section! + +Once done, both the job and its pod will remain until the user deletes them to allow checking logs. + +Certificates migration also needs to be documented, but that can be guessed for now with the instructions to setup a server from scratch. + + +# Installing Uyuni + +## Volumes preparation + +### For Kubernetes + +⚠️ **TODO** Document this + +### For Podman + +⚠️ **TODO** Document this + +## Installing + +The installation using `uyuniadm install` will ask for the passwords if they are not provided using the command line parameters or the configuration file. +For security reasons, using command line parameters to specify passwords should be avoided: use the configuration file with proper permissions instead. + +Prepare an `uyuniadm.yaml` file like the following: + +``` +db: + password: MySuperSecretDBPass +cert: + password: MySuperSecretCAPass +``` + +To dismiss the email prompts add the `email` and `emailFrom` configurations to the above file or use the `--email` and `--emailFrom` parameters for `uyuniadm install`. 
+ +Run the following command to install after replacing the `uyuni.example.com` by the FQDN of the server to install: + +``` +uyuniadm -c uyuniadm.yaml install --image registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server uyuni.example.com +``` + +### Podman specific configuration + +Additional parameters can be passed to Podman using `--podman-arg` parameters or configuration like the following in `uyuniadm.yaml`: + +``` +podman: + arg: + - -p 8000:8000 + - -p 8001:8001 +``` + +is equivalent to passing `--podman-arg "-p 8000:8000" --podman-arg "-p 8001:8001"` to `uyuniadm install`. + +This can be useful to expose ports like the Java debugging ones or to mount additional volumes. + +### Kubernetes specific configuration + +The `uyuniadm install` command comes with parameters and thus configuration values for advanced helm chart configuration. +To pass additional values to the Uyuni helm chart at installation time, use the `--helm-uyuni-values chart-values.yaml` parameter or a configuration like the following: + +``` +helm: + uyuni: + values: chart-values.yaml +``` + +The path set as value for this configuration is a YAML file passed to the Uyuni Helm chart. +Be aware that some of the values in this file will be overridden by the `uyuniadm install` parameters. + +For example, to expose the Java debugging ports, add the `exposeJavaDebug: true` line to the helm chart values file. +You can also set more variables like `sccUser` or `sccPass`. +Check the [server-helm/values.yaml](https://github.com/uyuni-project/uyuni/blob/server-container/containers/server-helm/values.yaml) file for the complete list. + +If deploying on RKE2, add the `ingress: nginx` line to the Helm chart values file. + +Note that the Helm chart installs a deployment with one replica. +The pod name is automatically generated by Kubernetes and changes at every start. + + +# Using Uyuni in containers + +To get a shell in the pod, run `uyunictl exec -ti bash`. 
+Note that this command can be used to run any command inside the server, like `uyunictl exec tail /var/log/rhn/rhn_web_ui.log` + +To copy files to the server, use the `uyunictl cp server:` command. +Conversely, to copy files from the server, use `uyunictl cp server: `. + +# Developing with the containers + +## Deploying code + +To deploy java code on the pod, change to the `java` directory and run: + +``` +ant -f manager-build.xml refresh-branding-jar deploy-restart-kube +``` + +In case you changed the pod namespace, pass the corresponding `-Ddeploy.namespace=` parameter. + +**Note** To deploy TSX or Salt code, use the `deploy-static-resources-kube` and `deploy-salt-files-kube` tasks of the ant file. + +## Attaching a java debugger + +First enable the JDWP options in both tomcat and taskomatic using the following command: + +``` +ant -f manager-build.xml enable-java-debug-kube +``` + +Then restart tomcat and taskomatic using ant too: + +``` +ant -f manager-build.xml restart-tomcat-kube restart-taskomatic-kube +``` + +The debugger can now be attached to the usual ports (8000 for tomcat and 8001 for taskomatic) on the host FQDN. + +# Uninstalling + +To remove everything including the volumes, run the following command: + +``` +uyuniadm uninstall --purge-volumes +``` + +Note that `cert-manager` will not be uninstalled if it was not installed by `uyuniadm`. 
diff --git a/containers/doc/server-kubernetes/cert-manager-selfsigned-issuer.yaml b/containers/doc/server-kubernetes/cert-manager-selfsigned-issuer.yaml new file mode 100644 index 000000000000..afa7660e0b66 --- /dev/null +++ b/containers/doc/server-kubernetes/cert-manager-selfsigned-issuer.yaml @@ -0,0 +1,44 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: uyuni-issuer + namespace: default +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: uyuni-ca + namespace: default +spec: + isCA: true + subject: + countries: ["FR"] + provinces: ["Burgundy"] + localities: ["Macon"] + organizations: ["SUSE"] + organizationalUnits: ["BCL"] + emailAddresses: + - sylvestre@world-co.com + commonName: uyuni-dev.world-co.com + dnsNames: + - uyuni-dev.world-co.com + secretName: uyuni-ca + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: uyuni-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: uyuni-ca-issuer + namespace: default +spec: + ca: + secretName: + uyuni-ca diff --git a/containers/doc/server-kubernetes/k3s-traefik-config.yaml b/containers/doc/server-kubernetes/k3s-traefik-config.yaml new file mode 100644 index 000000000000..fd78e0024d47 --- /dev/null +++ b/containers/doc/server-kubernetes/k3s-traefik-config.yaml @@ -0,0 +1,53 @@ +apiVersion: helm.cattle.io/v1 +kind: HelmChartConfig +metadata: + name: traefik + namespace: kube-system +spec: + valuesContent: |- + ports: + postgres: + port: 5432 + expose: true + exposedPort: 5432 + protocol: TCP + salt-publish: + port: 4505 + expose: true + exposedPort: 4505 + protocol: TCP + salt-request: + port: 4506 + expose: true + exposedPort: 4506 + protocol: TCP + cobbler: + port: 25151 + expose: true + exposedPort: 25151 + protocol: TCP + tomcat-debug: + port: 8080 + expose: true + exposedPort: 8000 + protocol: TCP + tasko-debug: + port: 8081 + expose: true + exposedPort: 8001 + protocol: TCP + 
psql-metrics: + port: 9187 + expose: true + exposedPort: 9187 + protocol: TCP + node-metrics: + port: 9101 + expose: true + exposedPort: 9101 + protocol: TCP + tftp: + port: 69 + expose: true + exposedPort: 69 + protocol: UDP diff --git a/containers/doc/server-kubernetes/rhn-ssl-tool.yaml b/containers/doc/server-kubernetes/rhn-ssl-tool.yaml new file mode 100644 index 000000000000..2e0aad7049b8 --- /dev/null +++ b/containers/doc/server-kubernetes/rhn-ssl-tool.yaml @@ -0,0 +1,54 @@ +apiVersion: batch/v1 +kind: Job +metadata: + labels: + run: rhn-ssl-tool + name: rhn-ssl-tool +spec: + backoffLimit: 0 + template: + spec: + restartPolicy: Never + initContainers: + - name: gen-ca + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server + command: + - rhn-ssl-tool + - --gen-ca + - --no-rpm + - --set-common-name=uyuni-dev.world-co.com + - --set-country=FR + - --set-state=Burgundy + - --set-city=Macon + - --set-org=SUSE + - --set-org-unit=BCL + - --set-email=sylvestre@world-co.com + - --password-file=/ssl-build/password + volumeMounts: + - name: ssl-build + mountPath: /ssl-build + containers: + - name: gen-server + image: registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni/server + command: + - rhn-ssl-tool + - --gen-server + - --no-rpm + - --set-cname=uyuni-dev.world-co.com + - --set-country=FR + - --set-state=Burgundy + - --set-city=Macon + - --set-org=SUSE + - --set-org-unit=BCL + - --set-email=sylvestre@world-co.com + - --set-hostname=uyuni-dev + - --set-cname=uyuni.world-co.com + - --password-file=/ssl-build/password + volumeMounts: + - name: ssl-build + mountPath: /ssl-build + volumes: + - name: ssl-build + hostPath: + path: /var/uyuni/ssl-build + type: Directory diff --git a/containers/doc/server-kubernetes/rke2-ingress-nginx-config.yaml b/containers/doc/server-kubernetes/rke2-ingress-nginx-config.yaml new file mode 100644 index 000000000000..2180adcf6c87 --- /dev/null +++ 
b/containers/doc/server-kubernetes/rke2-ingress-nginx-config.yaml @@ -0,0 +1,19 @@ +apiVersion: helm.cattle.io/v1 +kind: HelmChartConfig +metadata: + name: rke2-ingress-nginx + namespace: kube-system +spec: + valuesContent: |- + controller: + config: + hsts: "false" + tcp: + 4505: "default/uyuni-tcp:4505" + 4506: "default/uyuni-tcp:4506" + 5432: "default/uyuni-tcp:5432" + 8000: "default/uyuni-tcp:8000" + 8001: "default/uyuni-tcp:8001" + 25151: "default/uyuni-tcp:25151" + udp: + 69: "default/uyuni-udp:69" diff --git a/containers/hub-xmlrpc-api-image/Dockerfile b/containers/hub-xmlrpc-api-image/Dockerfile new file mode 100644 index 000000000000..7f5103edf688 --- /dev/null +++ b/containers/hub-xmlrpc-api-image/Dockerfile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: MIT +#!BuildTag: uyuni/hub-xmlrpc-api:latest uyuni/hub-xmlrpc-api:4.4.0 uyuni/hub-xmlrpc-api:4.4.0.%RELEASE% + +ARG INIT_BASE=registry.opensuse.org/bci/bci-micro +FROM $INIT_BASE + +RUN zypper --gpg-auto-import-keys --non-interactive install hub-xmlrpc-api + +# LABELs +ARG PRODUCT=Uyuni +ARG VENDOR="Uyuni project" +ARG URL="https://www.uyuni-project.org/" +ARG REFERENCE_PREFIX="registry.opensuse.org/uyuni" + +# Build Service required labels +# labelprefix=org.opensuse.uyuni.hub-xmlrpc-api +LABEL org.opencontainers.image.title="${PRODUCT} Hub XML-RPC API container" +LABEL org.opencontainers.image.description="${PRODUCT} Hub XML-RPC API image" +LABEL org.opencontainers.image.created="%BUILDTIME%" +LABEL org.opencontainers.image.vendor="${VENDOR}" +LABEL org.opencontainers.image.url="${URL}" +LABEL org.opencontainers.image.version="4.4.0" +LABEL org.openbuildservice.disturl="%DISTURL%" +LABEL org.opensuse.reference="${REFERENCE_PREFIX}/server:4.4.0.%RELEASE%" +# endlabelprefix + +CMD ["/usr/bin/hub-xmlrpc-api"] diff --git a/containers/hub-xmlrpc-api-image/_service b/containers/hub-xmlrpc-api-image/_service new file mode 100644 index 000000000000..bde87fa5bc1f --- /dev/null +++ 
b/containers/hub-xmlrpc-api-image/_service @@ -0,0 +1,4 @@ + + + + diff --git a/containers/hub-xmlrpc-api-image/hub-xmlrpc-api.changes b/containers/hub-xmlrpc-api-image/hub-xmlrpc-api.changes new file mode 100644 index 000000000000..e2637480450b --- /dev/null +++ b/containers/hub-xmlrpc-api-image/hub-xmlrpc-api.changes @@ -0,0 +1,4 @@ +------------------------------------------------------------------- +Thu Jun 22 07:30:36 UTC 2023 - Cédric Bosdonnat + +- Initial image for Uyuni Hub XML-RPC API diff --git a/containers/hub-xmlrpc-api-image/tito.props b/containers/hub-xmlrpc-api-image/tito.props new file mode 100644 index 000000000000..f22069cb8efa --- /dev/null +++ b/containers/hub-xmlrpc-api-image/tito.props @@ -0,0 +1,2 @@ +[buildconfig] +tagger = tito.tagger.SUSEContainerTagger diff --git a/containers/server-helm/.helmignore b/containers/server-helm/.helmignore new file mode 100644 index 000000000000..0e8a0eb36f4c --- /dev/null +++ b/containers/server-helm/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/containers/server-helm/Chart.yaml b/containers/server-helm/Chart.yaml new file mode 100644 index 000000000000..84bebb20b9db --- /dev/null +++ b/containers/server-helm/Chart.yaml @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: MIT +#!BuildTag: uyuni/server:latest +#!BuildTag: uyuni/server:4.4.0 +#!BuildTag: uyuni/server:4.4.0-build%RELEASE% +apiVersion: v2 +name: server +description: Uyuni server containers. 
+type: application +home: https://www.uyuni-project.org/ +icon: https://www.uyuni-project.org/img/uyuni-logo.svg +version: 4.4.0 diff --git a/containers/server-helm/_service b/containers/server-helm/_service new file mode 100644 index 000000000000..dc713a1f9381 --- /dev/null +++ b/containers/server-helm/_service @@ -0,0 +1,3 @@ + + + diff --git a/containers/server-helm/charts/.gitkeep b/containers/server-helm/charts/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/containers/server-helm/server-helm.changes b/containers/server-helm/server-helm.changes new file mode 100644 index 000000000000..efc36b9365e0 --- /dev/null +++ b/containers/server-helm/server-helm.changes @@ -0,0 +1,4 @@ +------------------------------------------------------------------- +Thu Mar 9 13:43:51 UTC 2023 - Cédric Bosdonnat + +- Initial version diff --git a/containers/server-helm/templates/_helpers.tpl b/containers/server-helm/templates/_helpers.tpl new file mode 100644 index 000000000000..2cba9281b6b8 --- /dev/null +++ b/containers/server-helm/templates/_helpers.tpl @@ -0,0 +1,11 @@ +{{- define "deployment.container.image" -}} +{{- $imageName := .name -}} +{{- $uri := (printf "%s/%s:%s" .global.Values.repository $imageName .global.Values.version) | default .global.Chart.AppVersion -}} +{{- if .global.Values.images -}} +{{- $image := (get .global.Values.images $imageName) -}} +{{- if $image -}} +{{- $uri = $image -}} +{{- end -}} +{{- end -}} +{{- $uri -}} +{{- end -}} \ No newline at end of file diff --git a/containers/server-helm/templates/deployment.yaml b/containers/server-helm/templates/deployment.yaml new file mode 100644 index 000000000000..524a934ac099 --- /dev/null +++ b/containers/server-helm/templates/deployment.yaml @@ -0,0 +1,583 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: uyuni + namespace: "{{ .Release.Namespace }}" +spec: + replicas: 1 + selector: + matchLabels: + app: uyuni + template: + metadata: + labels: + app: uyuni + spec: + 
initContainers: + - name: init-etc-tls + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/pki/tls /mnt; + chmod --reference=/etc/pki/tls /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/pki/tls/. /mnt; + ln -s /etc/pki/spacewalk-tls/spacewalk.crt /mnt/certs/spacewalk.crt; + ln -s /etc/pki/spacewalk-tls/spacewalk.key /mnt/private/spacewalk.key; + cp /etc/pki/spacewalk-tls/spacewalk.key /mnt/private/pg-spacewalk.key; + chown postgres:postgres /mnt/private/pg-spacewalk.key; + fi + volumeMounts: + - mountPath: /mnt + name: etc-tls + - name: tls-key + mountPath: /etc/pki/spacewalk-tls + - name: init-var-cobbler + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/var/lib/cobbler /mnt; + chmod --reference=/var/lib/cobbler /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/lib/cobbler/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-cobbler + - name: init-var-pgsql + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/var/lib/pgsql /mnt; + chmod --reference=/var/lib/pgsql /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/lib/pgsql/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-pgsql + - name: init-var-cache + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/var/cache /mnt; + chmod --reference=/var/cache /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/cache/. 
/mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-cache + - name: init-var-log + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/var/log /mnt; + chmod --reference=/var/log /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /var/log/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: var-log + - name: init-srv-salt + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/salt /mnt; + chmod --reference=/srv/salt /mnt + volumeMounts: + - mountPath: /mnt + name: srv-salt + - name: init-srv-www-pub + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/www/htdocs/pub /mnt; + chmod --reference=/srv/www/htdocs/pub /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/www/htdocs/pub/. /mnt; + ln -s /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT /mnt/RHN-ORG-TRUSTED-SSL-CERT; + fi + volumeMounts: + - mountPath: /mnt + name: srv-www-pub + - name: init-srv-www-cobbler + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/www/cobbler /mnt; + chmod --reference=/srv/www/cobbler /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/www/cobbler/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-www-cobbler + - name: init-srv-www-osimages + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) 
| indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/www/os-images /mnt; + chmod --reference=/srv/www/os-images /mnt + volumeMounts: + - mountPath: /mnt + name: srv-www-osimages + - name: init-srv-tftpboot + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/tftpboot /mnt; + chmod --reference=/srv/tftpboot /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/tftpboot/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-tftpboot + - name: init-srv-formulametadata + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/formula_metadata /mnt; + chmod --reference=/srv/formula_metadata /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/formula_metadata/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-formulametadata + - name: init-srv-pillar + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/pillar /mnt; + chmod --reference=/srv/pillar /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/pillar/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-pillar + - name: init-srv-susemanager + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/susemanager /mnt; + chmod --reference=/srv/susemanager /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/susemanager/. 
/mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-susemanager + - name: init-srv-spacewalk + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/srv/spacewalk /mnt; + chmod --reference=/srv/spacewalk /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /srv/spacewalk/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: srv-spacewalk + - name: init-root + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/root /mnt; + chmod --reference=/root /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /root/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: root + - name: init-etc-apache2 + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/apache2 /mnt; + chmod --reference=/etc/apache2 /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/apache2/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-apache2 + - name: init-etc-rhn + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/rhn /mnt; + chmod --reference=/etc/rhn /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/rhn/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-rhn + - name: init-etc-systemd + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) 
| indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/systemd/system/multi-user.target.wants /mnt; + chmod --reference=/etc/systemd/system/multi-user.target.wants /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/systemd/system/multi-user.target.wants/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-systemd + - name: init-etc-salt + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/salt /mnt; + chmod --reference=/etc/salt /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/salt/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-salt + - name: init-etc-tomcat + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/tomcat /mnt; + chmod --reference=/etc/tomcat /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/tomcat/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-tomcat + - name: init-etc-cobbler + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/cobbler /mnt; + chmod --reference=/etc/cobbler /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/cobbler/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-cobbler + - name: init-etc-sysconfig + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/sysconfig /mnt; + chmod --reference=/etc/sysconfig /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/sysconfig/. 
/mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-sysconfig + - name: init-etc-postfix + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + command: + - sh + - -x + - -c + - > + chown --reference=/etc/postfix /mnt; + chmod --reference=/etc/postfix /mnt; + if [ -z "$(ls -A /mnt)" ]; then + cp -a /etc/postfix/. /mnt; + fi + volumeMounts: + - mountPath: /mnt + name: etc-postfix + containers: + - name: uyuni + image: {{- include "deployment.container.image" (dict "name" "server" "global" .) | indent 1}} + imagePullPolicy: {{ .Values.pullPolicy }} + ports: + - containerPort: 443 + - containerPort: 80 + - containerPort: 4505 + - containerPort: 4506 + - containerPort: 69 +{{- if .Values.enableMonitoring | default true }} + - containerPort: 9100 + - containerPort: 9187 + - containerPort: 9800 +{{- end }} + protocol: UDP + - containerPort: 25151 + - containerPort: 5432 +{{- if .Values.exposeJavaDebug | default false }} + - containerPort: 8000 + - containerPort: 8001 +{{- end }} + env: + - name: TZ + value: {{ .Values.timezone | default "Etc/UTC" }} +{{- if and .Values.mirror (or .Values.mirror.claimName .Values.mirror.hostPath) }} + - name: MIRROR_PATH + value: /mirror +{{- end }} + volumeMounts: + - mountPath: /run + name: tmp + - mountPath: /sys/fs/cgroup + name: cgroup + - mountPath: /var/lib/cobbler + name: var-cobbler + - mountPath: /var/lib/pgsql + name: var-pgsql + - mountPath: /var/cache + name: var-cache + - mountPath: /var/spacewalk + name: var-spacewalk + - mountPath: /var/log + name: var-log + - mountPath: /srv/salt + name: srv-salt + - mountPath: /srv/www/htdocs/pub + name: srv-www-pub + - mountPath: /srv/www/cobbler + name: srv-www-cobbler + - mountPath: /srv/www/os-images + name: srv-www-osimages + - mountPath: /srv/tftpboot + name: srv-tftpboot + - mountPath: /srv/formula_metadata + name: srv-formulametadata + - mountPath: /srv/pillar + name: srv-pillar + - 
mountPath: /srv/susemanager + name: srv-susemanager + - mountPath: /srv/spacewalk + name: srv-spacewalk + - mountPath: /root + name: root + - mountPath: /etc/apache2 + name: etc-apache2 + - mountPath: /etc/rhn + name: etc-rhn + - mountPath: /etc/systemd/system/multi-user.target.wants + name: etc-systemd + - mountPath: /etc/salt + name: etc-salt + - mountPath: /etc/tomcat + name: etc-tomcat + - mountPath: /etc/cobbler + name: etc-cobbler + - mountPath: /etc/sysconfig + name: etc-sysconfig + - mountPath: /etc/pki/tls + name: etc-tls + - mountPath: /etc/postfix + name: etc-postfix + - name: ca-cert + mountPath: /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT + readOnly: true + subPath: ca.crt + - name: tls-key + mountPath: /etc/pki/spacewalk-tls +{{- if and .Values.mirror (or .Values.mirror.claimName .Values.mirror.hostPath) }} + - name: mirror + mountPath: /mirror +{{- end }} + volumes: + - name: tmp + emptyDir: + medium: Memory + sizeLimit: 256Mi + - name: cgroup + hostPath: + path: /sys/fs/cgroup + type: Directory + - name: var-cobbler + persistentVolumeClaim: + claimName: var-cobbler + - name: var-pgsql + persistentVolumeClaim: + claimName: var-pgsql + - name: var-cache + persistentVolumeClaim: + claimName: var-cache + - name: var-spacewalk + persistentVolumeClaim: + claimName: var-spacewalk + - name: var-log + persistentVolumeClaim: + claimName: var-log + - name: srv-salt + persistentVolumeClaim: + claimName: srv-salt + - name: srv-www-pub + persistentVolumeClaim: + claimName: srv-www-pub + - name: srv-www-cobbler + persistentVolumeClaim: + claimName: srv-www-cobbler + - name: srv-www-osimages + persistentVolumeClaim: + claimName: srv-www-osimages + - name: srv-tftpboot + persistentVolumeClaim: + claimName: srv-tftpboot + - name: srv-formulametadata + persistentVolumeClaim: + claimName: srv-formulametadata + - name: srv-pillar + persistentVolumeClaim: + claimName: srv-pillar + - name: srv-susemanager + persistentVolumeClaim: + claimName: srv-susemanager + - 
name: srv-spacewalk + persistentVolumeClaim: + claimName: srv-spacewalk + - name: root + persistentVolumeClaim: + claimName: root + - name: etc-apache2 + persistentVolumeClaim: + claimName: etc-apache2 + - name: etc-rhn + persistentVolumeClaim: + claimName: etc-rhn + - name: etc-systemd + persistentVolumeClaim: + claimName: etc-systemd + - name: etc-salt + persistentVolumeClaim: + claimName: etc-salt + - name: etc-tomcat + persistentVolumeClaim: + claimName: etc-tomcat + - name: etc-cobbler + persistentVolumeClaim: + claimName: etc-cobbler + - name: etc-sysconfig + persistentVolumeClaim: + claimName: etc-sysconfig + - name: etc-postfix + persistentVolumeClaim: + claimName: etc-postfix + - name: ca-cert + configMap: + name: uyuni-ca + - name: etc-tls + persistentVolumeClaim: + claimName: etc-tls + - name: tls-key + secret: + secretName: uyuni-cert + items: + - key: tls.crt + path: spacewalk.crt + - key: tls.key + path: spacewalk.key + mode: 0600 +{{- if .Values.mirror }} + {{- if .Values.mirror.claimName }} + - name: mirror + persistentVolumeClaim: + claimName: {{ .Values.mirror.claimName }} + {{- else if .Values.mirror.hostPath }} + - name: mirror + hostPath: + path: {{ .Values.mirror.hostPath }} + {{- end }} +{{- end }} + dnsPolicy: ClusterFirst + restartPolicy: Always diff --git a/containers/server-helm/templates/ingress.yaml b/containers/server-helm/templates/ingress.yaml new file mode 100644 index 000000000000..0d24433f18b1 --- /dev/null +++ b/containers/server-helm/templates/ingress.yaml @@ -0,0 +1,185 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + creationTimestamp: null + name: uyuni-ingress-ssl + namespace: "{{ .Release.Namespace }}" + annotations: +{{- if eq .Values.ingress "traefik" }} + traefik.ingress.kubernetes.io/router.tls: "true" + traefik.ingress.kubernetes.io/router.tls.domains.n.main: "{{ .Values.fqdn }}" + traefik.ingress.kubernetes.io/router.entrypoints: "websecure,web" +{{- end }} +{{- if .Values.ingressSslAnnotations }} +{{ 
toYaml .Values.ingressSslAnnotations | indent 4 }} +{{- end }} + labels: + app: uyuni +spec: + tls: + - hosts: + - {{ .Values.fqdn }} + secretName: uyuni-cert + rules: + - host: {{ .Values.fqdn }} + http: + paths: + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: / + pathType: Prefix +{{- if eq .Values.ingress "traefik" }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + creationTimestamp: null + name: uyuni-ingress-ssl-redirect + namespace: "{{ .Release.Namespace }}" + annotations: + traefik.ingress.kubernetes.io/router.middlewares: "default-uyuni-https-redirect@kubernetescrd" + traefik.ingress.kubernetes.io/router.entrypoints: "web" + labels: + app: uyuni +spec: + rules: + - host: {{ .Values.fqdn }} + http: + paths: + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: / + pathType: Prefix +{{- end }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + creationTimestamp: null + name: uyuni-ingress-nossl + namespace: "{{ .Release.Namespace }}" + annotations: +{{- if eq .Values.ingress "nginx" }} + nginx.ingress.kubernetes.io/ssl-redirect: "false" +{{- else if eq .Values.ingress "traefik" }} + traefik.ingress.kubernetes.io/router.tls: "false" + traefik.ingress.kubernetes.io/router.entrypoints: "web" +{{- end }} + labels: + app: uyuni +spec: + rules: + - host: {{ .Values.fqdn }} + http: + paths: + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /pub + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/([^/])+/DownloadFile + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /(rhn/)?rpc/api + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/errors + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/ty/TinyUrl + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/websocket + 
pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /rhn/metrics + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /cobbler_api + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /cblr + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /httpboot + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /images + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /cobbler + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /os-images + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /tftp + pathType: Prefix + - backend: + service: + name: uyuni-tcp + port: + number: 80 + path: /docs + pathType: Prefix diff --git a/containers/server-helm/templates/k3s-ingress-routes.yaml b/containers/server-helm/templates/k3s-ingress-routes.yaml new file mode 100644 index 000000000000..1a2ccad1e0d3 --- /dev/null +++ b/containers/server-helm/templates/k3s-ingress-routes.yaml @@ -0,0 +1,140 @@ +{{- if eq .Values.ingress "traefik" }} +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: uyuni-https-redirect + namespace: "{{ .Release.Namespace }}" +spec: + redirectScheme: + scheme: https + permanent: true +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: postgresql-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - postgres + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 5432 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: salt-publish-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - salt-publish + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 4505 +--- +apiVersion: 
traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: salt-request-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - salt-request + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 4506 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: cobbler-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - cobbler + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 25151 +{{- if .Values.enableMonitoring | default true }} +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: node-exporter-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - node-metrics + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 9100 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: postgresql-exporter-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - psql-metrics + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 9187 +{{- end }} +{{- if .Values.exposeJavaDebug }} +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: tomcat-debug-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - tomcat-debug + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 8000 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: tasko-debug-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - tasko-debug + routes: + - match: HostSNI(`*`) + services: + - name: uyuni-tcp + port: 8001 +{{- end }} +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteUDP +metadata: + name: tftp-router + namespace: "{{ .Release.Namespace }}" +spec: + entryPoints: + - tftp + routes: + - services: + - name: uyuni-udp + port: 69 +{{- end }} diff --git a/containers/server-helm/templates/service.yaml 
b/containers/server-helm/templates/service.yaml new file mode 100644 index 000000000000..a0fa7301d8b7 --- /dev/null +++ b/containers/server-helm/templates/service.yaml @@ -0,0 +1,81 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: uyuni + name: uyuni-tcp + namespace: "{{ .Release.Namespace }}" +{{- if .Values.servicesAnnotations }} + annotations: +{{ toYaml .Values.servicesAnnotations | indent 4 }} +{{- end }} +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: salt-publish + port: 4505 + protocol: TCP + targetPort: 4505 + - name: salt-request + port: 4506 + protocol: TCP + targetPort: 4506 + - name: cobbler + port: 25151 + protocol: TCP + targetPort: 25151 + - name: postgresql + port: 5432 + protocol: TCP + targetPort: 5432 +{{- if .Values.enableMonitoring | default true }} + - name: node-exporter + port: 9100 + protocol: TCP + targetPort: 9100 + - name: postgres-exporter + port: 9187 + protocol: TCP + targetPort: 9187 + - name: taskomatic + port: 9800 + protocol: TCP + targetPort: 9800 +{{- end }} +{{- if .Values.exposeJavaDebug | default false }} + - name: tomcat-debug + port: 8000 + protocol: TCP + targetPort: 8000 + - name: tasko-debug + port: 8001 + protocol: TCP + targetPort: 8001 +{{- end }} + selector: + app: uyuni + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: uyuni + name: uyuni-udp + namespace: "{{ .Release.Namespace }}" +{{- if .Values.servicesAnnotations }} + annotations: +{{ toYaml .Values.servicesAnnotations | indent 4 }} +{{- end }} +spec: + ports: + - name: tftp + port: 69 + protocol: UDP + targetPort: 69 + selector: + app: uyuni + type: ClusterIP diff --git a/containers/server-helm/templates/volumes.yaml b/containers/server-helm/templates/volumes.yaml new file mode 100644 index 000000000000..8ad678d3842b --- /dev/null +++ b/containers/server-helm/templates/volumes.yaml @@ -0,0 +1,575 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-cobbler + 
namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-cobbler +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-pgsql + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-pgsql +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-cache + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-cache +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: var-spacewalk + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-spacewalk +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + 
name: var-log + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 2Gi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: var-log +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-salt + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-salt +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-www-pub + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-www-pub +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-www-cobbler + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-www-cobbler +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim 
+metadata: + name: srv-www-osimages + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-www-osimages +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-tftpboot + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-tftpboot +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-formulametadata + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-formulametadata +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-pillar + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-pillar +{{- end }} 
+--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-susemanager + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-susemanager +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: srv-spacewalk + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 100Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: srv-spacewalk +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: root + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: root +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-apache2 + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: 
etc-apache2 +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-rhn + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-rhn +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-systemd + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-systemd +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-salt + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-salt +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-tomcat + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 10Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + 
data: etc-tomcat +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-cobbler + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 1Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-cobbler +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-sysconfig + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 1Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-sysconfig +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-tls + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 1Mi +{{- if .Values.matchPvByLabel }} + selector: + matchLabels: + data: etc-tls +{{- end }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: etc-postfix + namespace: "{{ .Release.Namespace }}" +spec: +{{- if .Values.storageClass }} +{{- if (eq "-" .Values.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storageClass }}" +{{- end }} +{{- end }} + accessModes: +{{ toYaml .Values.accessModes | indent 4 }} + resources: + requests: + storage: 1Mi +{{- if .Values.matchPvByLabel }} + selector: + 
matchLabels: + data: etc-postfix +{{- end }} diff --git a/containers/server-helm/tito.props b/containers/server-helm/tito.props new file mode 100644 index 000000000000..f22069cb8efa --- /dev/null +++ b/containers/server-helm/tito.props @@ -0,0 +1,2 @@ +[buildconfig] +tagger = tito.tagger.SUSEContainerTagger diff --git a/containers/server-helm/values.yaml b/containers/server-helm/values.yaml new file mode 100644 index 000000000000..cf85d631e853 --- /dev/null +++ b/containers/server-helm/values.yaml @@ -0,0 +1,70 @@ +# The default repository and image version if not defined otherwise +repository: registry.opensuse.org/uyuni +version: latest + +## Allows to override the default URI for an image if defined +## Requires a full URI in a form of /: +## +images: + # server: // + + +## Ref: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy +## +pullPolicy: "IfNotPresent" + +## uyuni server overall Persistent Volume access modes +## Must match those of existing PV or dynamic provisioner +## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +accessModes: + - ReadWriteOnce + +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +# storageClass: "-" + +## matchPvByLabel adds selectors on each claim to select a PV with a 'data' label matching the PVC name. +## This can be helpful for static PV management. +matchPvByLabel: false + +## mirror defines a volume or host path to mount in the container as server.susemanager.fromdir value. +## Use either claimName or hostPath to reference the volume source. 
+## +## When using claimName, both claims and PVs need to be defined before running the chart +## Note that hostPath will not work on multi-node cluster +## +## If the value is set before the first run of the server, the rhn.conf file will be adjusted during the setup. +#mirror: +# claimName: mirror +# hostPath: /srv/mirror + +# TODO Parametrize big volumes sizes + +## servicesAnnotations are annotations to set on both TCP and UDP services. +## This can be useful to share the same IP when using metallb +# servicesAnnotations: + +## exposeJavaDebug will expose the 8000 and 8001 ports to connect a Java debugger +## to tomcat and taskomatic respectively +# exposeJavaDebug: true + +## enableMonitoring will expose the 9100 9187 5556 5557 9500 9800 ports for prometheus to scrape +enableMonitoring: true + +## ingress defines the ingress that is used in the cluster. +## It can be either "nginx", "traefik" or any other value. +ingress: "traefik" + +## ingressSsl are annotations to pass the SSL ingress. 
+## This can be used to set a cert-manager issuer like: +## ingressSslAnnotations: +## cert-manager.io/cluster-issuer: uyuniIssuer +# ingressSslAnnotations: + +# The time zone to set in the containers +timezone: "Etc/UTC" diff --git a/containers/server-image/.env b/containers/server-image/.env new file mode 100644 index 000000000000..cce186fe910a --- /dev/null +++ b/containers/server-image/.env @@ -0,0 +1,23 @@ +# MANAGER_USER= +# MANAGER_PASS= +# MANAGER_ADMIN_EMAIL= +# CERT_O= +# CERT_OU= +# CERT_CITY= +# CERT_STATE= +# CERT_COUNTRY= +# CERT_EMAIL= +# CERT_PASS= +# USE_EXISTING_CERTS= +# MANAGER_DB_NAME= +# MANAGER_DB_HOST= +# MANAGER_DB_PORT= +# MANAGER_DB_PROTOCOL= +# MANAGER_ENABLE_TFTP= +# SCC_USER= +# SCC_PASS= +# REPORT_DB_HOST= +# REPORT_DB_PORT= +# REPORT_DB_NAME= +# REPORT_DB_USER= +# REPORT_DB_PASS= diff --git a/containers/server-image/Dockerfile b/containers/server-image/Dockerfile new file mode 100644 index 000000000000..958765818f84 --- /dev/null +++ b/containers/server-image/Dockerfile @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: MIT +#!BuildTag: uyuni/server:latest uyuni/server:4.4.0 uyuni/server:4.4.0.%RELEASE% + +ARG INIT_BASE=registry.suse.com/bci/bci-base:15.4 +FROM $INIT_BASE + +ARG PRODUCT_REPO +ARG PRODUCT_PATTERN_PREFIX="patterns-uyuni" + +# Add distro and product repos +COPY add_repos.sh /usr/bin +COPY timezone_alignment.sh /usr/bin +RUN sh add_repos.sh ${PRODUCT_REPO} + +COPY timezone_alignment.service /usr/lib/systemd/system/ + +COPY remove_unused.sh . 
+RUN echo "rpm.install.excludedocs = yes" >>/etc/zypp/zypp.conf + +# Main packages +RUN zypper ref && zypper --non-interactive up +RUN zypper --gpg-auto-import-keys --non-interactive install --auto-agree-with-licenses --force-resolution \ + ${PRODUCT_PATTERN_PREFIX}_server \ + ${PRODUCT_PATTERN_PREFIX}_retail \ + susemanager-tftpsync \ + golang-github-prometheus-node_exporter \ + prometheus-postgres_exporter \ + golang-github-QubitProducts-exporter_exporter \ + prometheus-jmx_exporter \ + prometheus-jmx_exporter-tomcat \ + spacecmd \ + grafana-formula \ + locale-formula \ + prometheus-exporters-formula \ + prometheus-formula \ + registry-formula \ + virtualization-formulas \ + uyuni-config-formula \ + inter-server-sync \ + golang-github-lusitaniae-apache_exporter \ + golang-github-prometheus-node_exporter \ + prometheus-postgres_exporter \ + golang-github-QubitProducts-exporter_exporter \ + prometheus-jmx_exporter \ + spacecmd \ + javamail \ + libyui-ncurses-pkg16 \ + virtual-host-gatherer-Kubernetes \ + virtual-host-gatherer-libcloud \ + virtual-host-gatherer-Libvirt \ + virtual-host-gatherer-Nutanix \ + virtual-host-gatherer-VMware \ + vim \ + ipmitool + +RUN sed -i 's/sysctl kernel.shmmax/#sysctl kernel.shmmax/g' /usr/bin/uyuni-setup-reportdb + +RUN mkdir -p /etc/postgres_exporter \ + /etc/prometheus-jmx_exporter/tomcat \ + /usr/lib/systemd/system/tomcat.service.d \ + /etc/prometheus-jmx_exporter/taskomatic \ + /usr/lib/systemd/system/taskomatic.service.d + +COPY postgres_exporter_queries.yaml /etc/postgres_exporter/postgres_exporter_queries.yaml +COPY postgres-exporter /etc/sysconfig/prometheus-postgres_exporter +COPY java_agent.yaml /etc/prometheus-jmx_exporter/tomcat/java_agent.yml +COPY java_agent.yaml /etc/prometheus-jmx_exporter/taskomatic/java_agent.yml +COPY tomcat_jmx.conf /usr/lib/systemd/system/tomcat.service.d/jmx.conf +COPY taskomatic_jmx.conf /usr/lib/systemd/system/taskomatic.service.d/jmx.conf + +RUN chmod -R 755 /usr/bin/timezone_alignment.sh + 
+RUN systemctl enable prometheus-node_exporter; \ + systemctl enable timezone_alignment; + +# LABELs +ARG PRODUCT=Uyuni +ARG VENDOR="Uyuni project" +ARG URL="https://www.uyuni-project.org/" +ARG REFERENCE_PREFIX="registry.opensuse.org/uyuni" + +# Build Service required labels +# labelprefix=org.opensuse.uyuni.server +LABEL org.opencontainers.image.title="${PRODUCT} server container" +LABEL org.opencontainers.image.description="All-in-one ${PRODUCT} server image" +LABEL org.opencontainers.image.created="%BUILDTIME%" +LABEL org.opencontainers.image.vendor="${VENDOR}" +LABEL org.opencontainers.image.url="${URL}" +LABEL org.opencontainers.image.version="4.4.0" +LABEL org.openbuildservice.disturl="%DISTURL%" +LABEL org.opensuse.reference="${REFERENCE_PREFIX}/server:4.4.0.%RELEASE%" +# endlabelprefix + +CMD ["/usr/lib/systemd/systemd"] +HEALTHCHECK --interval=5s --timeout=5s --retries=5 CMD ["/usr/bin/systemctl", "is-active", "multi-user.target"] diff --git a/containers/server-image/README.md b/containers/server-image/README.md new file mode 100644 index 000000000000..5739e65d5f95 --- /dev/null +++ b/containers/server-image/README.md @@ -0,0 +1,5 @@ +# Known issues + +* Apache fails to start in the container if AppArmor is enabled on the host.
+* Avahi names are not resolved inside the container + diff --git a/containers/server-image/_constraints b/containers/server-image/_constraints new file mode 100644 index 000000000000..e6e2c4c0e019 --- /dev/null +++ b/containers/server-image/_constraints @@ -0,0 +1,7 @@ + + + + 10 + + + diff --git a/containers/server-image/_service b/containers/server-image/_service new file mode 100644 index 000000000000..bde87fa5bc1f --- /dev/null +++ b/containers/server-image/_service @@ -0,0 +1,4 @@ + + + + diff --git a/containers/server-image/add_repos.sh b/containers/server-image/add_repos.sh new file mode 100644 index 000000000000..a77e8b755b18 --- /dev/null +++ b/containers/server-image/add_repos.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +if [ -n "$1" ]; then + # update + zypper ar -G http://download.opensuse.org/update/leap/15.4/sle/ sle_update_repo + zypper ar -G http://download.opensuse.org/update/leap/15.4/oss/ os_update_repo + zypper ar -G http://download.opensuse.org/update/leap/15.4/backports/ backports_update_repo + + # distribution + zypper ar -G http://download.opensuse.org/distribution/leap/15.4/repo/oss/ os_pool_repo + + # product + #TODO uncomment when changes are in master branch + #zypper ar -G http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Uyuni-Server-POOL-x86_64-Media1/ server_pool_repo + zypper ar -G http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Testing-Overlay-POOL-x86_64-Media1/ testing_overlay_devel_repo + + zypper addrepo $1 product +fi diff --git a/containers/server-image/java_agent.yaml b/containers/server-image/java_agent.yaml new file mode 100644 index 000000000000..50cd72ebba9f --- /dev/null +++ b/containers/server-image/java_agent.yaml @@ -0,0 +1,6 @@ +whitelistObjectNames: + - java.lang:type=Threading,* + - java.lang:type=Memory,* + - Catalina:type=ThreadPool,name=* +rules: + - pattern: ".*" diff --git a/containers/server-image/postgres-exporter 
b/containers/server-image/postgres-exporter new file mode 100644 index 000000000000..4a8011acf428 --- /dev/null +++ b/containers/server-image/postgres-exporter @@ -0,0 +1,19 @@ +## Path: Applications/PostgreSQLExporter +## Description: Prometheus exporter for PostgreSQL +## Type: string() +## Default: "postgresql://user:passwd@localhost:5432/database?sslmode=disable" +## ServiceRestart: postgres-exporter +# +# Connection URL to postgresql instance +# +DATA_SOURCE_NAME="postgresql://spacewalk:spacewalk@localhost:5432/susemanager?sslmode=disable" + +## Path: Applications/PostgreSQLExporter +## Description: Prometheus exporter for PostgreSQL +## Type: string() +## Default: "" +## ServiceRestart: postgres-exporter +# +# Extra options for postgres-exporter +# +POSTGRES_EXPORTER_PARAMS="--extend.query-path /etc/postgres_exporter/postgres_exporter_queries.yaml" diff --git a/containers/server-image/postgres_exporter_queries.yaml b/containers/server-image/postgres_exporter_queries.yaml new file mode 100644 index 000000000000..f6b3d362880f --- /dev/null +++ b/containers/server-image/postgres_exporter_queries.yaml @@ -0,0 +1,52 @@ +mgr_serveractions: + query: | + SELECT ( + SELECT COUNT(*) + FROM rhnServerAction + WHERE status = ( + SELECT id FROM rhnActionStatus WHERE name = 'Queued' + ) + ) AS queued, + ( + SELECT COUNT(*) + FROM rhnServerAction + WHERE status = ( + SELECT id FROM rhnActionStatus WHERE name = 'Picked Up' + ) + ) AS picked_up, + ( + SELECT COUNT(*) + FROM rhnServerAction + WHERE status = ( + SELECT id FROM rhnActionStatus WHERE name IN ('Completed') + ) + ) AS completed, + ( + SELECT COUNT(*) + FROM rhnServerAction + WHERE status = ( + SELECT id FROM rhnActionStatus WHERE name IN ('Failed') + ) + ) AS failed; + metrics: + - queued: + usage: "GAUGE" + description: "Count of queued Actions" + - picked_up: + usage: "GAUGE" + description: "Count of picked up Actions" + - completed: + usage: "COUNTER" + description: "Count of completed Actions" + - failed: + 
usage: "COUNTER" + description: "Count of failed Actions" + salt_events: + query: | + SELECT COUNT(*) + FROM suseSaltEvent + AS salt_events_count; + metrics: + - salt_events_count: + usage: "GAUGE" + description: "Count of suse salt events" diff --git a/containers/server-image/remove_unused.sh b/containers/server-image/remove_unused.sh new file mode 100755 index 000000000000..ac95e70ecf0f --- /dev/null +++ b/containers/server-image/remove_unused.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# Removes any unnecessary files and packages before moving to the next build stage + +set -xe + +zypper clean --all +rpm -e zypper diff --git a/containers/server-image/server-image.changes b/containers/server-image/server-image.changes new file mode 100644 index 000000000000..3d281727b618 --- /dev/null +++ b/containers/server-image/server-image.changes @@ -0,0 +1 @@ +- Initialized a server image diff --git a/containers/server-image/taskomatic_jmx.conf b/containers/server-image/taskomatic_jmx.conf new file mode 100644 index 000000000000..7f19d11ddb83 --- /dev/null +++ b/containers/server-image/taskomatic_jmx.conf @@ -0,0 +1,2 @@ +[Service] +Environment="JAVA_AGENT=-javaagent:/usr/share/java/jmx_prometheus_javaagent.jar=5557:/etc/prometheus-jmx_exporter/taskomatic/java_agent.yml" diff --git a/containers/server-image/timezone_alignment.service b/containers/server-image/timezone_alignment.service new file mode 100644 index 000000000000..d091ff8329f9 --- /dev/null +++ b/containers/server-image/timezone_alignment.service @@ -0,0 +1,10 @@ +[Unit] +Description=Timezone alignment +After=postgresql.service + +[Service] +ExecStart=timezone_alignment.sh +Type=oneshot + +[Install] +WantedBy=multi-user.target diff --git a/containers/server-image/timezone_alignment.sh b/containers/server-image/timezone_alignment.sh new file mode 100755 index 000000000000..9f66b822c86a --- /dev/null +++ b/containers/server-image/timezone_alignment.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +if [[ ! 
-z "$TZ" ]]; then + timedatectl set-timezone $TZ +fi diff --git a/containers/server-image/tito.props b/containers/server-image/tito.props new file mode 100644 index 000000000000..f22069cb8efa --- /dev/null +++ b/containers/server-image/tito.props @@ -0,0 +1,2 @@ +[buildconfig] +tagger = tito.tagger.SUSEContainerTagger diff --git a/containers/server-image/tomcat_jmx.conf b/containers/server-image/tomcat_jmx.conf new file mode 100644 index 000000000000..a31b816897fe --- /dev/null +++ b/containers/server-image/tomcat_jmx.conf @@ -0,0 +1,2 @@ +[Service] +Environment="CATALINA_OPTS=-javaagent:/usr/share/java/jmx_prometheus_javaagent.jar=5556:/etc/prometheus-jmx_exporter/tomcat/java_agent.yml" diff --git a/java/code/src/com/redhat/rhn/common/conf/ConfigDefaults.java b/java/code/src/com/redhat/rhn/common/conf/ConfigDefaults.java index b773ca1fcae3..94e78316dfc6 100644 --- a/java/code/src/com/redhat/rhn/common/conf/ConfigDefaults.java +++ b/java/code/src/com/redhat/rhn/common/conf/ConfigDefaults.java @@ -846,7 +846,7 @@ private String buildConnectionString(String name, String backend, String host, S } connectionUrl.append(name); - if (useSsl) { + if (!"localhost".equals(host) && useSsl) { connectionUrl.append("?ssl=true&sslrootcert=" + sslrootcert + "&sslmode=" + sslmode); } diff --git a/java/code/src/com/suse/manager/gatherer/GathererJsonIO.java b/java/code/src/com/suse/manager/gatherer/GathererJsonIO.java index bf425bd87963..fc1a25c47ad6 100644 --- a/java/code/src/com/suse/manager/gatherer/GathererJsonIO.java +++ b/java/code/src/com/suse/manager/gatherer/GathererJsonIO.java @@ -26,6 +26,7 @@ import com.google.gson.TypeAdapter; import com.google.gson.reflect.TypeToken; import com.google.gson.stream.JsonReader; +import com.google.gson.stream.JsonToken; import com.google.gson.stream.JsonWriter; import java.io.IOException; @@ -100,11 +101,18 @@ public GathererModule read(JsonReader reader) throws IOException { reader.beginObject(); while (reader.hasNext()) { String key = 
reader.nextName(); + String value = null; + if (reader.peek() == JsonToken.NULL) { + reader.nextNull(); + } + else { + value = reader.nextString(); + } if (key.equals("module")) { - gm.setName(reader.nextString()); + gm.setName(value); } else { - gm.addParameter(key, reader.nextString()); + gm.addParameter(key, value); } } reader.endObject(); diff --git a/java/code/src/com/suse/manager/gatherer/test/GathererJsonIOTest.java b/java/code/src/com/suse/manager/gatherer/test/GathererJsonIOTest.java index e3f7869795b6..b0dffff72ed1 100644 --- a/java/code/src/com/suse/manager/gatherer/test/GathererJsonIOTest.java +++ b/java/code/src/com/suse/manager/gatherer/test/GathererJsonIOTest.java @@ -55,9 +55,10 @@ public void testReadGathererModules() throws Exception { FileUtils.readStringFromFile(TestUtils.findTestData(MODULELIST).getPath()); Map mods = new GathererJsonIO().readGathererModules(json); - assertEquals(2, mods.keySet().size()); + assertEquals(3, mods.keySet().size()); assertTrue(mods.keySet().contains("VMware")); assertTrue(mods.keySet().contains("SUSECloud")); + assertTrue(mods.keySet().contains("Libvirt")); for (GathererModule g : mods.values()) { if (g.getName().equals("VMware")) { @@ -76,6 +77,11 @@ else if (g.getName().equals("SUSECloud")) { assertTrue(g.getParameters().containsKey("protocol")); assertTrue(g.getParameters().containsKey("tenant")); } + else if (g.getName().equals("Libvirt")) { + assertTrue(g.getParameters().containsKey("uri")); + assertTrue(g.getParameters().containsKey("sasl_username")); + assertTrue(g.getParameters().containsKey("sasl_password")); + } else { fail("Unknown Module"); } diff --git a/java/code/src/com/suse/manager/gatherer/test/modulelist.json b/java/code/src/com/suse/manager/gatherer/test/modulelist.json index 12f3d18e73f1..64613aab280c 100644 --- a/java/code/src/com/suse/manager/gatherer/test/modulelist.json +++ b/java/code/src/com/suse/manager/gatherer/test/modulelist.json @@ -14,6 +14,12 @@ "port": 443, "username": "", 
"password": "" + }, + "Libvirt": { + "module": "Libvirt", + "uri": "", + "sasl_username": null, + "sasl_password": null } } diff --git a/java/manager-build.xml b/java/manager-build.xml index c2689a520d62..c9eedcc229a6 100644 --- a/java/manager-build.xml +++ b/java/manager-build.xml @@ -21,6 +21,7 @@ + @@ -226,7 +227,14 @@ yarn is not in the PATH. Please install yarn first. - + + + + + + + + @@ -235,10 +243,6 @@ - - - - @@ -294,6 +298,136 @@ + + + + + + + + + + + + + + + kubectl is not in the PATH. Please install kubectl first. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/java/spacewalk-java.changes.cbosdo.local-reportdb b/java/spacewalk-java.changes.cbosdo.local-reportdb new file mode 100644 index 000000000000..d7338755b62a --- /dev/null +++ b/java/spacewalk-java.changes.cbosdo.local-reportdb @@ -0,0 +1 @@ +- Don't force ssl verification on reportdb using localhost diff --git a/java/spacewalk-java.changes.mbussolotto.parse_null b/java/spacewalk-java.changes.mbussolotto.parse_null new file mode 100644 index 000000000000..6e9a1cd25822 --- /dev/null +++ b/java/spacewalk-java.changes.mbussolotto.parse_null @@ -0,0 +1 @@ +- parse virtual-host-gatherer null value diff --git a/python/spacewalk/common/rhnConfig.py b/python/spacewalk/common/rhnConfig.py index 0fce7040b747..a05b4d452fda 100644 --- a/python/spacewalk/common/rhnConfig.py +++ b/python/spacewalk/common/rhnConfig.py @@ -100,6 +100,9 @@ def is_initialized(self): def modifiedYN(self): """returns last modified time diff if rhn.conf has changed.""" + if not os.path.exists(self.filename): + return 0 + try: si = os.stat(self.filename) except OSError: @@ -142,7 +145,8 @@ def parse(self): # Now that we parsed the defaults, we parse the multi-key # self.filename configuration (ie, /etc/rhn/rhn.conf) - self.__parsedConfig = 
parse_file(self.filename) + if os.path.exists(self.filename): + self.__parsedConfig = parse_file(self.filename) # And now generate and cache the current component self.__merge() diff --git a/python/spacewalk/common/rhnLog.py b/python/spacewalk/common/rhnLog.py index 6d189a8c36f1..d3ff21ea5ea3 100644 --- a/python/spacewalk/common/rhnLog.py +++ b/python/spacewalk/common/rhnLog.py @@ -100,7 +100,7 @@ def initLOG(log_file="stderr", level=0, component=""): # fetch uid, gid so we can do a "chown ..." with cfg_component(component=None) as CFG: - apache_uid, apache_gid = getUidGid(CFG.httpd_user, CFG.httpd_group) + apache_uid, apache_gid = getUidGid(CFG.get('httpd_user', 'wwwrun'), CFG.get('httpd_group', 'www')) try: os.makedirs(log_path) @@ -187,7 +187,7 @@ def __init__(self, log_file, level, component): set_close_on_exec(self.fd) if newfileYN: with cfg_component(component=None) as CFG: - apache_uid, apache_gid = getUidGid(CFG.httpd_user, CFG.httpd_group) + apache_uid, apache_gid = getUidGid(CFG.get('httpd_user', 'wwwrun'), CFG.get('httpd_group', 'www')) os.chown(self.file, apache_uid, apache_gid) os.chmod(self.file, int('0660', 8)) except: diff --git a/python/spacewalk/spacewalk-backend.changes.cbosdo.rhnconfig-nofile b/python/spacewalk/spacewalk-backend.changes.cbosdo.rhnconfig-nofile new file mode 100644 index 000000000000..71b461e69276 --- /dev/null +++ b/python/spacewalk/spacewalk-backend.changes.cbosdo.rhnconfig-nofile @@ -0,0 +1 @@ +- Accept missing rhn.conf file diff --git a/python/spacewalk/spacewalk-backend.spec b/python/spacewalk/spacewalk-backend.spec index 9ab31d2a699b..0cc2506ad923 100644 --- a/python/spacewalk/spacewalk-backend.spec +++ b/python/spacewalk/spacewalk-backend.spec @@ -351,15 +351,6 @@ install -Dd -m 0750 % $RPM_BUILD_ROOT%{_prefix}/lib/zypp/plugins/urlresolver %{__install} satellite_tools/spacewalk-uln-resolver $RPM_BUILD_ROOT%{_prefix}/lib/zypp/plugins/urlresolver/spacewalk-uln-resolver %{__install} 
satellite_tools/spacewalk-extra-http-headers $RPM_BUILD_ROOT%{_prefix}/lib/zypp/plugins/urlresolver/spacewalk-extra-http-headers - -%post server -%if 0%{?suse_version} -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES wsgi -%endif -if [ ! -e %{rhnconf}/rhn.conf ]; then - exit 0 -fi - %pre tools %if !0%{?rhel} %service_add_pre spacewalk-diskcheck.service spacewalk-diskcheck.timer @@ -372,9 +363,6 @@ fi %else %service_add_post spacewalk-diskcheck.service spacewalk-diskcheck.timer %endif -if test -f /var/log/rhn/rhn_server_satellite.log; then - chown -f %{apache_user}:%{apache_group} /var/log/rhn/rhn_server_satellite.log -fi %preun tools %if 0%{?rhel} @@ -657,7 +645,7 @@ fi %{!?_licensedir:%global license %doc} %license LICENSE %doc README.ULN -%attr(644,root,%{apache_group}) %{rhnconfigdefaults}/rhn_server_satellite.conf +%attr(644,%{apache_user},%{apache_group}) %{rhnconfigdefaults}/rhn_server_satellite.conf %config(noreplace) %{_sysconfdir}/logrotate.d/spacewalk-backend-tools %config(noreplace) %{rhnconf}/signing.conf %attr(755,root,root) %{_bindir}/rhn-charsets diff --git a/python/uyuni/common/fileutils.py b/python/uyuni/common/fileutils.py index 3c6cb06a013c..91396583680f 100644 --- a/python/uyuni/common/fileutils.py +++ b/python/uyuni/common/fileutils.py @@ -301,9 +301,9 @@ def createPath(path, user=None, group=None, chmod=int('0755', 8)): """ with cfg_component(component=None) as CFG: if user is None: - user = CFG.httpd_user + user = CFG.get('httpd_user', 'wwwrun') if group is None: - group = CFG.httpd_group + group = CFG.get('httpd_group', 'www') path = cleanupAbsPath(path) if not os.path.exists(path): @@ -324,7 +324,7 @@ def setPermsPath(path, user=None, group='root', chmod=int('0750', 8)): """chown user.group and set permissions to chmod""" if user is None: with cfg_component(component=None) as CFG: - user = CFG.httpd_user + user = CFG.get('httpd_user', 'wwwrun') if not os.path.exists(path): raise OSError("*** ERROR: Path doesn't exist (can't set 
permissions): %s" % path) diff --git a/python/uyuni/uyuni-common-libs.changes.cbosdo.rhnconfig-nofile b/python/uyuni/uyuni-common-libs.changes.cbosdo.rhnconfig-nofile new file mode 100644 index 000000000000..71b461e69276 --- /dev/null +++ b/python/uyuni/uyuni-common-libs.changes.cbosdo.rhnconfig-nofile @@ -0,0 +1 @@ +- Accept missing rhn.conf file diff --git a/rel-eng/packages/hub-xmlrpc-api-image b/rel-eng/packages/hub-xmlrpc-api-image new file mode 100644 index 000000000000..45971d5a183c --- /dev/null +++ b/rel-eng/packages/hub-xmlrpc-api-image @@ -0,0 +1 @@ +4.4.0 containers/hub-xmlrpc-api-image/ diff --git a/rel-eng/packages/server-helm b/rel-eng/packages/server-helm new file mode 100644 index 000000000000..7ffc33013560 --- /dev/null +++ b/rel-eng/packages/server-helm @@ -0,0 +1 @@ +4.4.0 containers/server-helm/ diff --git a/rel-eng/packages/server-image b/rel-eng/packages/server-image new file mode 100644 index 000000000000..fe257db88808 --- /dev/null +++ b/rel-eng/packages/server-image @@ -0,0 +1 @@ +4.4.0 containers/server-image/ diff --git a/spacewalk/certs-tools/Makefile.certs b/spacewalk/certs-tools/Makefile.certs index 65513d5ad06a..8cc42ee4ca1b 100644 --- a/spacewalk/certs-tools/Makefile.certs +++ b/spacewalk/certs-tools/Makefile.certs @@ -18,7 +18,7 @@ SUBDIR = certs FILES = __init__ rhn_ssl_tool sslToolCli sslToolConfig sslToolLib \ - timeLib rhn_bootstrap rhn_bootstrap_strings client_config_update \ + timeLib rhn_bootstrap rhn_bootstrap_strings \ mgr_ssl_cert_setup INSTALL_ROOT_FILES = gen-rpm.sh sign.sh update-ca-cert-trust.sh @@ -51,17 +51,9 @@ install :: $(SBINFILES) $(BINFILES) $(PYBINFILES) $(MANS) $(PREFIX)/$(MANDIR) $(INSTALL_BIN) $(f) $(PREFIX)$(BINDIR)/$(f) ; ) $(foreach f,$(PYBINFILES), \ $(INSTALL_BIN) $(f) $(PREFIX)$(BINDIR)/$(f)-$(PYTHONVERSION) ; ) - -install :: instClientScript @$(foreach f,$(INSTALL_ROOT_FILES), \ $(INSTALL_DATA) $(f) $(PREFIX)$(ROOT)/$(SUBDIR)/$(f) ; ) -# note: this file is in two places. 
One in the RPM and one in pub/bootstrap/ -instClientScript: $(PUB_BOOTSTRAP_DIR)/client_config_update.py - -$(PUB_BOOTSTRAP_DIR)/client_config_update.py : $(PREFIX)/$(PUB_BOOTSTRAP_DIR) client_config_update.py - install -m 0755 client_config_update.py $(PREFIX)/$@ - %.$(MANSECT) : %.sgml /usr/bin/docbook2man $< diff --git a/spacewalk/certs-tools/client_config_update.py b/spacewalk/certs-tools/client_config_update.py deleted file mode 100755 index bf8bebf2ede3..000000000000 --- a/spacewalk/certs-tools/client_config_update.py +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/python -u -# -# Copyright (c) 2008--2013 Red Hat, Inc. -# -# This software is licensed to you under the GNU General Public License, -# version 2 (GPLv2). There is NO WARRANTY for this software, express or -# implied, including the implied warranties of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 -# along with this software; if not, see -# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. -# -# Red Hat trademarks are not licensed under GPLv2. No permission is -# granted to use or replicate Red Hat trademarks that are incorporated -# in this software or its documentation. -# -# key=value formatted "config file" mapping script -# -# NOT TO BE USED DIRECTLY -# This is called by a script generated by the rhn-bootstrap utility. -# -# Specifically engineered with the RHN Update Agent configuration files -# in mind though it is relatively generic in nature. -# -# Author: Todd Warner -# - -""" -Client configuration mapping script that writes to an SUSE Manager Update Agent-type -config file(s) - -I.e., maps a file with SUSE Manager Update Agent-like key=value pairs e.g., -serverURL=https://test-satellite.example.redhat.com/XMLRPC -enableProxy=0 -sslCACert=/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT - -And maps that to the client's configuration files. 
- -------------- -To map new settings to a file that uses the format key=value, where -key[comment]=value is a comment line you do this (e.g., mapping -key=value pairs to /etc/sysconfig/rhn/up2date): - - 1. edit a file (e.g., 'client-config-overrides.txt'), inputing new key=value pairs - to replace in config file (e.g., /etc/sysconfig/rhn/up2date). - Specifically: -serverURL=https://test-satellite.example.redhat.com/XMLRPC - - 2. ./client_config_update.py /etc/sysconfig/rhn/up2date client-config-overrides.txt - -That's all there is to it. - -If you are running an older RHN Update Agent, the rhn_register file can be -mapped as well: - - ./client_config_update.py /etc/sysconfig/rhn/rhn_register client-config-overrides.txt -""" - - -from __future__ import print_function -import os -import sys -import tempfile - -DEFAULT_CLIENT_CONFIG_OVERRIDES = 'client-config-overrides.txt' - -RHN_REGISTER = "/etc/sysconfig/rhn/rhn_register" -UP2DATE = "/etc/sysconfig/rhn/up2date" - - -def _parseConfigLine(line): - """parse a line from a config file. Format can be either "key=value\n" - or "whatever text\n" - - return either: - (key, value) - or - None - The '\n' is always stripped from the value. - """ - - kv = line.decode('utf8').split('=') - if len(kv) < 2: - # not a setting - return None - - if len(kv) > 2: - # '=' is part of the value, need to rejoin it. - kv = [kv[0], '='.join(kv[1:])] - - if kv[0].find('[comment]') > 0: - # comment; not a setting - return None - - # it's a setting, trim the '\n' and return the (key, value) pair. 
- kv[0] = kv[0].strip() - kv[1] = kv[1].strip() - return tuple(kv) - -def readConfigFile(configFile): - "read in config file, return dictionary of key/value pairs" - - fin = open(configFile, 'rb') - - d = {} - - for line in fin.readlines(): - kv = _parseConfigLine(line) - if kv: - d[kv[0]] = kv[1] - return d - - -def dumpConfigFile(configFile): - "print out dictionary of key/value pairs from configFile" - - import pprint - pprint.pprint(readConfigFile(configFile)) - - -def mapNewSettings(configFile, dnew): - fo = tempfile.TemporaryFile(prefix = '/tmp/client-config-overrides-', mode = 'r+b') - fin = open(configFile, 'rb') - - changedYN = 0 - - # write to temp file - for line in fin.readlines(): - kv = _parseConfigLine(line) - if not kv: - # not a setting, write the unaltered line - fo.write(line) - else: - # it's a setting, populate from the dictionary - if kv[0] in dnew: - if dnew[kv[0]] != kv[1]: - fo.write(('%s=%s\n' % (kv[0], dnew[kv[0]])).encode('utf8')) - changedYN = 1 - else: - fo.write(line) - # it's a setting but not being mapped - else: - fo.write(line) - fin.close() - - if changedYN: - # write from temp file to configFile - fout = open(configFile, 'wb') - fo.seek(0) - fout.write(fo.read()) - print('*', configFile, 'written') - - -def parseCommandline(): - """parse/process the commandline - - Commandline is dead simple for easiest portability. - """ - - # USAGE & HELP! 
- if '--usage' in sys.argv or '-h' in sys.argv or '--help' in sys.argv: - print("""\ -usage: python %s CONFIG_FILENAME NEW_MAPPINGS [options] -arguments: - CONFIG_FILENAME config file to alter - NEW_MAPPINGS file containing new settings that map onto the - config file -options: - -h, --help show this help message and exit - --usage show brief usage summary - -examples: - python %s %s %s - python %s %s %s -""" % (sys.argv[0], - sys.argv[0], RHN_REGISTER, DEFAULT_CLIENT_CONFIG_OVERRIDES, - sys.argv[0], UP2DATE, DEFAULT_CLIENT_CONFIG_OVERRIDES)) - - sys.exit(0) - - - if len(sys.argv) != 3: - msg = "ERROR: exactly two arguments are required, see --help" - raise TypeError(msg) - - configFilename = os.path.abspath(sys.argv[1]) - newMappings = os.path.abspath(sys.argv[2]) - - if not os.path.exists(configFilename): - msg = ("ERROR: filename to alter (1st argument), does not exist:\n" - " %s" - % configFilename) - raise IOError(msg) - - if not os.path.exists(newMappings): - msg = ("ERROR: filename that contains the mappings (2nd argument), " - "does not exist:\n" - " %s" % newMappings) - raise IOError(msg) - - return configFilename, newMappings - - -def main(): - "parse commandline, process config file key=value mappings" - - configFilename, newMappings = parseCommandline() - #dumpConfigFile(configFilename) - #mapNewSettings('test-up2date', readConfigFile(DEFAULT_CLIENT_CONFIG_OVERRIDES)) - - mapNewSettings(configFilename, readConfigFile(newMappings)) - -if __name__ == '__main__': - try: - sys.exit(main() or 0) - except Exception as err: - print(err) diff --git a/spacewalk/certs-tools/mgr-bootstrap.sgml b/spacewalk/certs-tools/mgr-bootstrap.sgml index 981ee85d66a7..17093f450488 100644 --- a/spacewalk/certs-tools/mgr-bootstrap.sgml +++ b/spacewalk/certs-tools/mgr-bootstrap.sgml @@ -185,7 +185,6 @@ Files /usr/bin/mgr-bootstrap - /usr/bin/client_config_update.py /usr/bin/client-config-overrides.txt diff --git a/spacewalk/certs-tools/rhn_bootstrap.py 
b/spacewalk/certs-tools/rhn_bootstrap.py index 3aaffcc2e19d..8b098b186087 100755 --- a/spacewalk/certs-tools/rhn_bootstrap.py +++ b/spacewalk/certs-tools/rhn_bootstrap.py @@ -39,7 +39,6 @@ ## local imports from uyuni.common import rhn_rpm from spacewalk.common.rhnConfig import CFG, initCFG -from .client_config_update import readConfigFile from .rhn_bootstrap_strings import \ getHeader, getGPGKeyImportSh, \ getCorpCACertSh, getRegistrationStackSh, \ @@ -66,6 +65,8 @@ initCFG('server') DOC_ROOT = CFG.DOCUMENTROOT +initCFG('java') + DEFAULT_APACHE_PUB_DIRECTORY = DOC_ROOT + '/pub' DEFAULT_OVERRIDES = 'client-config-overrides.txt' DEFAULT_SCRIPT = 'bootstrap.sh' @@ -83,6 +84,47 @@ errnoCANotFound = 16 errnoGPGNotFound = 17 +def _parseConfigLine(line): + """parse a line from a config file. Format can be either "key=value\n" + or "whatever text\n" + + return either: + (key, value) + or + None + The '\n' is always stripped from the value. + """ + + kv = line.decode('utf8').split('=') + if len(kv) < 2: + # not a setting + return None + + if len(kv) > 2: + # '=' is part of the value, need to rejoin it. + kv = [kv[0], '='.join(kv[1:])] + + if kv[0].find('[comment]') > 0: + # comment; not a setting + return None + + # it's a setting, trim the '\n' and return the (key, value) pair. 
+ kv[0] = kv[0].strip() + kv[1] = kv[1].strip() + return tuple(kv) + +def readConfigFile(configFile): + "read in config file, return dictionary of key/value pairs" + + fin = open(configFile, 'rb') + + d = {} + + for line in fin.readlines(): + kv = _parseConfigLine(line) + if kv: + d[kv[0]] = kv[1] + return d # should come out of common code when we move this code out of # rhns-certs-tools @@ -154,7 +196,7 @@ def getDefaultOptions(): 'activation-keys': '', 'overrides': DEFAULT_OVERRIDES, 'script': DEFAULT_SCRIPT, - 'hostname': socket.getfqdn(), + 'hostname': CFG.HOSTNAME if CFG.has_key('hostname') else socket.getfqdn(), 'ssl-cert': '', # will trigger a search 'gpg-key': "", 'http-proxy': "", @@ -498,8 +540,6 @@ def writeClientConfigOverrides(options): fout.write("""\ # RHN Client (rhn_register/up2date) config-overrides file v4.0 # -# To be used only in conjuction with client_config_update.py -# # This file was autogenerated. # # The simple rules: diff --git a/spacewalk/certs-tools/rhn_bootstrap_strings.py b/spacewalk/certs-tools/rhn_bootstrap_strings.py index 89dc320575f9..abe2ed4e01af 100644 --- a/spacewalk/certs-tools/rhn_bootstrap_strings.py +++ b/spacewalk/certs-tools/rhn_bootstrap_strings.py @@ -605,7 +605,7 @@ def getRegistrationStackSh(): call_tukit "zypper --non-interactive update {PKG_NAME_VENV_UPDATE} ||:" fi else - if [ -z "$SNAPSHOT_ID"]; then + if [ -z "$SNAPSHOT_ID" ]; then zypper --non-interactive up {PKG_NAME_UPDATE} $RHNLIB_PKG ||: else call_tukit "zypper --non-interactive update {PKG_NAME_UPDATE} $RHNLIB_PKG ||:" @@ -896,12 +896,14 @@ def getRegistrationSaltSh(productName): fi MINION_ID_FILE="${{SNAPSHOT_PREFIX}}/etc/salt/minion_id" +MINION_PKI_CONF="${{SNAPSHOT_PREFIX}}/etc/salt/pki" MINION_CONFIG_DIR="${{SNAPSHOT_PREFIX}}/etc/salt/minion.d" SUSEMANAGER_MASTER_FILE="${{MINION_CONFIG_DIR}}/susemanager.conf" MINION_SERVICE="salt-minion" if [ $VENV_ENABLED -eq 1 ]; then MINION_ID_FILE="${{SNAPSHOT_PREFIX}}/etc/venv-salt-minion/minion_id" + 
MINION_PKI_CONF="${{SNAPSHOT_PREFIX}}/etc/venv-salt-minion/pki" MINION_CONFIG_DIR="${{SNAPSHOT_PREFIX}}/etc/venv-salt-minion/minion.d" SUSEMANAGER_MASTER_FILE="${{MINION_CONFIG_DIR}}/susemanager.conf" MINION_SERVICE="venv-salt-minion" @@ -951,6 +953,11 @@ def getRegistrationSaltSh(productName): SALT_RUNNING: 1 EOF +# Remove old minion keys so reregistration do different master works +if [ -d "$MINION_PKI_CONF" ]; then + rm -r "$MINION_PKI_CONF" +fi + if [ -n "$SNAPSHOT_ID" ]; then cat <> "${{MINION_CONFIG_DIR}}/transactional_update.conf" # Enable the transactional_update executor diff --git a/spacewalk/certs-tools/spacewalk-certs-tools.changes b/spacewalk/certs-tools/spacewalk-certs-tools.changes index 05b2d2956e89..75043df1d282 100644 --- a/spacewalk/certs-tools/spacewalk-certs-tools.changes +++ b/spacewalk/certs-tools/spacewalk-certs-tools.changes @@ -1,4 +1,5 @@ - Add openssl3 compatibility. +- mgr-bootstrap read the hostname from rhn.conf if possible - Read CA password from a file - Also ship SUSE specific files on Enterprise Linux. 
- Use the CA cert in the pki config to generate build host rpm diff --git a/spacewalk/certs-tools/spacewalk-certs-tools.changes.mbussolotto.remove_client_config_update b/spacewalk/certs-tools/spacewalk-certs-tools.changes.mbussolotto.remove_client_config_update new file mode 100644 index 000000000000..e5dff0de0831 --- /dev/null +++ b/spacewalk/certs-tools/spacewalk-certs-tools.changes.mbussolotto.remove_client_config_update @@ -0,0 +1 @@ +- Remove client_config_update.py diff --git a/spacewalk/certs-tools/spacewalk-certs-tools.spec b/spacewalk/certs-tools/spacewalk-certs-tools.spec index fbb69106fb9c..ff9c2dd391fd 100644 --- a/spacewalk/certs-tools/spacewalk-certs-tools.spec +++ b/spacewalk/certs-tools/spacewalk-certs-tools.spec @@ -87,7 +87,7 @@ sed -i 's|etc/httpd/conf|etc/apache2|g' ssl-howto.txt %install install -d -m 755 $RPM_BUILD_ROOT/%{rhnroot}/certs -sed -i '1s|python\b|python3|' rhn-ssl-tool mgr-package-rpm-certificate-osimage rhn-bootstrap client_config_update.py +sed -i '1s|python\b|python3|' rhn-ssl-tool mgr-package-rpm-certificate-osimage rhn-bootstrap make -f Makefile.certs install PREFIX=$RPM_BUILD_ROOT ROOT=%{rhnroot} \ PYTHONPATH=%{python3_sitelib} PYTHONVERSION=%{python3_version} \ MANDIR=%{_mandir} PUB_BOOTSTRAP_DIR=%{pub_bootstrap_dir} @@ -126,7 +126,6 @@ ln -s spacewalk-ssh-push-init $RPM_BUILD_ROOT/%{_sbindir}/mgr-ssh-push-init %doc %{_mandir}/man1/mgr-*.1* %doc ssl-howto-simple.txt ssl-howto.txt %license LICENSE -%{pub_bootstrap_dir}/client_config_update.py* %dir %{rhnroot} %dir %{pub_dir} %dir %{pub_bootstrap_dir} diff --git a/spacewalk/certs-tools/spacewalk-ssh-push-init b/spacewalk/certs-tools/spacewalk-ssh-push-init index d95629abf93a..08e505a505fc 100755 --- a/spacewalk/certs-tools/spacewalk-ssh-push-init +++ b/spacewalk/certs-tools/spacewalk-ssh-push-init @@ -268,7 +268,7 @@ if [ "${USE_TUNNEL}" = "Y" ]; then exit_in_case_of_error echo "* Cleaning up temporary files" - ssh -i ${SSH_IDENTITY} ${OPTIONS} ${USER}@${CLIENT} "rm -fv 
enable.sh bootstrap.sh client-config-overrides-tunnel.txt client_config_update.py" + ssh -i ${SSH_IDENTITY} ${OPTIONS} ${USER}@${CLIENT} "rm -fv enable.sh bootstrap.sh client-config-overrides-tunnel.txt" cleanup_temp_files elif [ -n "${BOOTSTRAP}" ]; then # Simple registration with given bootstrap script @@ -278,5 +278,5 @@ elif [ -n "${BOOTSTRAP}" ]; then exit_in_case_of_error echo "* Cleaning up temporary files remotely" - ssh -i ${SSH_IDENTITY} ${OPTIONS} ${USER}@${CLIENT} "rm -fv bootstrap.sh client-config-overrides.txt client_config_update.py" + ssh -i ${SSH_IDENTITY} ${OPTIONS} ${USER}@${CLIENT} "rm -fv bootstrap.sh client-config-overrides.txt" fi diff --git a/spacewalk/config/spacewalk-config.spec b/spacewalk/config/spacewalk-config.spec index 3b7f7dbce642..40bd6685bac1 100644 --- a/spacewalk/config/spacewalk-config.spec +++ b/spacewalk/config/spacewalk-config.spec @@ -166,21 +166,6 @@ if [ $1 -eq 2 ] ; then fi fi -%if 0%{?suse_version} -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES version -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES proxy -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES proxy_ajp -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES proxy_wstunnel -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES rewrite -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES headers -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES xsendfile -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES filter -sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES deflate -sysconf_addword /etc/sysconfig/apache2 APACHE_SERVER_FLAGS SSL -sysconf_addword /etc/sysconfig/apache2 APACHE_SERVER_FLAGS ISSUSE -sysconf_addword -r /etc/sysconfig/apache2 APACHE_MODULES access_compat -%endif - # sudo is reading every file here! So ensure we do not have duplicate definitions! 
if [ -e /etc/sudoers.d/spacewalk.rpmsave ]; then mv /etc/sudoers.d/spacewalk.rpmsave /root/sudoers-spacewalk.save diff --git a/spacewalk/setup/bin/spacewalk-setup b/spacewalk/setup/bin/spacewalk-setup index 3be2d0e9d5ae..3bea4401c2dc 100755 --- a/spacewalk/setup/bin/spacewalk-setup +++ b/spacewalk/setup/bin/spacewalk-setup @@ -244,7 +244,7 @@ sub setup_cobbler { my %options = (); Spacewalk::Setup::read_config('/usr/share/rhn/config-defaults/rhn.conf',\%options); - system(COBBLER_COMMAND . " --apache2-config-directory " . $options{'httpd_config_dir'}); + system(COBBLER_COMMAND . " --apache2-config-directory " . $options{'httpd_config_dir'} . " -f " . $answers->{'hostname'}); my $skip_rhnconf = 0; open(FILE, "<" . Spacewalk::Setup::DEFAULT_RHN_CONF_LOCATION); diff --git a/spacewalk/setup/bin/spacewalk-setup-cobbler b/spacewalk/setup/bin/spacewalk-setup-cobbler index bdf99ef1790b..707a5f111170 100755 --- a/spacewalk/setup/bin/spacewalk-setup-cobbler +++ b/spacewalk/setup/bin/spacewalk-setup-cobbler @@ -24,6 +24,7 @@ parser.add_argument('--cobbler-config-directory', '-c', dest='cobbler_config_dir help='The directory where "settings" and "modules.conf" are in.') parser.add_argument('--apache2-config-directory', '-a', dest='httpd_config_directory', default="/etc/apache2/conf.d", help='The directory where the Apache config file "cobbler.conf" is in.') +parser.add_argument('--fqdn', '-f', dest='fqdn', default=None) COBBLER_CONFIG_DIRECTORY = "/etc/cobbler/" COBBLER_CONFIG_FILES = ["modules.conf", "settings.yaml"] @@ -41,20 +42,21 @@ def backup_file(file_path: str): copyfile(file_path, "%s.backup" % file_path) -def manipulate_cobbler_settings(config_dir: str, settings_yaml: str): +def manipulate_cobbler_settings(config_dir: str, settings_yaml: str, fqdn: str): """ Manipulate the main Cobbler configuration file which is in YAML format. This function backs the original configuration up and writes a new one with the required changes to the disk. 
:param config_dir: The directory of Cobbler where the config files are. :param settings_yaml: The name of the main YAML file of Cobbler. + :param fqdn: The FQDN of the server. If None (default), the FQDN is resolved from the system """ full_path = os.path.join(config_dir, settings_yaml) backup_file(full_path) with open(full_path) as settings_file: filecontent = yaml.safe_load(settings_file.read()) - filecontent["server"] = socket.getfqdn() + filecontent["server"] = fqdn or socket.getfqdn() # In case of failing DNS resolution, we get a OSError (socket.gaierror) try: @@ -72,7 +74,7 @@ def manipulate_cobbler_settings(config_dir: str, settings_yaml: str): exit(1) filecontent["pxe_just_once"] = True - filecontent["redhat_management_server"] = socket.getfqdn() + filecontent["redhat_management_server"] = fqdn or socket.getfqdn() yaml_dump = yaml.safe_dump(filecontent) with open(full_path, "w") as settings_file: settings_file.write(yaml_dump) @@ -128,7 +130,7 @@ def main(): """ args = parser.parse_args() sanitize_args(args) - manipulate_cobbler_settings(COBBLER_CONFIG_DIRECTORY, COBBLER_CONFIG_FILES[1]) + manipulate_cobbler_settings(COBBLER_CONFIG_DIRECTORY, COBBLER_CONFIG_FILES[1], args.fqdn) manipulate_cobbler_modules(COBBLER_CONFIG_DIRECTORY, COBBLER_CONFIG_FILES[0]) remove_virtual_host(HTTPD_CONFIG_DIRECTORY, COBBLER_HTTP_CONFIG) diff --git a/spacewalk/setup/lib/Spacewalk/Setup.pm b/spacewalk/setup/lib/Spacewalk/Setup.pm index 7ff72ddbe492..ee42ec90b278 100644 --- a/spacewalk/setup/lib/Spacewalk/Setup.pm +++ b/spacewalk/setup/lib/Spacewalk/Setup.pm @@ -672,6 +672,7 @@ sub print_progress { err_code => 1, system_opts => 1, }); + print "Running " . join(" ", @{$params{system_opts}}) . 
"\n"; local *LOGFILE; open(LOGFILE, ">>", $params{log_file_name}) or do { @@ -893,7 +894,9 @@ sub postgresql_reportdb_setup { } $ENV{PGSSLROOTCERT} = $answers->{'report-db-ca-cert'}; - $ENV{PGSSLMODE} = "verify-full"; + if ($answers->{'report-db-host'} ne 'localhost') { + $ENV{PGSSLMODE} = "verify-full"; + } write_rhn_conf($answers, 'externaldb-admin-user','externaldb-admin-password', 'report-db-backend', 'report-db-host', 'report-db-port', 'report-db-name', 'report-db-user', 'report-db-password', 'report-db-ssl-enabled'); diff --git a/spacewalk/setup/spacewalk-setup.changes.cbosdo.local-reportdb b/spacewalk/setup/spacewalk-setup.changes.cbosdo.local-reportdb new file mode 100644 index 000000000000..372a906b5d98 --- /dev/null +++ b/spacewalk/setup/spacewalk-setup.changes.cbosdo.local-reportdb @@ -0,0 +1 @@ +- Don't force ssl verification to setup reportdb using localhost diff --git a/spacewalk/setup/spacewalk-setup.changes.oholecek.tomcat-install-check b/spacewalk/setup/spacewalk-setup.changes.oholecek.tomcat-install-check new file mode 100644 index 000000000000..808a0993a282 --- /dev/null +++ b/spacewalk/setup/spacewalk-setup.changes.oholecek.tomcat-install-check @@ -0,0 +1 @@ +- Do not rely on rpm runtime status, rather check rhn.conf if is configured (bsc#1210935) diff --git a/spacewalk/setup/spacewalk-setup.spec b/spacewalk/setup/spacewalk-setup.spec index ef4e91bbc2ea..d60cec3ec2ef 100644 --- a/spacewalk/setup/spacewalk-setup.spec +++ b/spacewalk/setup/spacewalk-setup.spec @@ -194,30 +194,14 @@ install -Dd -m 0755 %{buildroot}%{_prefix}/share/salt-formulas/states install -Dd -m 0755 %{buildroot}%{_prefix}/share/salt-formulas/metadata %post -if [ $1 == 1 -a -e /etc/tomcat/server.xml ]; then -#just during new installation. 
during upgrade the changes are already applied - CURRENT_DATE=$(date +"%%Y-%%m-%%dT%%H:%%M:%%S.%%3N") - cp /etc/tomcat/server.xml /etc/tomcat/server.xml.$CURRENT_DATE - xsltproc %{_datadir}/spacewalk/setup/server.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml -fi - -if [ $1 == 2 -a -e /etc/tomcat/server.xml ]; then -#during upgrade, setup new connectionTimeout if the user didn't change it. Keeping it until SUMA 4.2 is maintained +if [ -f /etc/rhn/rhn.conf -a $(filesize /etc/rhn/rhn.conf) -gt 1 ]; then + # rhn.conf is configured, this is an upgrade + # during upgrade, setup new connectionTimeout if the user didn't change it. Keeping it until SUMA 4.2 is maintained CURRENT_DATE=$(date +"%%Y-%%m-%%dT%%H:%%M:%%S.%%3N") cp /etc/tomcat/server.xml /etc/tomcat/server.xml.$CURRENT_DATE xsltproc %{_datadir}/spacewalk/setup/server_update.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml fi -if [ -e /etc/zypp/credentials.d/SCCcredentials ]; then - chgrp www /etc/zypp/credentials.d/SCCcredentials - chmod g+r /etc/zypp/credentials.d/SCCcredentials -fi - -if [ -d /var/cache/salt/master/thin ]; then - # clean the thin cache - rm -rf /var/cache/salt/master/thin -fi - # sudoers file is now in /etc/sudoers.d/spacewalk if [ -f /etc/sudoers.d/spacewalk -a -f /etc/sudoers.d/susemanager ]; then # do not fail if one is just a link to the other one diff --git a/spacewalk/uyuni-setup-reportdb/bin/uyuni-setup-reportdb b/spacewalk/uyuni-setup-reportdb/bin/uyuni-setup-reportdb index 801d7727c6d4..bd59cac6fe79 100755 --- a/spacewalk/uyuni-setup-reportdb/bin/uyuni-setup-reportdb +++ b/spacewalk/uyuni-setup-reportdb/bin/uyuni-setup-reportdb @@ -112,7 +112,6 @@ if [ -x /usr/bin/lsof ]; then LSOF="/usr/bin/lsof" fi RUNUSER=runuser -FQDN=$(hostname -f) SSL_CERT=/etc/pki/tls/certs/spacewalk.crt SSL_KEY=/etc/pki/tls/private/pg-spacewalk.key CA_CERT=/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT @@ -126,7 +125,7 @@ create() { if $LOCAL ; then 
ADDRESS="127.0.0.1" REMOTE="127.0.0.1/32,::1/128" - FQDN="localhost" + HOST="localhost" else [ ! -s "$SSL_CERT" ] && { echo "SSL Certificate ($SSL_CERT) is required to setup the reporting database" >&2 @@ -254,7 +253,7 @@ report_db_backend = postgresql report_db_user = $PGUSER report_db_password = $PGPASSWORD report_db_name = $PGNAME -report_db_host = $FQDN +report_db_host = $HOST report_db_port = $PORT report_db_ssl_enabled = 1 report_db_sslrootcert = $CA_CERT @@ -264,11 +263,7 @@ EOF rhn_reconfig "report_db_user" "$PGUSER" rhn_reconfig "report_db_password" "$PGPASSWORD" rhn_reconfig "report_db_name" "$PGNAME" - if [ $EXTERNALDB = "0" ] ; then - rhn_reconfig "report_db_host" "$FQDN" - else - rhn_reconfig "report_db_host" "$HOST" - fi + rhn_reconfig "report_db_host" "$HOST" rhn_reconfig "report_db_port" "$PORT" if ! $LOCAL ; then rhn_reconfig "report_db_ssl_enabled" "1" @@ -289,8 +284,7 @@ EOF if $LSOF /proc > /dev/null ; then while [ -f "$PG_PIDFILE" ] ; do # wait for postmaster to be ready - $LSOF -t -p $(head -1 "$PG_PIDFILE" 2>/dev/null) -a "$PG_SOCKET" > /dev/null \ - && break + pg_isready -q -U $(grep -oP '^db_user ?= ?\K.*' $RHN_CONF) && break sleep 1 done fi @@ -617,12 +611,14 @@ while true ; do shift done -case $HOST in - "localhost"|$(hostname -s)|$(hostname -f)|"") - EXTERNALDB=0 ;; - *) - EXTERNALDB=1 ;; -esac +EXTERNALDB=0 +if [ -n "$EXTERNALDB_ADMIN_USER" ]; then + EXTERNALDB=1 +fi + +if [ -z "$HOST" ]; then + HOST=$(hostname -f) +fi case $1 in create) create diff --git a/susemanager/bin/mgr-setup b/susemanager/bin/mgr-setup index 3f4f6384d349..5ceda5e945f3 100755 --- a/susemanager/bin/mgr-setup +++ b/susemanager/bin/mgr-setup @@ -46,6 +46,10 @@ if [ ! $? -eq 0 ]; then exit 1 fi +if [[ ! 
-z "$TZ" ]]; then + timedatectl set-timezone $TZ +fi + TMPDIR="/var/spacewalk/tmp" DO_MIGRATION=0 DO_SETUP=0 @@ -53,6 +57,7 @@ LOGFILE="0" WAIT_BETWEEN_STEPS=0 MANAGER_FORCE_INSTALL=0 PROGRAM="/usr/lib/susemanager/bin/mgr-setup" +NON_INTERACTIVE=0 MIGRATION_ENV="/root/migration_env.sh" SETUP_ENV="/root/setup_env.sh" @@ -100,6 +105,7 @@ helper script to do migration or setup of $PRODUCT_NAME -s fresh setup of the $PRODUCT_NAME installation -w wait between steps (in case you do -r -m) -l LOGFILE write a log to LOGFILE + -n Don't ask anything, use default values -h this help screen @@ -118,6 +124,19 @@ wait_step() { fi; } +ask_input() { + # Ask for input if the variable is not already defined, could be set using an env variable + # Set to an empty string if running in non-interactive mode + VARIABLE=$1 + if [ -z ${!VARIABLE+x} ]; then + if [ $NON_INTERACTIVE -eq 0 ]; then + echo -n "$VARIABLE="; read $VARIABLE + else + declare $VARIABLE= + fi + fi +} + setup_swap() { SWAP=`LANG=C free | grep Swap: | sed -e "s/ \+/\t/g" | cut -f 2` @@ -129,8 +148,11 @@ if [ $SWAP -eq 0 ]; then echo "Not enough space on /. Not adding swap space. Good luck..." else FSTYPE=`df -T / | tail -1 | awk '{print $2}'` + # Ignore for overlay too if [ $FSTYPE == "btrfs" ]; then echo "Will *NOT* create swapfile on btrfs. Make sure you have enough RAM!" + elif [ $FSTYPE == "overlay" ]; then + echo "Will *NOT* create swapfile in a container!" 
else if [ -f /SWAPFILE ]; then swapoff /SWAPFILE @@ -150,25 +172,10 @@ fi } setup_mail () { - -# fix hostname for postfix -REALHOSTNAME=`hostname -f` -if [ -z "$REALHOSTNAME" ]; then - for i in `ip -f inet -o addr show scope global | awk '{print $4}' | awk -F \/ '{print $1}'`; do - for j in `dig +noall +answer +time=2 +tries=1 -x $i | awk '{print $5}' | sed 's/\.$//'`; do - if [ -n "$j" ]; then - REALHOSTNAME=$j - break 2 - fi - done - done -fi -if [ -n "$REALHOSTNAME" ]; then - echo "$REALHOSTNAME" > /etc/hostname -fi -# bsc#979664 - SUSE Manager requires a working mail system -systemctl --quiet enable postfix 2>&1 -systemctl restart postfix + postconf -e myhostname=$HOSTNAME + # bsc#979664 - SUSE Manager requires a working mail system + systemctl --quiet enable postfix 2>&1 + systemctl restart postfix } setup_hostname() { @@ -316,47 +323,52 @@ check_mksubvolume() { } check_btrfs_dirs() { -DIR="/var/spacewalk" -if [ ! -d $DIR ]; then - FSTYPE=`df -T \`dirname $DIR\` | tail -1 | awk '{print $2}'` - echo -n "Filesystem type for $DIR is $FSTYPE - " - if [ $FSTYPE == "btrfs" ]; then - check_mksubvolume - echo "creating nCoW subvolume." - mksubvolume --nocow $DIR +ROOT_FSTYPE=`df -T / | tail -1 | awk '{print $2}'` +if [ $ROOT_FSTYPE == "overlay" ]; then + echo "Skipping btrfs check in containers" +else + DIR="/var/spacewalk" + if [ ! -d $DIR ]; then + FSTYPE=`df -T \`dirname $DIR\` | tail -1 | awk '{print $2}'` + echo -n "Filesystem type for $DIR is $FSTYPE - " + if [ $FSTYPE == "btrfs" ]; then + check_mksubvolume + echo "creating nCoW subvolume." + mksubvolume --nocow $DIR + else + echo "ok." + fi else - echo "ok." + echo "$DIR already exists. Leaving it untouched." fi -else - echo "$DIR already exists. Leaving it untouched." -fi -DIR="/var/cache" -if [ ! 
-d $DIR ]; then - mkdir $DIR -fi -FSTYPE=`df -T $DIR | tail -1 | awk '{print $2}'` -echo -n "Filesystem type for $DIR is $FSTYPE - " -if [ $FSTYPE == "btrfs" ]; then - TESTDIR=`basename $DIR` - btrfs subvolume list /var | grep "$TESTDIR" > /dev/null - if [ ! $? -eq 0 ]; then - check_mksubvolume - echo "creating subvolume." - mv $DIR ${DIR}.sav - mksubvolume $DIR - touch ${DIR}.sav/foobar.dummy - if [ ! -d $DIR ]; then - mkdir $DIR + DIR="/var/cache" + if [ ! -d $DIR ]; then + mkdir $DIR + fi + FSTYPE=`df -T $DIR | tail -1 | awk '{print $2}'` + echo -n "Filesystem type for $DIR is $FSTYPE - " + if [ $FSTYPE == "btrfs" ]; then + TESTDIR=`basename $DIR` + btrfs subvolume list /var | grep "$TESTDIR" > /dev/null + if [ ! $? -eq 0 ]; then + check_mksubvolume + echo "creating subvolume." + mv $DIR ${DIR}.sav + mksubvolume $DIR + touch ${DIR}.sav/foobar.dummy + if [ ! -d $DIR ]; then + mkdir $DIR + fi + mv ${DIR}.sav/* $DIR + rmdir ${DIR}.sav + rm -f $DIR/foobar.dummy + else + echo "subvolume for $DIR already exists. Fine." fi - mv ${DIR}.sav/* $DIR - rmdir ${DIR}.sav - rm -f $DIR/foobar.dummy else - echo "subvolume for $DIR already exists. Fine." + echo "ok." fi -else - echo "ok." fi } @@ -396,6 +408,63 @@ if [ -f $MANAGER_COMPLETE ]; then fi } +setup_tftp_permission() { + if [ ! -d /srv/tftpboot ]; then + mkdir -p /srv/tftpboot + chmod 750 /srv/tftpboot + chown wwwrun:tftp /srv/tftpboot + fi + } + +backup_certificates() { + # we want to remove the cert from the package. 
+ # copy the cert to a backup place to restore them later + if [ -L /etc/pki/tls/certs/spacewalk.crt ]; then + cp /etc/pki/tls/certs/spacewalk.crt /etc/pki/tls/certs/uyuni.crt + fi + if [ -L /etc/pki/tls/private/spacewalk.key ]; then + cp /etc/pki/tls/private/spacewalk.key /etc/pki/tls/private/uyuni.key + fi +} + +setup_apache() { + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES wsgi + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES version + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES proxy + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES proxy_ajp + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES proxy_wstunnel + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES rewrite + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES headers + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES xsendfile + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES filter + sysconf_addword /etc/sysconfig/apache2 APACHE_MODULES deflate + sysconf_addword /etc/sysconfig/apache2 APACHE_SERVER_FLAGS SSL + sysconf_addword /etc/sysconfig/apache2 APACHE_SERVER_FLAGS ISSUSE + sysconf_addword -r /etc/sysconfig/apache2 APACHE_MODULES access_compat +} + +setup_tomcat() { + if [ -f /etc/rhn/rhn.conf -a $(filesize /etc/rhn/rhn.conf) -gt 1 ]; then + # rhn.conf is configured, this is an upgrade + # during upgrade, setup new connectionTimeout if the user didn't change it. 
Keeping it until SUMA 4.2 is maintained + CURRENT_DATE=$(date +"%%Y-%%m-%%dT%%H:%%M:%%S.%%3N") + cp /etc/tomcat/server.xml /etc/tomcat/server.xml.$CURRENT_DATE + xsltproc /usr/share/spacewalk/setup/server_update.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml + else + # rhn.conf does not exists or is empty, this is new installation or update of new installation + CURRENT_DATE=$(date +"%%Y-%%m-%%dT%%H:%%M:%%S.%%3N") + cp /etc/tomcat/server.xml /etc/tomcat/server.xml.$CURRENT_DATE + xsltproc /usr/share/spacewalk/setup/server.xml.xsl /etc/tomcat/server.xml.$CURRENT_DATE > /etc/tomcat/server.xml + fi +} + +change_SSCcredentials_permission() { + if [ -e /etc/zypp/credentials.d/SCCcredentials ]; then + chgrp www /etc/zypp/credentials.d/SCCcredentials + chmod g+r /etc/zypp/credentials.d/SCCcredentials + fi +} + setup_spacewalk() { CERT_COUNTRY=`echo -n $CERT_COUNTRY|tr '[:lower:]' '[:upper:]'` @@ -437,6 +506,7 @@ report-db-user=$REPORT_DB_USER report-db-password=$REPORT_DB_PASS enable-tftp=$MANAGER_ENABLE_TFTP product_name=$PRODUCT_NAME +hostname=$HOSTNAME " > /root/spacewalk-answers if [ -n "$SCC_USER" ]; then @@ -794,6 +864,7 @@ do_migration() { cleanup_hostname remove_ssh_key + if [ -d /root/.ssh.new ]; then mv /root/.ssh /root/.ssh.orig mv /root/.ssh.new /root/.ssh @@ -808,43 +879,43 @@ do_migration() { } do_setup() { - NO_SSL= if [ -f $SETUP_ENV ]; then . 
$SETUP_ENV else # ask for the needed values if the setup_env file does not exist - echo -n "MANAGER_USER="; read MANAGER_USER - echo -n "MANAGER_PASS="; read MANAGER_PASS - echo -n "MANAGER_ADMIN_EMAIL="; read MANAGER_ADMIN_EMAIL - echo -n "CERT_CNAMES=" ; read CERT_CNAMES - echo -n "CERT_O=" ; read CERT_O - echo -n "CERT_OU=" ; read CERT_OU - echo -n "CERT_CITY=" ; read CERT_CITY - echo -n "CERT_STATE=" ; read CERT_STATE - echo -n "CERT_COUNTRY=" ; read CERT_COUNTRY - echo -n "CERT_EMAIL=" ; read CERT_EMAIL - echo -n "CERT_PASS=" ; read CERT_PASS - echo -n "LOCAL_DB=" ; read LOCAL_DB - echo -n "DB_BACKEND=" ; read DB_BACKEND - echo -n "MANAGER_DB_NAME=" ; read MANAGER_DB_NAME - echo -n "MANAGER_DB_HOST=" ; read MANAGER_DB_HOST - echo -n "MANAGER_DB_PORT=" ; read MANAGER_DB_PORT - echo -n "MANAGER_DB_CA_CERT=" ; read MANAGER_DB_CA_CERT - echo -n "MANAGER_DB_PROTOCOL="; read MANAGER_DB_PROTOCOL - echo -n "MANAGER_ENABLE_TFTP="; read MANAGER_ENABLE_TFTP - echo -n "EXTERNALDB_ADMIN_USER=" ; read EXTERNALDB_ADMIN_USER - echo -n "EXTERNALDB_ADMIN_PASS=" ; read EXTERNALDB_ADMIN_PASS - echo -n "EXTERNALDB_PROVIDER="; read EXTERNALDB_PROVIDER - echo -n "SCC_USER=" ; read SCC_USER - echo -n "SCC_PASS=" ; read SCC_PASS - echo -n "ISS_PARENT=" ; read ISS_PARENT - echo -n "ACTIVATE_SLP=" ; read ACTIVATE_SLP - echo -n "REPORT_DB_NAME=" ; read REPORT_DB_NAME - echo -n "REPORT_DB_HOST=" ; read REPORT_DB_HOST - echo -n "REPORT_DB_PORT=" ; read REPORT_DB_PORT - echo -n "REPORT_DB_USER=" ; read REPORT_DB_USER - echo -n "REPORT_DB_PASS=" ; read REPORT_DB_PASS - echo -n "REPORT_DB_CA_CERT=" ; read REPORT_DB_CA_CERT + ask_input MANAGER_USER + ask_input MANAGER_PASS + ask_input MANAGER_ADMIN_EMAIL + ask_input CERT_CNAMES + ask_input CERT_O + ask_input CERT_OU + ask_input CERT_CITY + ask_input CERT_STATE + ask_input CERT_COUNTRY + ask_input CERT_EMAIL + ask_input CERT_PASS + ask_input LOCAL_DB + ask_input DB_BACKEND + ask_input MANAGER_DB_NAME + ask_input MANAGER_DB_HOST + ask_input 
MANAGER_DB_PORT + ask_input MANAGER_DB_CA_CERT + ask_input MANAGER_DB_PROTOCOL + ask_input MANAGER_ENABLE_TFTP + ask_input EXTERNALDB_ADMIN_USER + ask_input EXTERNALDB_ADMIN_PASS + ask_input EXTERNALDB_PROVIDER + ask_input SCC_USER + ask_input SCC_PASS + ask_input ISS_PARENT + ask_input ACTIVATE_SLP + ask_input REPORT_DB_NAME + ask_input REPORT_DB_HOST + ask_input REPORT_DB_PORT + ask_input REPORT_DB_USER + ask_input REPORT_DB_PASS + ask_input REPORT_DB_CA_CERT + ask_input UYUNI_FQDN fi; if [ -z "$SYS_DB_PASS" ]; then SYS_DB_PASS=`dd if=/dev/urandom bs=16 count=4 2> /dev/null | md5sum | cut -b 1-8` @@ -878,6 +949,13 @@ do_setup() { EXTERNALDB=1 ;; esac + if [ -z "$NO_SSL" ]; then + NO_SSL= + fi + if [ -n "$UYUNI_FQDN" ]; then + HOSTNAME=$UYUNI_FQDN + fi + check_re_install echo "Do not delete this file unless you know what you are doing!" > $MANAGER_COMPLETE setup_swap @@ -887,6 +965,18 @@ do_setup() { fi setup_spacewalk + setup_apache + setup_tomcat + change_SSCcredentials_permission + + backup_certificates + + setup_tftp_permission + + # In the container case, we have the MIRROR_PATH environment variable at setup + if [ -n "$MIRROR_PATH" ]; then + echo "server.susemanager.fromdir = $MIRROR_PATH" >> /etc/rhn/rhn.conf + fi if [ -n "$ISS_PARENT" ]; then local certname=`echo "MASTER-$ISS_PARENT-TRUSTED-SSL-CERT" | sed 's/\./_/g'` @@ -963,6 +1053,9 @@ do -w) WAIT_BETWEEN_STEPS=1 ;; + -n) + NON_INTERACTIVE=1 + ;; *) echo echo "Option \"$p\" is not recognized. Type \"$PROGRAM -h\" for help." 
diff --git a/susemanager/empty-repo.conf b/susemanager/empty-repo.conf new file mode 100644 index 000000000000..82b468721cff --- /dev/null +++ b/susemanager/empty-repo.conf @@ -0,0 +1,2 @@ +RewriteRule ^/pub/repositories/empty/(.*)$ /gpg/repositories/empty/$1 [L,PT] +RewriteRule ^/pub/repositories/empty-deb/(.*)$ /gpg/repositories/empty-deb/$1 [L,PT] diff --git a/susemanager/susemanager.spec b/susemanager/susemanager.spec index 5ea49028fd0a..986a0a07798c 100644 --- a/susemanager/susemanager.spec +++ b/susemanager/susemanager.spec @@ -30,7 +30,6 @@ %global salt_group root %global serverdir %{_sharedstatedir} %global wwwroot %{_localstatedir}/www -%global wwwdocroot %{wwwroot}/html %endif %if 0%{?suse_version} @@ -41,9 +40,10 @@ %global salt_group salt %global serverdir /srv %global wwwroot %{serverdir}/www -%global wwwdocroot %{wwwroot}/htdocs %endif +%global reporoot %{_datarootdir}/susemanager/gpg/ + %global debug_package %{nil} Name: susemanager @@ -156,6 +156,7 @@ Requires: spacewalk-backend-sql Requires: spacewalk-common Requires: susemanager-build-keys Requires: susemanager-sync-data +Requires: uyuni-build-keys BuildRequires: docbook-utils %description tools @@ -189,13 +190,15 @@ install -m 0644 etc/logrotate.d/susemanager-tools %{buildroot}/%{_sysconfdir}/lo install -m 0644 etc/slp.reg.d/susemanager.reg %{buildroot}/%{_sysconfdir}/slp.reg.d make -C src install PREFIX=$RPM_BUILD_ROOT PYTHON_BIN=%{pythonX} MANDIR=%{_mandir} install -d -m 755 %{buildroot}/%{wwwroot}/os-images/ +mkdir -p %{buildroot}/etc/apache2/conf.d +install empty-repo.conf %{buildroot}/etc/apache2/conf.d/empty-repo.conf # empty repo for rhel base channels -mkdir -p %{buildroot}%{wwwdocroot}/pub/repositories/ -cp -r pub/empty %{buildroot}%{wwwdocroot}/pub/repositories/ +mkdir -p %{buildroot}%{reporoot}/repositories/ +cp -r pub/empty %{buildroot}%{reporoot}/repositories/ # empty repo for Ubuntu base fake channel -cp -r pub/empty-deb %{buildroot}%{wwwdocroot}/pub/repositories/ +cp -r 
pub/empty-deb %{buildroot}%{reporoot}/repositories/ # YaST configuration mkdir -p %{buildroot}%{_datadir}/YaST2/clients @@ -245,18 +248,6 @@ popd %post POST_ARG=$1 -if [ -f /etc/sysconfig/atftpd ]; then - . /etc/sysconfig/atftpd - if [ $ATFTPD_DIRECTORY = "/tftpboot" ]; then - sysconf_addword -r /etc/sysconfig/atftpd ATFTPD_DIRECTORY "/tftpboot" - sysconf_addword /etc/sysconfig/atftpd ATFTPD_DIRECTORY "%{serverdir}/tftpboot" - fi -fi -if [ ! -d %{serverdir}/tftpboot ]; then - mkdir -p %{serverdir}/tftpboot - chmod 750 %{serverdir}/tftpboot - chown %{apache_user}:%{tftp_group} %{serverdir}/tftpboot -fi # XE appliance overlay file created this with different user chown root.root /etc/sysconfig if [ $POST_ARG -eq 2 ] ; then @@ -324,11 +315,13 @@ sed -i '/You can access .* via https:\/\//d' /tmp/motd 2> /dev/null ||: %dir %{pythonsmroot}/susemanager %dir %{_prefix}/share/rhn/ %dir %{_datadir}/susemanager -%dir %{wwwdocroot}/pub -%dir %{wwwdocroot}/pub/repositories -%dir %{wwwdocroot}/pub/repositories/empty -%dir %{wwwdocroot}/pub/repositories/empty/repodata -%dir %{wwwdocroot}/pub/repositories/empty-deb +%dir %{reporoot} +%dir %{reporoot}/repositories +%dir %{reporoot}/repositories/empty +%dir %{reporoot}/repositories/empty/repodata +%dir %{reporoot}/repositories/empty-deb +%dir /etc/apache2 +%dir /etc/apache2/conf.d %config(noreplace) %{_sysconfdir}/logrotate.d/susemanager-tools %{_prefix}/share/rhn/config-defaults/rhn_*.conf %attr(0755,root,root) %{_bindir}/mgr-salt-ssh @@ -351,8 +344,9 @@ sed -i '/You can access .* via https:\/\//d' /tmp/motd 2> /dev/null ||: %{_datadir}/susemanager/__pycache__/ %endif %{_mandir}/man8/mgr-sync.8* -%{wwwdocroot}/pub/repositories/empty/repodata/*.xml* -%{wwwdocroot}/pub/repositories/empty-deb/Packages -%{wwwdocroot}/pub/repositories/empty-deb/Release +%{reporoot}/repositories/empty/repodata/*.xml* +%{reporoot}/repositories/empty-deb/Packages +%{reporoot}/repositories/empty-deb/Release +/etc/apache2/conf.d/empty-repo.conf %changelog 
diff --git a/testsuite/features/core/allcli_sanity.feature b/testsuite/features/core/allcli_sanity.feature index 4949116b4fca..e28d0750c9f5 100644 --- a/testsuite/features/core/allcli_sanity.feature +++ b/testsuite/features/core/allcli_sanity.feature @@ -6,8 +6,7 @@ Feature: Sanity checks I want to be sure to use a sane environment Scenario: The server is healthy - Then "server" should have a FQDN - And reverse resolution should work for "server" + Then reverse resolution should work for "server" And the clock from "server" should be exact And service "apache2" is enabled on "server" And service "apache2" is active on "server" diff --git a/testsuite/features/core/srv_channels_add.feature b/testsuite/features/core/srv_channels_add.feature index 20a94fc833b6..2baa076144c8 100644 --- a/testsuite/features/core/srv_channels_add.feature +++ b/testsuite/features/core/srv_channels_add.feature @@ -3,7 +3,7 @@ # # This feature can cause failures in: # - features/core/srv_create_activationkey.feature -# - features/core/srv_create_repository.feature +# - features/reposync/srv_create_repository.feature # - features/init_client/sle_minion.feature # - features/init_client/sle_ssh_minion.feature # - features/init_client/min_rhlike.feature diff --git a/testsuite/features/core/srv_create_repository.feature b/testsuite/features/reposync/srv_create_repository.feature similarity index 100% rename from testsuite/features/core/srv_create_repository.feature rename to testsuite/features/reposync/srv_create_repository.feature diff --git a/testsuite/features/secondary/allcli_system_group.feature b/testsuite/features/secondary/allcli_system_group.feature index 78e748ded1ec..109e26cf7f59 100644 --- a/testsuite/features/secondary/allcli_system_group.feature +++ b/testsuite/features/secondary/allcli_system_group.feature @@ -60,9 +60,13 @@ Feature: Manage a group of systems And I should see "rhlike_minion" as link And I should see "sle_minion" as link - Scenario: Install some formula on the 
server - When I manually install the "locale" formula on the server - And I synchronize all Salt dynamic modules on "sle_minion" + #container already has locale formula installed + @skip_if_container_server + Scenario: Install the locale formula package on the server + When I manually install the "locale" formula on the server + + Scenario: I synchronize all Salt dynamic modules on "sle_minion" + When I synchronize all Salt dynamic modules on "sle_minion" Scenario: New formula page is rendered for the system group When I follow the left menu "Systems > System Groups" @@ -101,6 +105,7 @@ Feature: Manage a group of systems # Red Hat-like minion is intentionally not removed from group + @skip_if_container_server Scenario: Cleanup: uninstall formula from the server When I manually uninstall the "locale" formula from the server diff --git a/testsuite/features/secondary/min_salt_formulas.feature b/testsuite/features/secondary/min_salt_formulas.feature index 646adcee0577..d5177dcc453e 100644 --- a/testsuite/features/secondary/min_salt_formulas.feature +++ b/testsuite/features/secondary/min_salt_formulas.feature @@ -11,9 +11,13 @@ Feature: Use salt formulas Scenario: Log in as admin user Given I am authorized for the "Admin" section + #container already has locale formula installed + @skip_if_container_server Scenario: Install the locale formula package on the server When I manually install the "locale" formula on the server - And I synchronize all Salt dynamic modules on "sle_minion" + + Scenario: I synchronize all Salt dynamic modules on "sle_minion" + When I synchronize all Salt dynamic modules on "sle_minion" Scenario: The new formula appears on the server When I follow the left menu "Salt > Formula Catalog" @@ -161,6 +165,7 @@ Feature: Use salt formulas And the keymap on "sle_minion" should be "us" And the language on "sle_minion" should be "en_US.UTF-8" + @skip_if_container_server Scenario: Cleanup: uninstall formula package from the server When I manually uninstall 
the "locale" formula from the server diff --git a/testsuite/features/step_definitions/command_steps.rb b/testsuite/features/step_definitions/command_steps.rb index 2704bb975bbd..30043bb211a0 100644 --- a/testsuite/features/step_definitions/command_steps.rb +++ b/testsuite/features/step_definitions/command_steps.rb @@ -455,7 +455,7 @@ When(/^I fetch "([^"]*)" to "([^"]*)"$/) do |file, host| node = get_target(host) - node.run("wget http://#{$server.full_hostname}/#{file}") + node.run("curl -s -O http://#{$server.full_hostname}/#{file}") end When(/^I wait until file "([^"]*)" contains "([^"]*)" on server$/) do |file, content| @@ -581,8 +581,8 @@ return_code = file_inject($server, source, dest) raise 'File injection failed' unless return_code.zero? end - $server.run('curl --output DSP2043_2019.1.zip https://www.dmtf.org/sites/default/files/standards/documents/DSP2043_2019.1.zip') - $server.run('unzip DSP2043_2019.1.zip') + $server.run('curl --output /root/DSP2043_2019.1.zip https://www.dmtf.org/sites/default/files/standards/documents/DSP2043_2019.1.zip') + $server.run('unzip /root/DSP2043_2019.1.zip -d /root/') cmd = "/usr/bin/python3 /root/Redfish-Mockup-Server/redfishMockupServer.py " \ "-H #{$server.full_hostname} -p 8443 " \ "-S -D /root/DSP2043_2019.1/public-catfish/ " \ @@ -1002,35 +1002,91 @@ end When(/^I copy server\'s keys to the proxy$/) do - %w[RHN-ORG-PRIVATE-SSL-KEY RHN-ORG-TRUSTED-SSL-CERT rhn-ca-openssl.cnf].each do |file| - return_code = file_extract($server, '/root/ssl-build/' + file, '/tmp/' + file) - raise 'File extraction failed' unless return_code.zero? - $proxy.run('mkdir -p /root/ssl-build') - return_code = file_inject($proxy, '/tmp/' + file, '/root/ssl-build/' + file) - raise 'File injection failed' unless return_code.zero? + _out, code = $server.run_local("systemctl is-active k3s", check_errors: false) + if code.zero? 
+ # Server running in Kubernetes doesn't know anything about SSL CA + certificate = "apiVersion: cert-manager.io/v1\\n"\ + "kind: Certificate\\n"\ + "metadata:\\n"\ + " name: uyuni-proxy\\n"\ + "spec:\\n"\ + " secretName: uyuni-proxy-cert\\n"\ + " subject:\\n"\ + " countries: ['DE']\\n"\ + " provinces: ['Bayern']\\n"\ + " localities: ['Nuernberg']\\n"\ + " organizations: ['SUSE']\\n"\ + " organizationalUnits: ['SUSE']\\n"\ + " emailAddresses:\\n"\ + " - galaxy-noise@suse.de\\n"\ + " commonName: #{$proxy.full_hostname}\\n"\ + " dnsNames:\\n"\ + " - #{$proxy.full_hostname}\\n"\ + " issuerRef:\\n"\ + " name: uyuni-ca-issuer\\n"\ + " kind: Issuer" + _out, return_code = $server.run_local("echo -e \"#{certificate}\" | kubectl apply -f -") + raise 'Failed to define proxy Certificate resource' unless return_code.zero? + # cert-manager takes some time to generate the secret, wait for it before continuing + repeat_until_timeout(timeout: 600, message: "Kubernetes uyuni-proxy-cert secret has not been defined") do + _result, code = $server.run_local("kubectl get secret uyuni-proxy-cert", check_errors: false) + break if code.zero? + sleep 1 + end + _out, return_code = $server.run_local("kubectl get secret uyuni-proxy-cert -o jsonpath='{.data.tls\\.crt}' | base64 -d >/tmp/proxy.crt") + raise 'Failed to store proxy certificate' unless return_code.zero? + _out, return_code = $server.run_local("kubectl get secret uyuni-proxy-cert -o jsonpath='{.data.tls\\.key}' | base64 -d >/tmp/proxy.key") + raise 'Failed to store proxy key' unless return_code.zero? + _out, return_code = $server.run_local("kubectl get secret uyuni-proxy-cert -o jsonpath='{.data.ca\\.crt}' | base64 -d >/tmp/ca.crt") + raise 'Failed to store CA certificate' unless return_code.zero? + + %w[proxy.crt proxy.key ca.crt].each do |file| + return_code, = $server.extract_file("/tmp/#{file}", "/tmp/#{file}") + raise 'File extraction failed' unless return_code.zero? 
+ return_code = file_inject($proxy, "/tmp/#{file}", "/tmp/#{file}") + raise 'File injection failed' unless return_code.zero? + end + else + %w[RHN-ORG-PRIVATE-SSL-KEY RHN-ORG-TRUSTED-SSL-CERT rhn-ca-openssl.cnf].each do |file| + return_code = file_extract($server, '/root/ssl-build/' + file, '/tmp/' + file) + raise 'File extraction failed' unless return_code.zero? + $proxy.run('mkdir -p /root/ssl-build') + return_code = file_inject($proxy, '/tmp/' + file, '/root/ssl-build/' + file) + raise 'File injection failed' unless return_code.zero? + end end end When(/^I configure the proxy$/) do + _out, code = $server.run_local("systemctl is-active k3s", check_errors: false) + # prepare the settings file settings = "RHN_PARENT=#{$server.full_hostname}\n" \ "HTTP_PROXY=''\n" \ "VERSION=''\n" \ "TRACEBACK_EMAIL=galaxy-noise@suse.de\n" \ - "USE_EXISTING_CERTS=n\n" \ "INSTALL_MONITORING=n\n" \ - "SSL_PASSWORD=spacewalk\n" \ - "SSL_ORG=SUSE\n" \ - "SSL_ORGUNIT=SUSE\n" \ - "SSL_COMMON=#{$proxy.full_hostname}\n" \ - "SSL_CITY=Nuremberg\n" \ - "SSL_STATE=Bayern\n" \ - "SSL_COUNTRY=DE\n" \ - "SSL_EMAIL=galaxy-noise@suse.de\n" \ - "SSL_CNAME_ASK=proxy.example.org\n" \ "POPULATE_CONFIG_CHANNEL=y\n" \ "RHN_USER=admin\n" \ "ACTIVATE_SLP=y\n" + if code.zero? + settings += "USE_EXISTING_CERTS=y\n" \ + "CA_CERT=/tmp/ca.crt\n" \ + "SERVER_KEY=/tmp/proxy.key\n" \ + "SERVER_CERT=/tmp/proxy.crt\n" + else + settings += "USE_EXISTING_CERTS=n\n" \ + "INSTALL_MONITORING=n\n" \ + "SSL_PASSWORD=spacewalk\n" \ + "SSL_ORG=SUSE\n" \ + "SSL_ORGUNIT=SUSE\n" \ + "SSL_COMMON=#{$proxy.full_hostname}\n" \ + "SSL_CITY=Nuremberg\n" \ + "SSL_STATE=Bayern\n" \ + "SSL_COUNTRY=DE\n" \ + "SSL_EMAIL=galaxy-noise@suse.de\n" \ + "SSL_CNAME_ASK=proxy.example.org\n" + end path = generate_temp_file('config-answers.txt', settings) step 'I copy "' + path + '" to "proxy"' `rm #{path}` @@ -1120,7 +1176,7 @@ next unless refresh_result.include? 
node node_refreshes += "^#{refresh_id}|" end - cmd = "spacecmd -u admin -p admin schedule_list #{current_time} #{timeout_time} | egrep '#{node_refreshes.delete_suffix('|')}'" + cmd = "spacecmd -u admin -p admin schedule_list #{current_time} #{timeout_time} | egrep '#{node_refreshes.delete_suffix('\|')}'" repeat_until_timeout(timeout: long_wait_delay, message: "'refresh package list' did not finish") do result, code = $server.run(cmd, check_errors: false) sleep 1 diff --git a/testsuite/features/step_definitions/common_steps.rb b/testsuite/features/step_definitions/common_steps.rb index 26f0b089b218..c976e1234011 100644 --- a/testsuite/features/step_definitions/common_steps.rb +++ b/testsuite/features/step_definitions/common_steps.rb @@ -504,7 +504,7 @@ end When(/^I push package "([^"]*)" into "([^"]*)" channel$/) do |arg1, arg2| - srvurl = "http://#{ENV['SERVER']}/APP" + srvurl = "https://#{ENV['SERVER']}/APP" command = "rhnpush --server=#{srvurl} -u admin -p admin --nosig -c #{arg2} #{arg1} " $server.run(command, timeout: 500) $server.run('ls -lR /var/spacewalk/packages', timeout: 500) diff --git a/testsuite/features/step_definitions/navigation_steps.rb b/testsuite/features/step_definitions/navigation_steps.rb index ed095647bc65..681fd6d790cb 100644 --- a/testsuite/features/step_definitions/navigation_steps.rb +++ b/testsuite/features/step_definitions/navigation_steps.rb @@ -452,9 +452,8 @@ system_name = get_system_name(host) rescue raise "Host #{host} not found" if if_present.empty? 
- log "Host #{host} is not deployed, not trying to select it" - return + next end step %(I select "#{system_name}" from "#{field}") end diff --git a/testsuite/features/step_definitions/salt_steps.rb b/testsuite/features/step_definitions/salt_steps.rb index 1d683c6b91cf..3cbf15a0b3cc 100644 --- a/testsuite/features/step_definitions/salt_steps.rb +++ b/testsuite/features/step_definitions/salt_steps.rb @@ -198,11 +198,14 @@ end # Salt formulas + +@skip_if_container_server When(/^I manually install the "([^"]*)" formula on the server$/) do |package| $server.run("zypper --non-interactive refresh") $server.run("zypper --non-interactive install --force #{package}-formula") end +@skip_if_container_server When(/^I manually uninstall the "([^"]*)" formula from the server$/) do |package| $server.run("zypper --non-interactive remove #{package}-formula") # Remove automatically installed dependency if needed diff --git a/testsuite/features/support/commonlib.rb b/testsuite/features/support/commonlib.rb index 7b135caf9d6a..3d1e15892803 100644 --- a/testsuite/features/support/commonlib.rb +++ b/testsuite/features/support/commonlib.rb @@ -129,7 +129,7 @@ def format_detail(message, last_result, report_result) def click_button_and_wait(locator = nil, **options) click_button(locator, options) begin - raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 5) + raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 20) rescue StandardError, Capybara::ExpectationNotMet => e STDOUT.puts e.message # Skip errors related to .senna-loading element end @@ -138,7 +138,7 @@ def click_button_and_wait(locator = nil, **options) def click_link_and_wait(locator = nil, **options) click_link(locator, options) begin - raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 5) + raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 20) 
rescue StandardError, Capybara::ExpectationNotMet => e STDOUT.puts e.message # Skip errors related to .senna-loading element end @@ -147,7 +147,7 @@ def click_link_and_wait(locator = nil, **options) def click_link_or_button_and_wait(locator = nil, **options) click_link_or_button(locator, options) begin - raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 5) + raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 20) rescue StandardError, Capybara::ExpectationNotMet => e STDOUT.puts e.message # Skip errors related to .senna-loading element end @@ -158,7 +158,7 @@ module CapybaraNodeElementExtension def click super begin - raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 5) + raise 'Timeout: Waiting AJAX transition (click link)' unless has_no_css?('.senna-loading', wait: 20) rescue StandardError, Capybara::ExpectationNotMet => e STDOUT.puts e.message # Skip errors related to .senna-loading element end diff --git a/testsuite/features/support/env.rb b/testsuite/features/support/env.rb index 4ce81dcd97a3..97d7989081ba 100644 --- a/testsuite/features/support/env.rb +++ b/testsuite/features/support/env.rb @@ -180,7 +180,7 @@ def process_code_coverage AfterStep do if has_css?('.senna-loading', wait: 0) log 'WARN: Step ends with an ajax transition not finished, let\'s wait a bit!' - log 'Timeout: Waiting AJAX transition' unless has_no_css?('.senna-loading', wait: 20) + log 'Timeout: Waiting AJAX transition' unless has_no_css?('.senna-loading', wait: 40) end end diff --git a/testsuite/features/support/lavanda.rb b/testsuite/features/support/lavanda.rb index 79e8bbe9c3d3..94dece26c33f 100644 --- a/testsuite/features/support/lavanda.rb +++ b/testsuite/features/support/lavanda.rb @@ -77,6 +77,12 @@ def init_os_version(os_version) @in_os_version = os_version end + ## + # Initializes the @in_has_uyunictl variable to true. 
+ def init_has_uyunictl + @in_has_uyunictl = true + end + # getter functions, executed on testsuite def hostname raise 'empty hostname, something wrong' if @in_hostname.empty? @@ -145,6 +151,27 @@ def os_version # buffer_size: The maximum buffer size in bytes. Defaults to 65536. # verbose: Whether to log the output of the command in case of success. Defaults to false. def run(cmd, separated_results: false, check_errors: true, timeout: DEFAULT_TIMEOUT, user: 'root', successcodes: [0], buffer_size: 65536, verbose: false) + cmd_prefixed = cmd + if @in_has_uyunictl + cmd_prefixed = "uyunictl exec -i '#{cmd.gsub(/'/, '\'"\'"\'')}'" + print "#{cmd_prefixed}\n" + end + run_local(cmd_prefixed, separated_results: separated_results, check_errors: check_errors, timeout: timeout, user: user, successcodes: successcodes, buffer_size: buffer_size, verbose: verbose) + end + + ## + # It runs a command, and returns the output, error, and exit code. + # + # Args: + # cmd: The command to run. + # separated_results: Whether the results should be stored separately. Defaults to false. + # check_errors: Whether to check for errors or not. Defaults to true. + # timeout: The timeout to be used, in seconds. Defaults to 250 or the value of the DEFAULT_TIMEOUT environment variable. + # user: The user to be used to run the command. Defaults to root. + # successcodes: An array with the values to be accepted as success codes from the command run. + # buffer_size: The maximum buffer size in bytes. Defaults to 65536. + # verbose: Whether to log the output of the command in case of success. Defaults to false. 
+ def run_local(cmd, separated_results: false, check_errors: true, timeout: DEFAULT_TIMEOUT, user: 'root', successcodes: [0], buffer_size: 65536, verbose: false) if separated_results out, err, _lo, _rem, code = test_and_store_results_separately(cmd, user, timeout, buffer_size) else @@ -202,4 +229,75 @@ def wait_while_process_running(process) result end end + + def inject(local_file, remote_file, user = "root", dots = true) + if @in_has_uyunictl + tmp_folder, _code = run_local("mktemp -d") + tmp_file = File.join(tmp_folder.strip, File.basename(local_file)) + code, _remote = inject_file(local_file, tmp_file, user, dots) + if code.zero? + _out, code = run_local("uyunictl cp --user #{user} #{tmp_file} server:#{remote_file}") + raise "Failed to copy #{tmp_file} to container" unless code.zero? + end + run_local("rm -r #{tmp_folder}") + else + code, _remote = inject_file(local_file, remote_file, user, dots) + end + code + end + + def extract(remote_file, local_file, user = "root", dots = true) + if @in_has_uyunictl + tmp_folder, _code = run_local("mktemp -d") + tmp_file = File.join(tmp_folder.strip, File.basename(remote_file)) + _out, code = run_local("uyunictl cp --user #{user} server:#{remote_file} #{tmp_file}") + raise "Failed to extract #{remote_file} from container" unless code.zero? + code, _remote = extract_file(tmp_file, local_file, user, dots) + raise "Failed to extract #{tmp_file} from host" unless code.zero? + run_local("rm -r #{tmp_folder}") + else + code, _local = extract_file(remote_file, local_file, user, dots) + end + code + end + + def file_exists(file) + if @in_has_uyunictl + _out, code = run_local("uyunictl exec -- 'test -f #{file}'", check_errors: false) + exists = code.zero? + else + _out, local, _remote, code = test_and_store_results_together("test -f #{file}", 'root', 500) + exists = code.zero? && local.zero? 
+ end + exists + end + + def folder_exists(file) + if @in_has_uyunictl + _out, code = run_local("uyunictl exec -- 'test -d #{file}'", check_errors: false) + exists = code.zero? + else + _out, local, _remote, code = test_and_store_results_together("test -d #{file}", 'root', 500) + exists = code.zero? && local.zero? + end + exists + end + + def file_delete(file) + if @in_has_uyunictl + _out, code = run_local("uyunictl exec -- 'rm #{file}'") + else + _out, _local, _remote, code = test_and_store_results_together("rm #{file}", 'root', 500) + end + code + end + + def folder_delete(folder) + if @in_has_uyunictl + _out, code = run_local("uyunictl exec -- 'rm -rf #{folder}'") + else + _out, _local, _remote, code = test_and_store_results_together("rm-rf #{folder}", 'root', 500) + end + code + end end diff --git a/testsuite/features/support/twopence_init.rb b/testsuite/features/support/twopence_init.rb index 611eb0b6c9d0..128a698970d5 100644 --- a/testsuite/features/support/twopence_init.rb +++ b/testsuite/features/support/twopence_init.rb @@ -139,25 +139,38 @@ def twopence_init(target) node.extend(LavandaBasic) end +_out, code = $server.run('which uyunictl', check_errors: false) +if code.zero? + $server.init_has_uyunictl +end + # Initialize hostname $nodes.each do |node| next if node.nil? - hostname, local, remote, code = node.test_and_store_results_together('hostname', 'root', 500) - # special handling for nested VMs since they will only be crated later in the test suite - # we to a late hostname initialization in a special step for those - next if hostname.empty? || node == $salt_migration_minion - - raise "Cannot connect to get hostname for '#{$named_nodes[node.hash]}'. Response code: #{code}, local: #{local}, remote: #{remote}" if code.nonzero? || remote.nonzero? || local.nonzero? - raise "No hostname for '#{$named_nodes[node.hash]}'. Response code: #{code}" if hostname.empty? 
- node.init_hostname(hostname) - - fqdn, local, remote, code = node.test_and_store_results_together('hostname -f', 'root', 500) - raise "Cannot connect to get FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}, local: #{local}, remote: #{remote}" if code.nonzero? || remote.nonzero? || local.nonzero? - raise "No FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}" if fqdn.empty? - node.init_full_hostname(fqdn) + if node == $server + fqdn, code = node.run('sed -n \'s/^java.hostname *= *\(.\+\)$/\1/p\' /etc/rhn/rhn.conf') + raise "Cannot connect to get FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}, local: #{local}, remote: #{remote}" if code.nonzero? + raise "No FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}" if fqdn.empty? + node.init_full_hostname(fqdn) + node.init_hostname(fqdn.split(".")[0]) + else + hostname, local, remote, code = node.test_and_store_results_together('hostname', 'root', 500) + # special handling for nested VMs since they will only be crated later in the test suite + # we to a late hostname initialization in a special step for those + next if hostname.empty? || node == $salt_migration_minion + + raise "Cannot connect to get hostname for '#{$named_nodes[node.hash]}'. Response code: #{code}, local: #{local}, remote: #{remote}" if code.nonzero? || remote.nonzero? || local.nonzero? + raise "No hostname for '#{$named_nodes[node.hash]}'. Response code: #{code}" if hostname.empty? + node.init_hostname(hostname) + + fqdn, local, remote, code = node.test_and_store_results_together('hostname -f', 'root', 500) + raise "Cannot connect to get FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}, local: #{local}, remote: #{remote}" if code.nonzero? || remote.nonzero? || local.nonzero? + raise "No FQDN for '#{$named_nodes[node.hash]}'. Response code: #{code}" if fqdn.empty? 
+ node.init_full_hostname(fqdn) + end - STDOUT.puts "Host '#{$named_nodes[node.hash]}' is alive with determined hostname #{hostname.strip} and FQDN #{fqdn.strip}" unless $build_validation + STDOUT.puts "Host '#{$named_nodes[node.hash]}' is alive with determined hostname #{node.hostname} and FQDN #{node.full_hostname}" unless $build_validation os_version, os_family = get_os_version(node) node.init_os_family(os_family) node.init_os_version(os_version) @@ -199,13 +212,8 @@ def get_system_name(host) when 'containerized_proxy' system_name = $proxy.full_hostname.sub('pxy', 'pod-pxy') else - begin - node = get_target(host) - system_name = node.full_hostname - rescue RuntimeError - # If the node for that host is not defined, just return the host parameter as system_name - system_name = host - end + node = get_target(host) + system_name = node.full_hostname end system_name end @@ -230,38 +238,32 @@ def net_prefix # This function tests whether a file exists on a node def file_exists?(node, file) - _out, local, _remote, code = node.test_and_store_results_together("test -f #{file}", 'root', 500) - code.zero? && local.zero? + node.file_exists(file) end # This function tests whether a folder exists on a node def folder_exists?(node, file) - _out, local, _remote, code = node.test_and_store_results_together("test -d #{file}", 'root', 500) - code.zero? && local.zero? 
+ node.folder_exists(file) end # This function deletes a file from a node def file_delete(node, file) - _out, _local, _remote, code = node.test_and_store_results_together("rm #{file}", 'root', 500) - code + node.file_delete(file) end # This function deletes a file from a node def folder_delete(node, folder) - _out, _local, _remote, code = node.test_and_store_results_together("rm -rf #{folder}", 'root', 500) - code + node.folder_delete(folder) end # This function extracts a file from a node def file_extract(node, remote_file, local_file) - code, _remote = node.extract_file(remote_file, local_file, 'root', false) - code + node.extract(remote_file, local_file, 'root', false) end # This function injects a file into a node def file_inject(node, local_file, remote_file) - code, _remote = node.inject_file(local_file, remote_file, 'root', false) - code + node.inject(local_file, remote_file, 'root', false) end # Other global variables @@ -364,7 +366,7 @@ def client_public_ip(host) raise "Cannot resolve node for host '#{host}'" if node.nil? %w[br0 eth0 eth1 ens0 ens1 ens2 ens3 ens4 ens5 ens6].each do |dev| - output, code = node.run("ip address show dev #{dev} | grep 'inet '", check_errors: false) + output, code = node.run_local("ip address show dev #{dev} | grep 'inet '", check_errors: false) next unless code.zero? 
node.init_public_interface(dev) diff --git a/testsuite/run_sets/core.yml b/testsuite/run_sets/core.yml index 541c79e8dc03..0617abde816a 100644 --- a/testsuite/run_sets/core.yml +++ b/testsuite/run_sets/core.yml @@ -14,7 +14,6 @@ - features/core/srv_organization_credentials.feature - features/core/srv_user_preferences.feature - features/core/srv_channels_add.feature -- features/core/srv_create_repository.feature - features/core/srv_create_activationkey.feature - features/core/srv_osimage.feature - features/core/srv_docker.feature diff --git a/testsuite/run_sets/refhost.yml b/testsuite/run_sets/refhost.yml index cd09326b7421..02ee8f32dd25 100644 --- a/testsuite/run_sets/refhost.yml +++ b/testsuite/run_sets/refhost.yml @@ -12,7 +12,7 @@ - features/core/allcli_sanity.feature - features/core/srv_first_settings.feature - features/core/srv_channels_add.feature -- features/core/srv_create_repository.feature +- features/reposync/srv_create_repository.feature - features/core/srv_create_activationkey.feature - features/core/srv_docker.feature diff --git a/testsuite/run_sets/reposync.yml b/testsuite/run_sets/reposync.yml index 87664f936ab5..a6423908d0c9 100644 --- a/testsuite/run_sets/reposync.yml +++ b/testsuite/run_sets/reposync.yml @@ -13,5 +13,6 @@ - features/reposync/srv_sync_products.feature - features/reposync/srv_enable_sync_products.feature - features/reposync/srv_wait_for_reposync.feature +- features/reposync/srv_create_repository.feature ## Channels and Product synchronization features END ### diff --git a/tftpsync/susemanager-tftpsync/susemanager-tftpsync.spec b/tftpsync/susemanager-tftpsync/susemanager-tftpsync.spec index ccd3ee2a885d..c80f508aeb11 100644 --- a/tftpsync/susemanager-tftpsync/susemanager-tftpsync.spec +++ b/tftpsync/susemanager-tftpsync/susemanager-tftpsync.spec @@ -78,15 +78,6 @@ install -p -D -m 755 configure-tftpsync.sh %{buildroot}%{_sbindir}/configure-tf %endif %endif -%post -if [ -f "/etc/cobbler/settings" ]; then - if ! 
grep "tftpsync_timeout:" /etc/cobbler/settings >/dev/null; then - echo "" >> /etc/cobbler/settings - echo "tftpsync_timeout: 15" >> /etc/cobbler/settings - echo "" >> /etc/cobbler/settings - fi -fi - %files %defattr(-,root,root,-) %doc COPYING.LIB README diff --git a/utils/spacewalk-hostname-rename b/utils/spacewalk-hostname-rename index cde5ff6c0723..66931e5890c7 100755 --- a/utils/spacewalk-hostname-rename +++ b/utils/spacewalk-hostname-rename @@ -638,6 +638,10 @@ if [ -e $MGR_SYNC_CONF ]; then fi print_status 0 # just simulate end +echo -n "Changing postfix settings ... " | tee -a $LOG +postconf -e myhostname=$HOSTNAME +systemctl restart postfix + echo -n "Starting spacewalk services ... " | tee -a $LOG if [ "$DB_SERVICE" != "" ] then