diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 85370da1a..270037f4d 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -1,5 +1,11 @@
 name: Publish
 
+# If a pull-request is pushed then cancel all previously running jobs related
+# to that pull-request
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
+  cancel-in-progress: true
+
 on:
   push:
     tags:
diff --git a/.github/workflows/review-checks.yml b/.github/workflows/review-checks.yml
index 3ffe6425c..945d05792 100644
--- a/.github/workflows/review-checks.yml
+++ b/.github/workflows/review-checks.yml
@@ -1,5 +1,11 @@
 name: Review-checks
 
+# If a pull-request is pushed then cancel all previously running jobs related
+# to that pull-request
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
+  cancel-in-progress: true
+
 on: [pull_request]
 
 jobs:
@@ -10,6 +16,12 @@ jobs:
     steps:
       - uses: actions/checkout@v1
 
+      - name: Fixup CentOS repo files
+        run: |
+          sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
+          sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
+          sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
+
       - name: Install utils
         run: |
           yum install -y git wget ca-certificates
@@ -30,10 +42,14 @@ jobs:
 
   integration-tests:
     runs-on: ubuntu-latest
+    timeout-minutes: 30
     env:
       MYSQL_USER: beaker
       MYSQL_PASSWORD: beaker
       MYSQL_ROOT_PASSWORD: toor
+      # https://github.blog/changelog/2024-03-07-github-actions-all-actions-will-run-on-node20-instead-of-node16-by-default/
+      ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16
+      ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
     strategy:
       fail-fast: false
       matrix:
@@ -51,6 +67,12 @@ jobs:
         ports:
           - 3306
     steps:
+      - name: Fixup CentOS repo files
+        run: |
+          sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
+          sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
+          sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
+
       # We have to install git 2.18+ to perform checkout via git
       # This is possible only via IUS repositories
       - name: Install git to allow checkout
@@ -111,3 +133,92 @@ jobs:
           # Disable Selenium tests until we have plan for selenium driver + firefox
           rm -rf src/bkr/inttest/server/selenium
           ./run-tests.sh -v ${{ matrix.test-target }}
+
+  unit-tests:
+    runs-on: ubuntu-latest
+    timeout-minutes: 15
+    container:
+      image: ${{ matrix.os-target.name }}
+      options: --init
+    strategy:
+      fail-fast: false
+      matrix:
+        test-target:
+          - Common
+          - Client
+          - LabController
+        os-target:
+          - name: centos:7
+          - name: quay.io/centos/centos:stream8
+            additional_repo: "powertools"
+          - name: quay.io/centos/centos:stream9
+            additional_repo: "crb"
+          # After fixing the names of unit test functions, migrate to the latest
+          - name: fedora:38
+    env:
+      # https://github.blog/changelog/2024-03-07-github-actions-all-actions-will-run-on-node20-instead-of-node16-by-default/
+      ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16
+      ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
+    steps:
+      - name: Fixup CentOS repo files
+        run: |
+          sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
+          sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
+          sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
+        if: matrix.os-target.name == 'centos:7' || matrix.os-target.name == 'quay.io/centos/centos:stream8'
+
+      - name: Set BKR_PY3 environment variable
+        run: echo "BKR_PY3=1" >> $GITHUB_ENV
+        if: matrix.os-target.name != 'centos:7'
+
+      - name: Install DNF and plugins
+        run: yum install -y dnf 'dnf-command(builddep)' 'dnf-command(config-manager)'
+
+      - name: Enable additional repositories
+        run: |
+          IFS=', ' read -r -a repos <<< "${{ matrix.os-target.additional_repo }}"
+          for repo in "${repos[@]}"; do
+            dnf config-manager --set-enabled $repo
+          done
+
+      # We have to install git 2.18+ to perform checkout via git
+      # This is possible only via IUS repositories
+      - name: Install git to allow checkout
+        run: |
+          yum install https://repo.ius.io/ius-release-el7.rpm epel-release -y
+          yum install git236-core -y
+        if: matrix.os-target.name == 'centos:7'
+
+      # Do not upgrade to @v4 as node 20 is incompatible with CentOS 7
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      # Remove custom git from the IUS repository - git will be reinstalled later as it is needed by beaker itself.
+      - name: Remove git236 and YUM repositories
+        run: yum remove git236-core ius-release epel-release -y
+        if: matrix.os-target.name == 'centos:7'
+
+      - name: Add Beaker Server YUM repository
+        if: matrix.os-target.name == 'centos:7'
+        run: |
+          curl -o /etc/yum.repos.d/beaker-server.repo https://beaker-project.org/yum/beaker-server-RedHatEnterpriseLinux.repo
+
+      - name: Use EPEL
+        if: matrix.os-target.name != 'centos:7' && !startsWith(matrix.os-target.name, 'fedora:')
+        run: |
+          dnf install -y epel-release
+
+      # Build dependencies must be sufficient, the same is done during RPM build
+      - name: Install Beaker dependency from specfile
+        run: |
+          dnf builddep -y *.spec
+
+      - name: Show Python Environment
+        if: matrix.os-target.name != 'centos:7'
+        run: |
+          python3 -m pip freeze
+
+      - name: Run unit tests
+        run: |
+          pushd ${{ matrix.test-target }}
+          ./run-tests.sh
diff --git a/Common/bkr/common/helpers.py b/Common/bkr/common/helpers.py
index d701ad8bf..298c7e385 100644
--- a/Common/bkr/common/helpers.py
+++ b/Common/bkr/common/helpers.py
@@ -12,6 +12,8 @@
 import os
 import fcntl
 import errno
+
+import six
 from six.moves import queue
 
 log = getLogger(__name__)
@@ -100,12 +102,20 @@ def run(self):
             self.finished.clear()
 
 
-class SensitiveUnicode(unicode):
+class SensitiveUnicode(six.text_type):
+    """The intent of this class is to standardize the behavior of Unicode strings across Python 2 and 3,
+    while treating the contents of the string as sensitive.
+
+    In Python 2, it encodes the unicode string to bytes (str), wrapped in SensitiveStr.
+    In Python 3, it preserves the unicode representation (str) by overriding the default 'encode' behavior.
+    """
     def __repr__(self):
         return ''
 
     def encode(self, *args, **kwargs):
-        return SensitiveStr(super(SensitiveUnicode, self).encode(*args, **kwargs))
+        if six.PY2:
+            return SensitiveStr(super(SensitiveUnicode, self).encode(*args, **kwargs))
+        return self
 
 
 class SensitiveStr(str):
@@ -235,6 +245,10 @@ def siphon(src, dest):
         chunk = src.read(4096)
         if not chunk:
             break
+
+        if six.PY3 and isinstance(chunk, bytes):
+            chunk = chunk.decode('utf-8')
+
         dest.write(chunk)
 
 
diff --git a/LabController/Makefile b/LabController/Makefile
index 92663c3e9..16bb5491b 100644
--- a/LabController/Makefile
+++ b/LabController/Makefile
@@ -4,17 +4,25 @@
 # the Free Software Foundation; either version 2 of the License, or
 # (at your option) any later version.
+BKR_PY3 ?= 0 +COMMAND := python2 + +ifeq ($(BKR_PY3),1) + COMMAND := python3 +endif + + .PHONY: build build: - python2 setup.py build + $(COMMAND) setup.py build .PHONY: install install: build - python2 setup.py install -O1 --skip-build --root $(DESTDIR) + $(COMMAND) setup.py install -O1 --skip-build --root $(DESTDIR) .PHONY: clean clean: - python2 setup.py clean + $(COMMAND) setup.py clean rm -rf build .PHONY: check diff --git a/LabController/aux/anamon b/LabController/extra/anamon similarity index 100% rename from LabController/aux/anamon rename to LabController/extra/anamon diff --git a/LabController/aux/anamon.init b/LabController/extra/anamon.init similarity index 100% rename from LabController/aux/anamon.init rename to LabController/extra/anamon.init diff --git a/LabController/aux/anamon.service b/LabController/extra/anamon.service similarity index 100% rename from LabController/aux/anamon.service rename to LabController/extra/anamon.service diff --git a/LabController/aux/anamon3 b/LabController/extra/anamon3 similarity index 100% rename from LabController/aux/anamon3 rename to LabController/extra/anamon3 diff --git a/LabController/init.d/beaker-provision b/LabController/init.d/beaker-provision deleted file mode 100755 index bb3cdf372..000000000 --- a/LabController/init.d/beaker-provision +++ /dev/null @@ -1,105 +0,0 @@ -#!/bin/bash -# -# beaker-provision: Beaker power control and provisioning daemon -# -# chkconfig: - 99 99 -# description: This service acts on queued commands from the Beaker server by -# provisioning test systems and controlling power. - -# Source function library. -. /etc/rc.d/init.d/functions - -[ -f /usr/bin/beaker-provision ] || exit 0 - -prog="beaker-provision" -PIDFILE=/var/run/beaker-lab-controller/${prog}.pid -LOCKFILE=/var/lock/subsys/$prog - -check_pidfile() { - status -p $PIDFILE $prog >& /dev/null - ret=$? - if [ "$ret" -eq 1 ]; then - # remove stale pidfile - rm -f -- "$PIDFILE" - fi -} - -start() { - echo -n $"Starting $prog: " - check_pidfile - daemon --pidfile $PIDFILE $prog - RETVAL=$? - echo - if test $RETVAL = 0; then - touch $LOCKFILE - fi - return $RETVAL -} - -stop() { - local pid start - echo "Stopping $prog ..." - echo "This may take a few moments while running commands complete." - - pid=$(pidofproc -p $PIDFILE $prog) - RETVAL=0 - if checkpid $pid ; then - # First send SIGTERM - kill -TERM $pid - RETVAL=$? - if [ $RETVAL -eq 0 ] ; then - # Wait for death - start=$(date +%s) - while [ $(($(date +%s) - start)) -lt 303 ] ; do - checkpid $pid || break - sleep 1 - done - # If still alive, kill with fire - if checkpid $pid ; then - kill -KILL $pid - RETVAL=$? - fi - fi - fi - - rm -f $LOCKFILE - [ $RETVAL -eq 0 ] && success || error - echo - return $RETVAL -} - -restart() { - stop - start -} - -condrestart(){ - [ -e $LOCKFILE ] && restart - return 0 -} - -# See how we were called. -case "$1" in - start) - start - ;; - stop) - stop - ;; - status) - status -p $PIDFILE $prog - RETVAL=$? 
- ;; - restart) - restart - ;; - condrestart) - condrestart - ;; - reload) - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart|reload}" - ;; -esac -exit $RETVAL diff --git a/LabController/init.d/beaker-proxy b/LabController/init.d/beaker-proxy deleted file mode 100755 index 09b240ab8..000000000 --- a/LabController/init.d/beaker-proxy +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/sh -# -# beakerd: Start the Beaker proxy daemon -# -# chkconfig: - 99 99 -# description: This service proxies unauthenticated requests from the lab -# network to the Beaker Scheduler. Communication to the -# scheduler is done via authenticated https. -# -# - -# Source function library. -. /etc/rc.d/init.d/functions - -[ -f /usr/bin/beaker-proxy ] || exit 0 - -prog="beaker-proxy" -PIDFILE=/var/run/beaker-lab-controller/${prog}.pid -LOCKFILE=/var/lock/subsys/$prog -RUN_AS=apache - -check_pidfile() { - status -p $PIDFILE $prog >& /dev/null - ret=$? - if [ "$ret" -eq 1 ]; then - # remove stale pidfile - rm -f -- "$PIDFILE" - fi -} - -start() { - echo -n $"Starting $prog: " - check_pidfile - daemon --pidfile $PIDFILE --user $RUN_AS $prog - RETVAL=$? - echo - if test $RETVAL = 0; then - touch $LOCKFILE - fi - return $RETVAL -} - -stop() { - echo -n $"Stopping $prog: " - killproc -p $PIDFILE $prog - RETVAL=$? - echo - rm -f $LOCKFILE - return $RETVAL -} - -restart() { - stop - start -} - -condrestart(){ - [ -e $LOCKFILE ] && restart - return 0 -} - -# See how we were called. -case "$1" in - start) - start - ;; - stop) - stop - ;; - status) - status -p $PIDFILE $prog - RETVAL=$? - ;; - restart) - restart - ;; - condrestart) - condrestart - ;; - reload) - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart|reload}" - ;; -esac -exit $RETVAL diff --git a/LabController/init.d/beaker-transfer b/LabController/init.d/beaker-transfer deleted file mode 100755 index 4381f1f01..000000000 --- a/LabController/init.d/beaker-transfer +++ /dev/null @@ -1,84 +0,0 @@ -#!/bin/sh -# -# beaker-transfer: Start the Beaker log transfer daemon -# -# chkconfig: - 99 99 -# description: This service handles rsyncing logs to their final destination -# -# - -# Source function library. -. /etc/rc.d/init.d/functions - -[ -f /usr/bin/beaker-transfer ] || exit 0 - -prog="beaker-transfer" -PIDFILE=/var/run/beaker-lab-controller/${prog}.pid -LOCKFILE=/var/lock/subsys/$prog -RUN_AS=apache - -check_pidfile() { - status -p $PIDFILE $prog >& /dev/null - ret=$? - if [ "$ret" -eq 1 ]; then - # remove stale pidfile - rm -f -- "$PIDFILE" - fi -} - -start() { - echo -n $"Starting $prog: " - check_pidfile - daemon --pidfile $PIDFILE --user $RUN_AS $prog - RETVAL=$? - echo - if test $RETVAL = 0; then - touch $LOCKFILE - fi - return $RETVAL -} - -stop() { - echo -n $"Stopping $prog: " - killproc -p $PIDFILE $prog - RETVAL=$? - echo - rm -f $LOCKFILE - return $RETVAL -} - -restart() { - stop - start -} - -condrestart(){ - [ -e $LOCKFILE ] && restart - return 0 -} - -# See how we were called. -case "$1" in - start) - start - ;; - stop) - stop - ;; - status) - status -p $PIDFILE $prog - RETVAL=$? 
- ;; - restart) - restart - ;; - condrestart) - condrestart - ;; - reload) - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart|reload}" - ;; -esac -exit $RETVAL diff --git a/LabController/init.d/beaker-watchdog b/LabController/init.d/beaker-watchdog deleted file mode 100755 index 79f787bb2..000000000 --- a/LabController/init.d/beaker-watchdog +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/sh -# -# beakerd: Start the Beaker watchdog daemon -# -# chkconfig: - 99 99 -# description: This service handles watchdogs -# active watchdogs are monitored for errors -# expired watchdogs are then aborted -# -# - -# Source function library. -. /etc/rc.d/init.d/functions - -[ -f /usr/bin/beaker-watchdog ] || exit 0 - -prog="beaker-watchdog" -PIDFILE=/var/run/beaker-lab-controller/${prog}.pid -LOCKFILE=/var/lock/subsys/$prog -RUN_AS=apache - -check_pidfile() { - status -p $PIDFILE $prog >& /dev/null - ret=$? - if [ "$ret" -eq 1 ]; then - # remove stale pidfile - rm -f -- "$PIDFILE" - fi -} - -start() { - echo -n $"Starting $prog: " - check_pidfile - daemon --pidfile $PIDFILE --user $RUN_AS $prog - RETVAL=$? - echo - if test $RETVAL = 0; then - touch $LOCKFILE - fi - return $RETVAL -} - -stop() { - echo -n $"Stopping $prog: " - killproc -p $PIDFILE $prog - RETVAL=$? - echo - rm -f $LOCKFILE - return $RETVAL -} - -restart() { - stop - start -} - -condrestart(){ - [ -e $LOCKFILE ] && restart - return 0 -} - -# See how we were called. -case "$1" in - start) - start - ;; - stop) - stop - ;; - status) - status -p $PIDFILE $prog - RETVAL=$? - ;; - restart) - restart - ;; - condrestart) - condrestart - ;; - reload) - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart|reload}" - ;; -esac -exit $RETVAL diff --git a/LabController/run-tests.sh b/LabController/run-tests.sh index 5ab0bc474..580139769 100755 --- a/LabController/run-tests.sh +++ b/LabController/run-tests.sh @@ -1,5 +1,16 @@ -#/bin/bash +#!/bin/bash set -x -env PYTHONPATH=src:../Common${PYTHONPATH:+:$PYTHONPATH} \ - nosetests ${*:--v --traverse-namespace bkr.labcontroller} +# Use nosetests with python2 interpreter +if [[ -z ${BKR_PY3} ]] || [[ ${BKR_PY3} != 1 ]]; then + command="nosetests ${*:--v --traverse-namespace bkr.labcontroller}"; +else + # Check if pytest-3 is available + if command -v pytest-3 >/dev/null 2>&1; then + command="pytest-3"; + else + command="pytest"; + fi +fi + +env PYTHONPATH=src:../Common${PYTHONPATH:+:$PYTHONPATH} $command diff --git a/LabController/setup.py b/LabController/setup.py index cd7337f84..e890ec84f 100644 --- a/LabController/setup.py +++ b/LabController/setup.py @@ -1,16 +1,21 @@ from setuptools import setup, find_packages -import commands from glob import glob +try: + from subprocess import getstatusoutput +except ImportError: + from commands import getstatusoutput + + def systemd_unit_dir(): - status, output = commands.getstatusoutput('pkg-config --variable systemdsystemunitdir systemd') + status, output = getstatusoutput('pkg-config --variable systemdsystemunitdir systemd') if status or not output: return None # systemd not found return output.strip() def systemd_tmpfiles_dir(): # There doesn't seem to be a specific pkg-config variable for this - status, output = commands.getstatusoutput('pkg-config --variable prefix systemd') + status, output = getstatusoutput('pkg-config --variable prefix systemd') if status or not output: return None # systemd not found return output.strip() + '/lib/tmpfiles.d' @@ -27,7 +32,7 @@ def systemd_tmpfiles_dir(): ('/var/lib/beaker', ['addDistro/addDistro.sh']), 
('/var/lib/beaker/addDistro.d', glob('addDistro/addDistro.d/*')), ('/var/www/beaker/logs', []), - ('/usr/share/bkr/lab-controller', ['apache/404.html'] + glob('aux/*')), + ('/usr/share/bkr/lab-controller', ['apache/404.html'] + glob('extra/*')), ] if systemd_unit_dir(): data_files.extend([ @@ -38,14 +43,6 @@ def systemd_tmpfiles_dir(): (systemd_tmpfiles_dir(), ['tmpfiles.d/beaker-lab-controller.conf']), ('/run/beaker-lab-controller', []), ]) -else: - data_files.extend([ - ('/etc/init.d', ['init.d/beaker-proxy', - 'init.d/beaker-transfer', - 'init.d/beaker-provision', - 'init.d/beaker-watchdog']), - ('/var/run/beaker-lab-controller', []), - ]) setup( name='beaker-lab-controller', diff --git a/LabController/src/bkr/labcontroller/clear_netboot.py b/LabController/src/bkr/labcontroller/clear_netboot.py index 400adaf61..b3fa3b206 100644 --- a/LabController/src/bkr/labcontroller/clear_netboot.py +++ b/LabController/src/bkr/labcontroller/clear_netboot.py @@ -20,8 +20,10 @@ import sys from optparse import OptionParser + from bkr.labcontroller import netboot + def main(): usage = "usage: %prog FQDN" description = "Clears the Beaker TFTP netboot files for the given FQDN" diff --git a/LabController/src/bkr/labcontroller/async.py b/LabController/src/bkr/labcontroller/concurrency.py similarity index 80% rename from LabController/src/bkr/labcontroller/async.py rename to LabController/src/bkr/labcontroller/concurrency.py index 77c5b887a..6f2cf8ccf 100644 --- a/LabController/src/bkr/labcontroller/async.py +++ b/LabController/src/bkr/labcontroller/concurrency.py @@ -1,4 +1,3 @@ - # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or @@ -8,60 +7,71 @@ Async utilities that should probably be in gevent. """ -import sys -import os -import subprocess -import signal -import fcntl import errno +import fcntl import logging -import gevent.event, gevent.socket, gevent.hub +import os +import signal +import subprocess + +import gevent.event +import gevent.hub +import gevent.socket +import six logger = logging.getLogger(__name__) + # Based on code from gevent-subprocess: # https://bitbucket.org/eriks5/gevent-subprocess/src/550405f060a5/src/gevsubprocess/pipe.py def _read_from_pipe(f): fcntl.fcntl(f, fcntl.F_SETFL, os.O_NONBLOCK) chunks = [] - # Start discarding chunks read if we see too many. This will prevent + # Start discarding chunks read if we see too many. This will prevent # a runaway child process from using up all our memory. 
discarding = False while True: try: + gevent.socket.wait_read(f.fileno()) chunk = f.read(4096) if not chunk: break if not discarding: chunks.append(chunk) if len(chunks) >= 1000: - logger.error('Too many chunks read from fd %s, ' - 'child process is running amok?!', f.fileno()) - chunks.append('+++ DISCARDED') + logger.error( + "Too many chunks read from fd %s, " + "child process is running amok?!", + f.fileno(), + ) + chunks.append(b"+++ DISCARDED") discarding = True - except IOError, e: + except IOError as e: if e.errno != errno.EAGAIN: raise - sys.exc_clear() - gevent.socket.wait_read(f.fileno()) - return ''.join(chunks) + if six.PY3: + # Keep data in bytes until the end to reduce memory footprint + return b"".join(chunks).decode("utf-8") + return "".join(chunks) + def _timeout_kill(p, timeout): gevent.sleep(timeout) _kill_process_group(p.pid) + def _kill_process_group(pgid): # Try SIGTERM first, then SIGKILL just to be safe try: os.killpg(pgid, signal.SIGTERM) - except OSError, e: + except OSError as e: if e.errno != errno.ESRCH: raise else: gevent.sleep(1) try: os.killpg(pgid, signal.SIGKILL) - except OSError, e: + except OSError as e: if e.errno != errno.ESRCH: raise @@ -70,14 +80,14 @@ class MonitoredSubprocess(subprocess.Popen): """ Subclass of subprocess.Popen with some useful additions: - * 'dead' attribute: a gevent.event.Event which is set when the child + * 'dead' attribute: a gevent.event.Event which is set when the child process has terminated - * if 'stdout' is subprocess.PIPE, a 'stdout_reader' attribute: - a greenlet which reads and accumulates the child's stdout (fetch it + * if 'stdout' is subprocess.PIPE, a 'stdout_reader' attribute: + a greenlet which reads and accumulates the child's stdout (fetch it by calling stdout_reader.get()) * same for stderr - * if a timeout is given, the child will be sent SIGTERM and then - SIGKILL if it is still running after the timeout (in seconds) has + * if a timeout is given, the child will be sent SIGTERM and then + SIGKILL if it is still running after the timeout (in seconds) has elapsed """ @@ -86,7 +96,7 @@ class MonitoredSubprocess(subprocess.Popen): def __init__(self, *args, **kwargs): self._running.append(self) self.dead = gevent.event.Event() - timeout = kwargs.pop('timeout', None) + timeout = kwargs.pop("timeout", None) orig_preexec_fn = kwargs.get("preexec_fn", None) def preexec_fn(): @@ -96,9 +106,9 @@ def preexec_fn(): kwargs["preexec_fn"] = preexec_fn super(MonitoredSubprocess, self).__init__(*args, **kwargs) - if kwargs.get('stdout') == subprocess.PIPE: + if kwargs.get("stdout") == subprocess.PIPE: self.stdout_reader = gevent.spawn(_read_from_pipe, self.stdout) - if kwargs.get('stderr') == subprocess.PIPE: + if kwargs.get("stderr") == subprocess.PIPE: self.stderr_reader = gevent.spawn(_read_from_pipe, self.stderr) if timeout: self.timeout_killer = gevent.spawn(_timeout_kill, self, timeout) @@ -106,9 +116,9 @@ def preexec_fn(): @classmethod def _sigchld_handler(cls, signum, frame): assert signum == signal.SIGCHLD - # It's important that we do no real work in this signal handler, - # because we could be invoked at any time (from any stack frame, in the - # middle of anything) and we don't want to raise, or interfere with + # It's important that we do no real work in this signal handler, + # because we could be invoked at any time (from any stack frame, in the + # middle of anything) and we don't want to raise, or interfere with # anything else. So we just schedule the real work in a greenlet. 
gevent.spawn(cls._reap_children) @@ -120,11 +130,12 @@ def _reap_children(cls): cls._running.remove(child) _kill_process_group(child.pid) child.dead.set() - if hasattr(child, 'timeout_killer'): + if hasattr(child, "timeout_killer"): child.timeout_killer.kill(block=False) + gevent.hub.get_hub() -# XXX dodgy: this signal handler has to be registered *after* the libev -# default loop is created by get_hub(), since libev registers its own +# XXX dodgy: this signal handler has to be registered *after* the libev +# default loop is created by get_hub(), since libev registers its own # (unused) SIGCHLD handler signal.signal(signal.SIGCHLD, MonitoredSubprocess._sigchld_handler) diff --git a/LabController/src/bkr/labcontroller/config.py b/LabController/src/bkr/labcontroller/config.py index 2beecf49e..d14a78f5c 100644 --- a/LabController/src/bkr/labcontroller/config.py +++ b/LabController/src/bkr/labcontroller/config.py @@ -1,37 +1,40 @@ - # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. import copy -import socket import os +import socket + from bkr.common.pyconfig import PyConfigParser -__all__ = ['load_conf', 'get_conf'] +__all__ = ["load_conf", "get_conf"] class Config(PyConfigParser): - def get_url_domain(self): # URL_DOMAIN used to be called SERVER - return self.get('URL_DOMAIN', - self.get('SERVER', socket.gethostname())) + return self.get("URL_DOMAIN", self.get("SERVER", socket.gethostname())) _conf = Config() -default_config = os.path.abspath(os.path.join(os.path.dirname(__file__), "default.conf")) +default_config = os.path.abspath( + os.path.join(os.path.dirname(__file__), "default.conf") +) _conf.load_from_file(default_config) default_system_conf_file = "/etc/beaker/labcontroller.conf" _conf_loaded = False + + def load_conf(conf_file=default_system_conf_file): global _conf, _conf_loaded # Will throw IOError if file does not exist _conf.load_from_file(conf_file) _conf_loaded = True + def get_conf(): global _conf, _conf_loaded if not _conf_loaded: diff --git a/LabController/src/bkr/labcontroller/distro_import.py b/LabController/src/bkr/labcontroller/distro_import.py index 379b74232..cd7283189 100755 --- a/LabController/src/bkr/labcontroller/distro_import.py +++ b/LabController/src/bkr/labcontroller/distro_import.py @@ -1,35 +1,32 @@ - # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
-import sys, os -import glob -import xmlrpclib -import string -import ConfigParser -import getopt -import urlparse -from optparse import OptionParser, OptionGroup -import urllib2 -import logging -import socket import copy -from bkr.log import log_to_stream -from bkr.common.bexceptions import BX +import json +import logging +import os import pprint +import socket +import sys import time -import json -import dnf import uuid +from optparse import OptionGroup, OptionParser + +import dnf +from six.moves import configparser, urllib, xmlrpc_client + +from bkr.common.bexceptions import BX +from bkr.log import log_to_stream + def url_exists(url): try: - urllib2.urlopen(url) - except urllib2.URLError: + urllib.request.urlopen(url) + except urllib.error.URLError: return False - except IOError, e: + except IOError as e: # errno 21 is you tried to retrieve a directory. Thats ok. We just # want to ensure the path is valid so far. if e.errno == 21: @@ -38,39 +35,44 @@ def url_exists(url): raise return True + def is_rhel8_alpha(parser): result = False try: - result = (parser.get('compose', 'label') == 'Alpha-1.2' and - parser.get('product', 'short') == 'RHEL' and - parser.get('product', 'version') == '8.0' and - # If the partner has made adjustments to the composeinfo - # files so that the compose looks like a unified compose, - # don't execute the extra code we would normally do for RHEL8 - # Alpha on partner servers. Instead assume that the code - # which can import RHEL7 will do. This is the best guess at - # the moment, since there is nothing really explicit which - # distinguishes the ordinary partner sync from a non-adjusted - # composeinfo. - not parser.has_option('variant-BaseOS', 'variants')) - except ConfigParser.Error: + result = ( + parser.get("compose", "label") == "Alpha-1.2" + and parser.get("product", "short") == "RHEL" + and parser.get("product", "version") == "8.0" + and + # If the partner has made adjustments to the composeinfo + # files so that the compose looks like a unified compose, + # don't execute the extra code we would normally do for RHEL8 + # Alpha on partner servers. Instead assume that the code + # which can import RHEL7 will do. This is the best guess at + # the moment, since there is nothing really explicit which + # distinguishes the ordinary partner sync from a non-adjusted + # composeinfo. + not parser.has_option("variant-BaseOS", "variants") + ) + except configparser.Error: pass return result + class IncompleteTree(BX): """ IncompleteTree is raised when there is a discrepancy between what is specified in a .composeinfo/.treeinfo, and what is actually found on disk. 
""" + pass class _DummyProxy: - - """A class that enables RPCs to be accessed as attributes ala xmlrpclib.ServerProxy - Inspired/ripped from xmlrpclib.ServerProxy + """A class that enables RPCs to be accessed as attributes ala xmlrpc_client.ServerProxy + Inspired/ripped from xmlrpc_client.ServerProxy """ def __init__(self, name): @@ -80,53 +82,59 @@ def __getattr__(self, name): return _DummyProxy("%s.%s" % (self.__name, name)) def __call__(self, *args): - logging.debug('Dummy call to: %s, args: %s' % (self.__name, args)) + logging.debug("Dummy call to: %s, args: %s" % (self.__name, args)) return True class SchedulerProxy(object): """Scheduler Proxy""" + def __init__(self, options): self.add_distro_cmd = options.add_distro_cmd # addDistroCmd = '/var/lib/beaker/addDistro.sh' if options.dry_run: - class _Dummy(object): - def __getattr__(self, name): return _DummyProxy(name) - self.proxy = _Dummy() else: - self.proxy = xmlrpclib.ServerProxy(options.lab_controller, - allow_none=True) + self.proxy = xmlrpc_client.ServerProxy( + options.lab_controller, allow_none=True + ) def add_distro(self, profile): return self.proxy.add_distro_tree(profile) - def run_distro_test_job(self, name=None, tags=[], osversion=None, - arches=[], variants=[]): + def run_distro_test_job( + self, name=None, tags=[], osversion=None, arches=[], variants=[] + ): if self.is_add_distro_cmd: - cmd = self._make_add_distro_cmd(name=name,tags=tags, - osversion=osversion, - arches=arches, variants=variants) + cmd = self._make_add_distro_cmd( + name=name, + tags=tags, + osversion=osversion, + arches=arches, + variants=variants, + ) logging.debug(cmd) os.system(cmd) else: - raise BX('%s is missing' % self.add_distro_cmd) + raise BX("%s is missing" % self.add_distro_cmd) - def _make_add_distro_cmd(self, name=None, tags=[], - osversion=None, arches=[], variants=[]): - #addDistro.sh "rel-eng" RHEL6.0-20090626.2 RedHatEnterpriseLinux6.0 x86_64,i386 "Server,Workstation,Client" + def _make_add_distro_cmd( + self, name=None, tags=[], osversion=None, arches=[], variants=[] + ): + # addDistro.sh "rel-eng" RHEL6.0-20090626.2 RedHatEnterpriseLinux6.0 x86_64,i386 "Server,Workstation,Client" cmd = '%s "%s" "%s" "%s" "%s" "%s"' % ( self.add_distro_cmd, - ','.join(tags), + ",".join(tags), name, osversion, - ','.join(arches), - ','.join(variants)) + ",".join(arches), + ",".join(variants), + ) return cmd @property @@ -141,32 +149,31 @@ class Parser(object): """ base class to use for processing .composeinfo and .treeinfo """ + url = None parser = None last_modified = 0.0 - infofile = None # overriden in subclasses + infofile = None # overriden in subclasses discinfo = None def parse(self, url): self.url = url try: - f = urllib2.urlopen('%s/%s' % (self.url, self.infofile)) - self.parser = ConfigParser.ConfigParser() + f = urllib.request.urlopen("%s/%s" % (self.url, self.infofile)) + self.parser = configparser.ConfigParser() self.parser.readfp(f) f.close() - except urllib2.URLError: + except urllib.error.URLError: return False - except ConfigParser.MissingSectionHeaderError, e: - raise BX('%s/%s is not parsable: %s' % (self.url, - self.infofile, - e)) + except configparser.MissingSectionHeaderError as e: + raise BX("%s/%s is not parsable: %s" % (self.url, self.infofile, e)) if self.discinfo: try: - f = urllib2.urlopen('%s/%s' % (self.url, self.discinfo)) + f = urllib.request.urlopen("%s/%s" % (self.url, self.discinfo)) self.last_modified = f.read().split("\n")[0] f.close() - except urllib2.URLError: + except urllib.error.URLError: pass return True @@ 
-174,7 +181,7 @@ def get(self, section, key, default=None): if self.parser: try: default = self.parser.get(section, key) - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError), e: + except (configparser.NoSectionError, configparser.NoOptionError): if default is None: raise return default @@ -192,25 +199,28 @@ def has_section_startswith(self, s): return False def __repr__(self): - return '%s/%s' % (self.url, self.infofile) + return "%s/%s" % (self.url, self.infofile) class Cparser(Parser): - infofile = '.composeinfo' + infofile = ".composeinfo" discinfo = None + class Tparser(Parser): - infofile = '.treeinfo' - discinfo = '.discinfo' + infofile = ".treeinfo" + discinfo = ".discinfo" + class TparserRhel5(Tparser): def get(self, section, key, default=None): value = super(TparserRhel5, self).get(section, key, default=default) # .treeinfo for RHEL5 incorrectly reports ppc when it should report ppc64 - if section == 'general' and key == 'arch' and value == 'ppc': - value = 'ppc64' + if section == "general" and key == "arch" and value == "ppc": + value = "ppc64" return value + class Importer(object): def __init__(self, parser): self.parser = parser @@ -222,21 +232,29 @@ def check_arches(self, arches, multiple=True, single=True): if not arches: return if len(arches) > 1 and not multiple: - raise BX('Multiple values for arch are incompatible with %s ' - 'importer' % self.__class__.__name__) + raise BX( + "Multiple values for arch are incompatible with %s " + "importer" % self.__class__.__name__ + ) if not single: - raise BX('Specific value for arch is incompatible with %s ' - 'importer' % self.__class__.__name__) + raise BX( + "Specific value for arch is incompatible with %s " + "importer" % self.__class__.__name__ + ) def check_variants(self, variants, multiple=True, single=True): if not variants: return if len(variants) > 1 and not multiple: - raise BX('Multiple values for variant are incompatible with %s ' - 'importer' % self.__class__.__name__) + raise BX( + "Multiple values for variant are incompatible with %s " + "importer" % self.__class__.__name__ + ) if not single: - raise BX('Specific value for variant is incompatible with %s ' - 'importer' % self.__class__.__name__) + raise BX( + "Specific value for variant is incompatible with %s " + "importer" % self.__class__.__name__ + ) class ComposeInfoMixin(object): @@ -246,10 +264,10 @@ def is_importer_for(cls, url, options=None): if not parser.parse(url): return False for r in cls.required: - if parser.get(r['section'], r['key'], '') == '': + if parser.get(r["section"], r["key"], "") == "": return False for e in cls.excluded: - if parser.get(e['section'], e['key'], '') != '': + if parser.get(e["section"], e["key"], "") != "": return False return parser @@ -260,17 +278,18 @@ def run_jobs(self): arches = [] variants = [] for distro_tree in self.distro_trees: - arches.append(distro_tree['arch']) - variants.append(distro_tree['variant']) - name = distro_tree['name'] - tags = distro_tree.get('tags', []) - osversion = '%s.%s' % (distro_tree['osmajor'], - distro_tree['osminor']) - self.scheduler.run_distro_test_job(name=name, - tags=tags, - osversion=osversion, - arches=list(set(arches)), - variants=list(set(variants))) + arches.append(distro_tree["arch"]) + variants.append(distro_tree["variant"]) + name = distro_tree["name"] + tags = distro_tree.get("tags", []) + osversion = "%s.%s" % (distro_tree["osmajor"], distro_tree["osminor"]) + self.scheduler.run_distro_test_job( + name=name, + tags=tags, + osversion=osversion, + 
arches=list(set(arches)), + variants=list(set(variants)), + ) class ComposeInfoLegacy(ComposeInfoMixin, Importer): @@ -279,33 +298,39 @@ class ComposeInfoLegacy(ComposeInfoMixin, Importer): arches = i386,x86_64,ia64,ppc64,s390,s390x name = RHEL4-U8 """ - required = [dict(section='tree', key='name'), - ] - excluded = [dict(section='product', key='variants'), - ] - arches = ['i386', 'x86_64', 'ia64', 'ppc', 'ppc64', 's390', 's390x'] - os_dirs = ['os', 'tree'] + + required = [ + dict(section="tree", key="name"), + ] + excluded = [ + dict(section="product", key="variants"), + ] + arches = ["i386", "x86_64", "ia64", "ppc", "ppc64", "s390", "s390x"] + os_dirs = ["os", "tree"] def get_arches(self): - """ Return a list of arches - """ + """Return a list of arches""" specific_arches = self.options.arch if specific_arches: - return filter(lambda x: url_exists(os.path.join(self.parser.url,x)) \ - and x, [arch for arch in specific_arches]) + return filter( + lambda x: url_exists(os.path.join(self.parser.url, x)) and x, + [arch for arch in specific_arches], + ) else: - return filter(lambda x: url_exists(os.path.join(self.parser.url,x)) \ - and x, [arch for arch in self.arches]) + return filter( + lambda x: url_exists(os.path.join(self.parser.url, x)) and x, + [arch for arch in self.arches], + ) def get_os_dir(self, arch): - """ Return path to os directory - """ + """Return path to os directory""" base_path = os.path.join(self.parser.url, arch) try: - os_dir = filter(lambda x: url_exists(os.path.join(base_path, x)) \ - and x, self.os_dirs)[0] - except IndexError, e: - raise BX('%s no os_dir found: %s' % (base_path, e)) + os_dir = filter( + lambda x: url_exists(os.path.join(base_path, x)) and x, self.os_dirs + )[0] + except IndexError as e: + raise BX("%s no os_dir found: %s" % (base_path, e)) return os.path.join(arch, os_dir) def check_input(self, options): @@ -323,412 +348,464 @@ def process(self, urls, options): full_os_dir = os.path.join(self.parser.url, os_dir) options = copy.deepcopy(self.options) if not options.name: - options.name = self.parser.get('tree', 'name') + options.name = self.parser.get("tree", "name") urls_arch = [os.path.join(url, os_dir) for url in urls] # find our repos, but relative from os_dir - #repos = self.find_repos(full_os_dir, arch) + # repos = self.find_repos(full_os_dir, arch) build = Build(full_os_dir) build.process(urls_arch, options) self.distro_trees.append(build.tree) - except BX, err: + except BX as err: if not options.ignore_missing: exit_status = 1 - logging.warn(err) + logging.warning(err) return exit_status + class ComposeInfo(ComposeInfoMixin, Importer): """ -[product] -family = RHEL -name = Red Hat Enterprise Linux -variants = Client,ComputeNode,Server,Workstation -version = 7.0 - -[variant-Client] -arches = x86_64 -id = Client -name = Client -type = variant -uid = Client -variants = Client-optional - -[variant-Client-optional] -arches = x86_64 -id = optional -name = optional -parent = Client -type = optional -uid = Client-optional -variants = - -[variant-Client-optional.x86_64] -arch = x86_64 -debuginfo = Client-optional/x86_64/debuginfo -os_dir = Client-optional/x86_64/os -packages = Client-optional/x86_64/os/Packages -parent = Client.x86_64 -repository = Client-optional/x86_64/os -sources = Client-optional/source/SRPMS - -[variant-Client.x86_64] -arch = x86_64 -debuginfo = Client/x86_64/debuginfo -isos = Client/x86_64/iso -os_dir = Client/x86_64/os -packages = Client/x86_64/os/Packages -repository = Client/x86_64/os -source_isos = Client/source/iso -sources 
= Client/source/SRPMS - -[variant-ComputeNode] -arches = x86_64 -id = ComputeNode -name = Compute Node -type = variant -uid = ComputeNode -variants = ComputeNode-optional - -[variant-ComputeNode-optional] -arches = x86_64 -id = optional -name = optional -parent = ComputeNode -type = optional -uid = ComputeNode-optional -variants = - -[variant-ComputeNode-optional.x86_64] -arch = x86_64 -debuginfo = ComputeNode-optional/x86_64/debuginfo -os_dir = ComputeNode-optional/x86_64/os -packages = ComputeNode-optional/x86_64/os/Packages -parent = ComputeNode.x86_64 -repository = ComputeNode-optional/x86_64/os -sources = ComputeNode-optional/source/SRPMS - -[variant-ComputeNode.x86_64] -arch = x86_64 -debuginfo = ComputeNode/x86_64/debuginfo -isos = ComputeNode/x86_64/iso -os_dir = ComputeNode/x86_64/os -packages = ComputeNode/x86_64/os/Packages -repository = ComputeNode/x86_64/os -source_isos = ComputeNode/source/iso -sources = ComputeNode/source/SRPMS - -[variant-Server] -arches = ppc64,s390x,x86_64 -id = Server -name = Server -type = variant -uid = Server -variants = Server-HighAvailability,Server-LoadBalancer,Server-ResilientStorage,Server-ScalableFileSystem,Server-optional - -[variant-Server-HighAvailability] -arches = x86_64 -id = HighAvailability -name = High Availability -parent = Server -type = addon -uid = Server-HighAvailability -variants = - -[variant-Server-HighAvailability.x86_64] -arch = x86_64 -debuginfo = Server/x86_64/debuginfo -os_dir = Server/x86_64/os -packages = Server/x86_64/os/addons/HighAvailability -parent = Server.x86_64 -repository = Server/x86_64/os/addons/HighAvailability -sources = Server/source/SRPMS - -[variant-Server-LoadBalancer] -arches = x86_64 -id = LoadBalancer -name = Load Balancer -parent = Server -type = addon -uid = Server-LoadBalancer -variants = - -[variant-Server-LoadBalancer.x86_64] -arch = x86_64 -debuginfo = Server/x86_64/debuginfo -os_dir = Server/x86_64/os -packages = Server/x86_64/os/addons/LoadBalancer -parent = Server.x86_64 -repository = Server/x86_64/os/addons/LoadBalancer -sources = Server/source/SRPMS - -[variant-Server-ResilientStorage] -arches = x86_64 -id = ResilientStorage -name = Resilient Storage -parent = Server -type = addon -uid = Server-ResilientStorage -variants = - -[variant-Server-ResilientStorage.x86_64] -arch = x86_64 -debuginfo = Server/x86_64/debuginfo -os_dir = Server/x86_64/os -packages = Server/x86_64/os/addons/ResilientStorage -parent = Server.x86_64 -repository = Server/x86_64/os/addons/ResilientStorage -sources = Server/source/SRPMS - -[variant-Server-ScalableFileSystem] -arches = x86_64 -id = ScalableFileSystem -name = Scalable Filesystem Support -parent = Server -type = addon -uid = Server-ScalableFileSystem -variants = - -[variant-Server-ScalableFileSystem.x86_64] -arch = x86_64 -debuginfo = Server/x86_64/debuginfo -os_dir = Server/x86_64/os -packages = Server/x86_64/os/addons/ScalableFileSystem -parent = Server.x86_64 -repository = Server/x86_64/os/addons/ScalableFileSystem -sources = Server/source/SRPMS - -[variant-Server-optional] -arches = ppc64,s390x,x86_64 -id = optional -name = optional -parent = Server -type = optional -uid = Server-optional -variants = - -[variant-Server-optional.ppc64] -arch = ppc64 -debuginfo = Server-optional/ppc64/debuginfo -os_dir = Server-optional/ppc64/os -packages = Server-optional/ppc64/os/Packages -parent = Server.ppc64 -repository = Server-optional/ppc64/os -sources = Server-optional/source/SRPMS - -[variant-Server-optional.s390x] -arch = s390x -debuginfo = 
Server-optional/s390x/debuginfo -os_dir = Server-optional/s390x/os -packages = Server-optional/s390x/os/Packages -parent = Server.s390x -repository = Server-optional/s390x/os -sources = Server-optional/source/SRPMS - -[variant-Server-optional.x86_64] -arch = x86_64 -debuginfo = Server-optional/x86_64/debuginfo -os_dir = Server-optional/x86_64/os -packages = Server-optional/x86_64/os/Packages -parent = Server.x86_64 -repository = Server-optional/x86_64/os -sources = Server-optional/source/SRPMS - -[variant-Server.ppc64] -arch = ppc64 -debuginfo = Server/ppc64/debuginfo -isos = Server/ppc64/iso -os_dir = Server/ppc64/os -packages = Server/ppc64/os/Packages -repository = Server/ppc64/os -source_isos = Server/source/iso -sources = Server/source/SRPMS - -[variant-Server.s390x] -arch = s390x -debuginfo = Server/s390x/debuginfo -isos = Server/s390x/iso -os_dir = Server/s390x/os -packages = Server/s390x/os/Packages -repository = Server/s390x/os -source_isos = Server/source/iso -sources = Server/source/SRPMS - -[variant-Server.x86_64] -arch = x86_64 -debuginfo = Server/x86_64/debuginfo -isos = Server/x86_64/iso -os_dir = Server/x86_64/os -packages = Server/x86_64/os/Packages -repository = Server/x86_64/os -source_isos = Server/source/iso -sources = Server/source/SRPMS - -[variant-Workstation] -arches = x86_64 -id = Workstation -name = Workstation -type = variant -uid = Workstation -variants = Workstation-ScalableFileSystem,Workstation-optional - -[variant-Workstation-ScalableFileSystem] -arches = x86_64 -id = ScalableFileSystem -name = Scalable Filesystem Support -parent = Workstation -type = addon -uid = Workstation-ScalableFileSystem -variants = - -[variant-Workstation-ScalableFileSystem.x86_64] -arch = x86_64 -debuginfo = Workstation/x86_64/debuginfo -os_dir = Workstation/x86_64/os -packages = Workstation/x86_64/os/addons/ScalableFileSystem -parent = Workstation.x86_64 -repository = Workstation/x86_64/os/addons/ScalableFileSystem -sources = Workstation/source/SRPMS - -[variant-Workstation-optional] -arches = x86_64 -id = optional -name = optional -parent = Workstation -type = optional -uid = Workstation-optional -variants = - -[variant-Workstation-optional.x86_64] -arch = x86_64 -debuginfo = Workstation-optional/x86_64/debuginfo -os_dir = Workstation-optional/x86_64/os -packages = Workstation-optional/x86_64/os/Packages -parent = Workstation.x86_64 -repository = Workstation-optional/x86_64/os -sources = Workstation-optional/source/SRPMS - -[variant-Workstation.x86_64] -arch = x86_64 -debuginfo = Workstation/x86_64/debuginfo -isos = Workstation/x86_64/iso -os_dir = Workstation/x86_64/os -packages = Workstation/x86_64/os/Packages -repository = Workstation/x86_64/os -source_isos = Workstation/source/iso -sources = Workstation/source/SRPMS + [product] + family = RHEL + name = Red Hat Enterprise Linux + variants = Client,ComputeNode,Server,Workstation + version = 7.0 + + [variant-Client] + arches = x86_64 + id = Client + name = Client + type = variant + uid = Client + variants = Client-optional + + [variant-Client-optional] + arches = x86_64 + id = optional + name = optional + parent = Client + type = optional + uid = Client-optional + variants = + + [variant-Client-optional.x86_64] + arch = x86_64 + debuginfo = Client-optional/x86_64/debuginfo + os_dir = Client-optional/x86_64/os + packages = Client-optional/x86_64/os/Packages + parent = Client.x86_64 + repository = Client-optional/x86_64/os + sources = Client-optional/source/SRPMS + + [variant-Client.x86_64] + arch = x86_64 + debuginfo = 
Client/x86_64/debuginfo + isos = Client/x86_64/iso + os_dir = Client/x86_64/os + packages = Client/x86_64/os/Packages + repository = Client/x86_64/os + source_isos = Client/source/iso + sources = Client/source/SRPMS + + [variant-ComputeNode] + arches = x86_64 + id = ComputeNode + name = Compute Node + type = variant + uid = ComputeNode + variants = ComputeNode-optional + + [variant-ComputeNode-optional] + arches = x86_64 + id = optional + name = optional + parent = ComputeNode + type = optional + uid = ComputeNode-optional + variants = + + [variant-ComputeNode-optional.x86_64] + arch = x86_64 + debuginfo = ComputeNode-optional/x86_64/debuginfo + os_dir = ComputeNode-optional/x86_64/os + packages = ComputeNode-optional/x86_64/os/Packages + parent = ComputeNode.x86_64 + repository = ComputeNode-optional/x86_64/os + sources = ComputeNode-optional/source/SRPMS + + [variant-ComputeNode.x86_64] + arch = x86_64 + debuginfo = ComputeNode/x86_64/debuginfo + isos = ComputeNode/x86_64/iso + os_dir = ComputeNode/x86_64/os + packages = ComputeNode/x86_64/os/Packages + repository = ComputeNode/x86_64/os + source_isos = ComputeNode/source/iso + sources = ComputeNode/source/SRPMS + + [variant-Server] + arches = ppc64,s390x,x86_64 + id = Server + name = Server + type = variant + uid = Server + variants = Server-HighAvailability,Server-LoadBalancer,Server-ResilientStorage,Server-ScalableFileSystem,Server-optional + + [variant-Server-HighAvailability] + arches = x86_64 + id = HighAvailability + name = High Availability + parent = Server + type = addon + uid = Server-HighAvailability + variants = + + [variant-Server-HighAvailability.x86_64] + arch = x86_64 + debuginfo = Server/x86_64/debuginfo + os_dir = Server/x86_64/os + packages = Server/x86_64/os/addons/HighAvailability + parent = Server.x86_64 + repository = Server/x86_64/os/addons/HighAvailability + sources = Server/source/SRPMS + + [variant-Server-LoadBalancer] + arches = x86_64 + id = LoadBalancer + name = Load Balancer + parent = Server + type = addon + uid = Server-LoadBalancer + variants = + + [variant-Server-LoadBalancer.x86_64] + arch = x86_64 + debuginfo = Server/x86_64/debuginfo + os_dir = Server/x86_64/os + packages = Server/x86_64/os/addons/LoadBalancer + parent = Server.x86_64 + repository = Server/x86_64/os/addons/LoadBalancer + sources = Server/source/SRPMS + + [variant-Server-ResilientStorage] + arches = x86_64 + id = ResilientStorage + name = Resilient Storage + parent = Server + type = addon + uid = Server-ResilientStorage + variants = + + [variant-Server-ResilientStorage.x86_64] + arch = x86_64 + debuginfo = Server/x86_64/debuginfo + os_dir = Server/x86_64/os + packages = Server/x86_64/os/addons/ResilientStorage + parent = Server.x86_64 + repository = Server/x86_64/os/addons/ResilientStorage + sources = Server/source/SRPMS + + [variant-Server-ScalableFileSystem] + arches = x86_64 + id = ScalableFileSystem + name = Scalable Filesystem Support + parent = Server + type = addon + uid = Server-ScalableFileSystem + variants = + + [variant-Server-ScalableFileSystem.x86_64] + arch = x86_64 + debuginfo = Server/x86_64/debuginfo + os_dir = Server/x86_64/os + packages = Server/x86_64/os/addons/ScalableFileSystem + parent = Server.x86_64 + repository = Server/x86_64/os/addons/ScalableFileSystem + sources = Server/source/SRPMS + + [variant-Server-optional] + arches = ppc64,s390x,x86_64 + id = optional + name = optional + parent = Server + type = optional + uid = Server-optional + variants = + + [variant-Server-optional.ppc64] + arch = ppc64 + 
debuginfo = Server-optional/ppc64/debuginfo + os_dir = Server-optional/ppc64/os + packages = Server-optional/ppc64/os/Packages + parent = Server.ppc64 + repository = Server-optional/ppc64/os + sources = Server-optional/source/SRPMS + + [variant-Server-optional.s390x] + arch = s390x + debuginfo = Server-optional/s390x/debuginfo + os_dir = Server-optional/s390x/os + packages = Server-optional/s390x/os/Packages + parent = Server.s390x + repository = Server-optional/s390x/os + sources = Server-optional/source/SRPMS + + [variant-Server-optional.x86_64] + arch = x86_64 + debuginfo = Server-optional/x86_64/debuginfo + os_dir = Server-optional/x86_64/os + packages = Server-optional/x86_64/os/Packages + parent = Server.x86_64 + repository = Server-optional/x86_64/os + sources = Server-optional/source/SRPMS + + [variant-Server.ppc64] + arch = ppc64 + debuginfo = Server/ppc64/debuginfo + isos = Server/ppc64/iso + os_dir = Server/ppc64/os + packages = Server/ppc64/os/Packages + repository = Server/ppc64/os + source_isos = Server/source/iso + sources = Server/source/SRPMS + + [variant-Server.s390x] + arch = s390x + debuginfo = Server/s390x/debuginfo + isos = Server/s390x/iso + os_dir = Server/s390x/os + packages = Server/s390x/os/Packages + repository = Server/s390x/os + source_isos = Server/source/iso + sources = Server/source/SRPMS + + [variant-Server.x86_64] + arch = x86_64 + debuginfo = Server/x86_64/debuginfo + isos = Server/x86_64/iso + os_dir = Server/x86_64/os + packages = Server/x86_64/os/Packages + repository = Server/x86_64/os + source_isos = Server/source/iso + sources = Server/source/SRPMS + + [variant-Workstation] + arches = x86_64 + id = Workstation + name = Workstation + type = variant + uid = Workstation + variants = Workstation-ScalableFileSystem,Workstation-optional + + [variant-Workstation-ScalableFileSystem] + arches = x86_64 + id = ScalableFileSystem + name = Scalable Filesystem Support + parent = Workstation + type = addon + uid = Workstation-ScalableFileSystem + variants = + + [variant-Workstation-ScalableFileSystem.x86_64] + arch = x86_64 + debuginfo = Workstation/x86_64/debuginfo + os_dir = Workstation/x86_64/os + packages = Workstation/x86_64/os/addons/ScalableFileSystem + parent = Workstation.x86_64 + repository = Workstation/x86_64/os/addons/ScalableFileSystem + sources = Workstation/source/SRPMS + + [variant-Workstation-optional] + arches = x86_64 + id = optional + name = optional + parent = Workstation + type = optional + uid = Workstation-optional + variants = + + [variant-Workstation-optional.x86_64] + arch = x86_64 + debuginfo = Workstation-optional/x86_64/debuginfo + os_dir = Workstation-optional/x86_64/os + packages = Workstation-optional/x86_64/os/Packages + parent = Workstation.x86_64 + repository = Workstation-optional/x86_64/os + sources = Workstation-optional/source/SRPMS + + [variant-Workstation.x86_64] + arch = x86_64 + debuginfo = Workstation/x86_64/debuginfo + isos = Workstation/x86_64/iso + os_dir = Workstation/x86_64/os + packages = Workstation/x86_64/os/Packages + repository = Workstation/x86_64/os + source_isos = Workstation/source/iso + sources = Workstation/source/SRPMS """ - required = [dict(section='product', key='variants'), - ] + + required = [ + dict(section="product", key="variants"), + ] excluded = [] def get_arches(self, variant): - """ Return a list of arches for variant - """ - - all_arches = self.parser.get('variant-%s' % variant, 'arches'). 
\ - split(',') + """Return a list of arches for variant""" + + all_arches = self.parser.get("variant-%s" % variant, "arches").split(",") # Fedora 25+ .composeinfo includes src but it's not a real arch that can be installed - if 'src' in all_arches: - all_arches.remove('src') + if "src" in all_arches: + all_arches.remove("src") specific_arches = set(self.options.arch) if specific_arches: applicable_arches = specific_arches.intersection(set(all_arches)) return list(applicable_arches) else: return all_arches - + def get_variants(self): - """ Return a list of variants - """ + """Return a list of variants""" specific_variants = self.options.variant if specific_variants: return specific_variants - return self.parser.get('product', 'variants').split(',') + return self.parser.get("product", "variants").split(",") def find_repos(self, repo_base, rpath, variant, arch): - """ Find all variant repos - """ + """Find all variant repos""" repos = [] - variants = self.parser.get('variant-%s' % variant, 'variants', '') + variants = self.parser.get("variant-%s" % variant, "variants", "") if variants: - for sub_variant in variants.split(','): - repos.extend(self.find_repos(repo_base, rpath, sub_variant, - arch)) + for sub_variant in variants.split(","): + repos.extend(self.find_repos(repo_base, rpath, sub_variant, arch)) - # Skip addon variants from .composeinfo, we pick these up from + # Skip addon variants from .composeinfo, we pick these up from # .treeinfo - repotype = self.parser.get('variant-%s' % variant, 'type', '') - if repotype == 'addon': + repotype = self.parser.get("variant-%s" % variant, "type", "") + if repotype == "addon": return repos - repopath = self.parser.get('variant-%s.%s' % (variant, arch), - 'repository', '') + repopath = self.parser.get("variant-%s.%s" % (variant, arch), "repository", "") if repopath: - if url_exists(os.path.join(repo_base, rpath, repopath, 'repodata')): - repos.append(dict(repoid=variant, type=repotype, - path=os.path.join(rpath, repopath))) + if url_exists(os.path.join(repo_base, rpath, repopath, "repodata")): + repos.append( + dict( + repoid=variant, + type=repotype, + path=os.path.join(rpath, repopath), + ) + ) else: - logging.warn('%s repo found in .composeinfo but does not exist', variant) + logging.warning( + "%s repo found in .composeinfo but does not exist", variant + ) - debugrepopath = self.parser.get('variant-%s.%s' % (variant, arch), - 'debuginfo', '') + debugrepopath = self.parser.get( + "variant-%s.%s" % (variant, arch), "debuginfo", "" + ) if debugrepopath: - if url_exists(os.path.join(repo_base, rpath, debugrepopath, 'repodata')): - repos.append(dict(repoid='%s-debuginfo' % variant, type='debug', - path=os.path.join(rpath, debugrepopath))) + if url_exists(os.path.join(repo_base, rpath, debugrepopath, "repodata")): + repos.append( + dict( + repoid="%s-debuginfo" % variant, + type="debug", + path=os.path.join(rpath, debugrepopath), + ) + ) else: - logging.warn('%s-debuginfo repo found in .composeinfo but does not exist', variant) + logging.warning( + "%s-debuginfo repo found in .composeinfo but does not exist", + variant, + ) if is_rhel8_alpha(self.parser): appstream_repos = self._guess_appstream_repos(rpath, arch, repo_base) if not debugrepopath: appstream_repos.pop() for repo in appstream_repos: - url = os.path.join(repo_base, repo[2], 'repodata') + url = os.path.join(repo_base, repo[2], "repodata") if url_exists(url): repos.append(dict(repoid=repo[0], type=repo[1], path=repo[2])) else: - raise ValueError("Expected {0} compose at {1} but it doesn't 
exist".format(repo[0], url)) + raise ValueError( + "Expected {0} compose at {1} but it doesn't exist".format( + repo[0], url + ) + ) return repos def _guess_appstream_repos(self, rpath, arch, repo_base): """Iterate over possible layouts to guess which one fits and return a list of repositories.""" repo_layout = { - '8.0-AppStream-Alpha': [ - ('AppStream', - 'variant', - os.path.join(rpath, '..', '8.0-AppStream-Alpha', 'AppStream', arch, 'os') + "8.0-AppStream-Alpha": [ + ( + "AppStream", + "variant", + os.path.join( + rpath, "..", "8.0-AppStream-Alpha", "AppStream", arch, "os" + ), + ), + ( + "AppStream-debuginfo", + "debug", + os.path.join( + rpath, + "..", + "8.0-AppStream-Alpha", + "AppStream", + arch, + "debug", + "tree", + ), ), - ('AppStream-debuginfo', - 'debug', - os.path.join(rpath, '..', '8.0-AppStream-Alpha', 'AppStream', arch, 'debug', 'tree') - ) ], - 'AppStream-8.0-20180531.0': [ - ('AppStream', - 'variant', - os.path.join(rpath, '..', '..', 'AppStream-8.0-20180531.0', 'compose', 'AppStream', arch, 'os') + "AppStream-8.0-20180531.0": [ + ( + "AppStream", + "variant", + os.path.join( + rpath, + "..", + "..", + "AppStream-8.0-20180531.0", + "compose", + "AppStream", + arch, + "os", + ), ), - ('AppStream-debuginfo', - 'debug', - os.path.join(rpath, '..', '..', 'AppStream-8.0-20180531.0', 'compose', 'AppStream', arch, 'debug', 'tree') + ( + "AppStream-debuginfo", + "debug", + os.path.join( + rpath, + "..", + "..", + "AppStream-8.0-20180531.0", + "compose", + "AppStream", + arch, + "debug", + "tree", + ), ), - ] + ], } appstream_repos = [] for dirname, repos in repo_layout.items(): - url = os.path.join(repo_base, repos[0][2], 'repodata') - logging.debug('Trying to import %s', repos[0][2]) + url = os.path.join(repo_base, repos[0][2], "repodata") + logging.debug("Trying to import %s", repos[0][2]) if not url_exists(url): continue else: appstream_repos = repos if not appstream_repos: - raise ValueError("Could not determine repository layout to import AppStream repo") + raise ValueError( + "Could not determine repository layout to import AppStream repo" + ) return appstream_repos def process(self, urls, options): @@ -739,38 +816,47 @@ def process(self, urls, options): self.distro_trees = [] for variant in self.get_variants(): for arch in self.get_arches(variant): - os_dir = self.parser.get('variant-%s.%s' % - (variant, arch), 'os_dir') + os_dir = self.parser.get("variant-%s.%s" % (variant, arch), "os_dir") options = copy.deepcopy(self.options) if not options.name: - options.name = self.parser.get('product', 'name') + options.name = self.parser.get("product", "name") # our current path relative to the os_dir "../.." - rpath = os.path.join(*['..' for i in range(0, - len(os_dir.split('/')))]) + rpath = os.path.join(*[".." 
for i in range(0, len(os_dir.split("/")))]) # find our repos, but relative from os_dir - repos = self.find_repos(os.path.join(self.parser.url, os_dir), rpath, variant, arch) + repos = self.find_repos( + os.path.join(self.parser.url, os_dir), rpath, variant, arch + ) urls_variant_arch = [os.path.join(url, os_dir) for url in urls] try: options.variant = [variant] options.arch = [arch] build = Build(os.path.join(self.parser.url, os_dir)) - labels = self.parser.get('compose', 'label', '') - tags = [label.strip() for label in (labels and labels.split() or [])] + labels = self.parser.get("compose", "label", "") + tags = [ + label.strip() for label in (labels and labels.split() or []) + ] try: - isos_path = self.parser.get('variant-%s.%s' % (variant, arch), 'isos') + isos_path = self.parser.get( + "variant-%s.%s" % (variant, arch), "isos" + ) isos_path = os.path.join(rpath, isos_path) - except ConfigParser.NoOptionError: + except configparser.NoOptionError: isos_path = None - build.process(urls_variant_arch, options, repos=repos, - tags=tags, isos_path=isos_path) + build.process( + urls_variant_arch, + options, + repos=repos, + tags=tags, + isos_path=isos_path, + ) self.distro_trees.append(build.tree) - except BX, err: + except BX as err: if not options.ignore_missing: exit_status = 1 - logging.warn(err) + logging.warning(err) return exit_status @@ -778,30 +864,30 @@ class TreeInfoMixin(object): """ Base class for TreeInfo methods """ - required = [dict(section='general', key='family'), - dict(section='general', key='version'), - dict(section='general', key='arch'), - ] + + required = [ + dict(section="general", key="family"), + dict(section="general", key="version"), + dict(section="general", key="arch"), + ] excluded = [] # This is a best guess for the relative iso path # for RHEL5/6/7 and Fedora trees - isos_path = '../iso/' + isos_path = "../iso/" def check_input(self, options): self.check_variants(options.variant, single=False) self.check_arches(options.arch, single=False) - def get_os_dir(self): - """ Return path to os directory + """Return path to os directory This is just a sanity check, the parser's URL should be the os dir. """ try: - os_dir = filter(lambda x: url_exists(x) \ - and x, [self.parser.url])[0] - except IndexError, e: - raise BX('%s no os_dir found: %s' % (self.parser.url, e)) + os_dir = filter(lambda x: url_exists(x) and x, [self.parser.url])[0] + except IndexError as e: + raise BX("%s no os_dir found: %s" % (self.parser.url, e)) return os_dir def _installable_isos_url(self, nfs_url, isos_path_from_compose=None): @@ -816,38 +902,41 @@ def _installable_isos_url(self, nfs_url, isos_path_from_compose=None): # Let's just guess! These are based on # well known locations for each family isos_path = self.isos_path - http_url_components = list(urlparse.urlparse(self.parser.url)) + http_url_components = list(urllib.parse.urlparse(self.parser.url)) http_url_path = http_url_components[2] normalized_isos_path = os.path.normpath(os.path.join(http_url_path, isos_path)) - if not normalized_isos_path.endswith('/'): - normalized_isos_path += '/' + if not normalized_isos_path.endswith("/"): + normalized_isos_path += "/" http_url_components[2] = normalized_isos_path - http_isos_url = urlparse.urlunparse(http_url_components) + http_isos_url = urllib.parse.urlunparse(http_url_components) reachable_iso_dir = url_exists(http_isos_url) if isos_path_from_compose and not reachable_iso_dir: # If .composeinfo says the isos path is there but it isn't, we # should let it be known. 
- raise IncompleteTree('Could not find iso url %s as specified ' - 'in composeinfo' % http_isos_url) + raise IncompleteTree( + "Could not find iso url %s as specified " + "in composeinfo" % http_isos_url + ) elif not isos_path_from_compose and not reachable_iso_dir: # We can't find the isos path, but we were only ever guessing. return None elif reachable_iso_dir: # We've found the isos path via http, convert it back to # nfs+iso URL. - nfs_url_components = list(urlparse.urlparse(nfs_url)) + nfs_url_components = list(urllib.parse.urlparse(nfs_url)) nfs_url_path = nfs_url_components[2] - normalized_isos_path = os.path.normpath(os.path.join(nfs_url_path, - isos_path)) - if not normalized_isos_path.endswith('/'): - normalized_isos_path += '/' + normalized_isos_path = os.path.normpath( + os.path.join(nfs_url_path, isos_path) + ) + if not normalized_isos_path.endswith("/"): + normalized_isos_path += "/" nfs_isos_url_components = list(nfs_url_components) nfs_isos_url_components[2] = normalized_isos_path - nfs_isos_url_components[0] = 'nfs+iso' - return urlparse.urlunparse(nfs_isos_url_components) + nfs_isos_url_components[0] = "nfs+iso" + return urllib.parse.urlunparse(nfs_isos_url_components) def process(self, urls, options, repos=None, tags=None, isos_path=None): - ''' + """ distro_data = dict( name='RHEL-6-U1', arches=['i386', 'x86_64'], arch='x86_64', @@ -867,78 +956,89 @@ def process(self, urls, options, repos=None, tags=None, isos_path=None): dict(type='initrd', path='images/pxeboot/initrd.img'), ]) - ''' + """ if not repos: repos = [] self.options = options self.scheduler = SchedulerProxy(options) self.tree = dict() # Make sure all url's end with / - urls = [os.path.join(url,'') for url in urls] - self.tree['urls'] = urls - family = self.options.family or \ - self.parser.get('general', 'family').replace(" ","") - version = self.options.version or \ - self.parser.get('general', 'version').replace("-",".") - self.tree['name'] = self.options.name or \ - self.parser.get('general', 'name', - '%s-%s' % (family,version) - ) + urls = [os.path.join(url, "") for url in urls] + self.tree["urls"] = urls + family = self.options.family or self.parser.get("general", "family").replace( + " ", "" + ) + version = self.options.version or self.parser.get("general", "version").replace( + "-", "." 
+ ) + self.tree["name"] = self.options.name or self.parser.get( + "general", "name", "%s-%s" % (family, version) + ) try: - self.tree['variant'] = self.options.variant[0] + self.tree["variant"] = self.options.variant[0] except IndexError: - self.tree['variant'] = self.parser.get('general','variant','') - self.tree['arch'] = self.parser.get('general', 'arch') - self.tree['tree_build_time'] = self.options.buildtime or \ - self.parser.get('general','timestamp', - self.parser.last_modified) - common_tags = tags or [] # passed in from .composeinfo - labels = self.parser.get('general', 'label','') - self.tree['tags'] = list(set(self.options.tags) | set(common_tags) | - set(map(string.strip, labels and labels.split(',') or []))) - self.tree['osmajor'] = "%s%s" % (family, version.split('.')[0]) - if version.find('.') != -1: - self.tree['osminor'] = version.split('.')[1] + self.tree["variant"] = self.parser.get("general", "variant", "") + self.tree["arch"] = self.parser.get("general", "arch") + self.tree["tree_build_time"] = self.options.buildtime or self.parser.get( + "general", "timestamp", self.parser.last_modified + ) + common_tags = tags or [] # passed in from .composeinfo + labels = self.parser.get("general", "label", "") + self.tree["tags"] = list( + set(self.options.tags) + | set(common_tags) + | set(map(lambda label: label.strip(), labels and labels.split(",") or [])) + ) + self.tree["osmajor"] = "%s%s" % (family, version.split(".")[0]) + if version.find(".") != -1: + self.tree["osminor"] = version.split(".")[1] else: - self.tree['osminor'] = '0' + self.tree["osminor"] = "0" - arches = self.parser.get('general', 'arches','') - self.tree['arches'] = map(string.strip, - arches and arches.split(',') or []) + arches = self.parser.get("general", "arches", "") + self.tree["arches"] = map( + lambda arch: arch.strip(), arches and arches.split(",") or [] + ) full_os_dir = self.get_os_dir() # These would have been passed from the Compose*.process() common_repos = repos if not common_repos: - common_repos = self.find_common_repos(full_os_dir, self.tree['arch']) - self.tree['repos'] = self.find_repos() + common_repos + common_repos = self.find_common_repos(full_os_dir, self.tree["arch"]) + self.tree["repos"] = self.find_repos() + common_repos # Add install images - self.tree['images'] = self.get_images() + self.tree["images"] = self.get_images() if not self.options.preserve_install_options: - self.tree['kernel_options'] = self.options.kopts - self.tree['kernel_options_post'] = self.options.kopts_post - self.tree['ks_meta'] = self.options.ks_meta - nfs_url = _get_url_by_scheme(urls, 'nfs') + self.tree["kernel_options"] = self.options.kopts + self.tree["kernel_options_post"] = self.options.kopts_post + self.tree["ks_meta"] = self.options.ks_meta + nfs_url = _get_url_by_scheme(urls, "nfs") if nfs_url: try: nfs_isos_url = self._installable_isos_url(nfs_url, isos_path) - except IncompleteTree, e: - logging.warn(str(e)) + except IncompleteTree as e: + logging.warning(str(e)) else: if nfs_isos_url: - self.tree['urls'].append(nfs_isos_url) + self.tree["urls"].append(nfs_isos_url) self.extend_tree() if options.json: - print json.dumps(self.tree) - logging.debug('\n%s' % pprint.pformat(self.tree)) + print(json.dumps(self.tree)) + logging.debug("\n%s" % pprint.pformat(self.tree)) try: self.add_to_beaker() - logging.info('%s %s %s added to beaker.' 
% (self.tree['name'], self.tree['variant'], self.tree['arch'])) - except (xmlrpclib.Fault, socket.error), e: - raise BX('failed to add %s %s %s to beaker: %s' % (self.tree['name'], self.tree['variant'], self.tree['arch'], e)) + logging.info( + "%s %s %s added to beaker." + % (self.tree["name"], self.tree["variant"], self.tree["arch"]) + ) + except (xmlrpc_client.Fault, socket.error) as e: + raise BX( + "failed to add %s %s %s to beaker: %s" + % (self.tree["name"], self.tree["variant"], self.tree["arch"], e) + ) def extend_tree(self): pass @@ -950,50 +1050,41 @@ def find_common_repos(self, repo_base, arch): ../../optional//debug/repodata ../debug/repodata """ - repo_paths = [('debuginfo', - 'debug', - '../debug'), - ('optional-debuginfo', - 'debug', - '../../optional/%s/debug' % arch), - ('optional', - 'optional', - '../../optional/%s/os' % arch), - ] + repo_paths = [ + ("debuginfo", "debug", "../debug"), + ("optional-debuginfo", "debug", "../../optional/%s/debug" % arch), + ("optional", "optional", "../../optional/%s/os" % arch), + ] repos = [] for repo in repo_paths: - if url_exists(os.path.join(repo_base, repo[2], 'repodata')): - repos.append(dict( - repoid=repo[0], - type=repo[1], - path=repo[2], - ) - ) + if url_exists(os.path.join(repo_base, repo[2], "repodata")): + repos.append( + dict( + repoid=repo[0], + type=repo[1], + path=repo[2], + ) + ) return repos def get_images(self): images = [] - images.append(dict(type='kernel', - path=self.get_kernel_path())) - images.append(dict(type='initrd', - path=self.get_initrd_path())) + images.append(dict(type="kernel", path=self.get_kernel_path())) + images.append(dict(type="initrd", path=self.get_initrd_path())) return images def add_to_beaker(self): self.scheduler.add_distro(self.tree) def run_jobs(self): - arches = [self.tree['arch']] - variants = [self.tree['variant']] - name = self.tree['name'] - tags = self.tree.get('tags', []) - osversion = '%s.%s' % (self.tree['osmajor'], - self.tree['osminor']) - self.scheduler.run_distro_test_job(name=name, - tags=tags, - osversion=osversion, - arches=arches, - variants=variants) + arches = [self.tree["arch"]] + variants = [self.tree["variant"]] + name = self.tree["name"] + tags = self.tree.get("tags", []) + osversion = "%s.%s" % (self.tree["osmajor"], self.tree["osminor"]) + self.scheduler.run_distro_test_job( + name=name, tags=tags, osversion=osversion, arches=arches, variants=variants + ) class TreeInfoLegacy(TreeInfoMixin, Importer): @@ -1001,20 +1092,23 @@ class TreeInfoLegacy(TreeInfoMixin, Importer): This version of .treeinfo importer has a workaround for missing images-$arch sections. 
""" - kernels = ['images/pxeboot/vmlinuz', - 'images/kernel.img', - 'ppc/ppc64/vmlinuz', - 'ppc/chrp/vmlinuz', - # We don't support iSeries right now 'ppc/iSeries/vmlinux', - ] - initrds = ['images/pxeboot/initrd.img', - 'images/initrd.img', - 'ppc/ppc64/ramdisk.image.gz', - 'ppc/chrp/ramdisk.image.gz', - # We don't support iSeries right now 'ppc/iSeries/ramdisk.image.gz', - ] - - isos_path = '../ftp-isos/' + + kernels = [ + "images/pxeboot/vmlinuz", + "images/kernel.img", + "ppc/ppc64/vmlinuz", + "ppc/chrp/vmlinuz", + # We don't support iSeries right now 'ppc/iSeries/vmlinux', + ] + initrds = [ + "images/pxeboot/initrd.img", + "images/initrd.img", + "ppc/ppc64/ramdisk.image.gz", + "ppc/chrp/ramdisk.image.gz", + # We don't support iSeries right now 'ppc/iSeries/ramdisk.image.gz', + ] + + isos_path = "../ftp-isos/" @classmethod def is_importer_for(cls, url, options=None): @@ -1022,31 +1116,37 @@ def is_importer_for(cls, url, options=None): if not parser.parse(url): return False for r in cls.required: - if parser.get(r['section'], r['key'], '') == '': + if parser.get(r["section"], r["key"], "") == "": return False for e in cls.excluded: - if parser.get(e['section'], e['key'], '') != '': + if parser.get(e["section"], e["key"], "") != "": return False - if not (parser.get('general', 'family').startswith("Red Hat Enterprise Linux")\ - or parser.get('general', 'family').startswith("CentOS")): + if not ( + parser.get("general", "family").startswith("Red Hat Enterprise Linux") + or parser.get("general", "family").startswith("CentOS") + ): return False - if int(parser.get('general', 'version').split('.')[0]) > 4: + if int(parser.get("general", "version").split(".")[0]) > 4: return False return parser def get_kernel_path(self): try: - return filter(lambda x: url_exists(os.path.join(self.parser.url,x)) \ - and x, [kernel for kernel in self.kernels])[0] - except IndexError, e: - raise BX('%s no kernel found: %s' % (self.parser.url, e)) + return filter( + lambda x: url_exists(os.path.join(self.parser.url, x)) and x, + [kernel for kernel in self.kernels], + )[0] + except IndexError as e: + raise BX("%s no kernel found: %s" % (self.parser.url, e)) def get_initrd_path(self): try: - return filter(lambda x: url_exists(os.path.join(self.parser.url,x)) \ - and x, [initrd for initrd in self.initrds])[0] - except IndexError, e: - raise BX('%s no kernel found: %s' % (self.parser.url, e)) + return filter( + lambda x: url_exists(os.path.join(self.parser.url, x)) and x, + [initrd for initrd in self.initrds], + )[0] + except IndexError as e: + raise BX("%s no kernel found: %s" % (self.parser.url, e)) def find_repos(self, *args, **kw): """ @@ -1066,55 +1166,47 @@ def find_repos(self, *args, **kw): """ repos = [] # ppc64 arch uses ppc for the repos - arch = self.tree['arch'].replace('ppc64','ppc') - - repo_paths = [('%s-debuginfo' % self.tree['variant'], - 'debug', - '../debug'), - ('%s-debuginfo' % self.tree['variant'], - 'debug', - '../repo-debug-%s-%s' % (self.tree['variant'], - arch)), - ('%s-optional-debuginfo' % self.tree['variant'], - 'debug', - '../optional/%s/debug' % arch), - ('%s' % self.tree['variant'], - 'variant', - '../repo-%s-%s' % (self.tree['variant'], - arch)), - ('%s' % self.tree['variant'], - 'variant', - '.'), - ('%s-optional' % self.tree['variant'], - 'optional', - '../../optional/%s/os' % arch), - ('VT', - 'addon', - 'VT'), - ('Server', - 'addon', - 'Server'), - ('Cluster', - 'addon', - 'Cluster'), - ('ClusterStorage', - 'addon', - 'ClusterStorage'), - ('Client', - 'addon', - 'Client'), - 
('Workstation', - 'addon', - 'Workstation'), - ] + arch = self.tree["arch"].replace("ppc64", "ppc") + + repo_paths = [ + ("%s-debuginfo" % self.tree["variant"], "debug", "../debug"), + ( + "%s-debuginfo" % self.tree["variant"], + "debug", + "../repo-debug-%s-%s" % (self.tree["variant"], arch), + ), + ( + "%s-optional-debuginfo" % self.tree["variant"], + "debug", + "../optional/%s/debug" % arch, + ), + ( + "%s" % self.tree["variant"], + "variant", + "../repo-%s-%s" % (self.tree["variant"], arch), + ), + ("%s" % self.tree["variant"], "variant", "."), + ( + "%s-optional" % self.tree["variant"], + "optional", + "../../optional/%s/os" % arch, + ), + ("VT", "addon", "VT"), + ("Server", "addon", "Server"), + ("Cluster", "addon", "Cluster"), + ("ClusterStorage", "addon", "ClusterStorage"), + ("Client", "addon", "Client"), + ("Workstation", "addon", "Workstation"), + ] for repo in repo_paths: - if url_exists(os.path.join(self.parser.url,repo[2],'repodata')): - repos.append(dict( - repoid=repo[0], - type=repo[1], - path=repo[2], - ) - ) + if url_exists(os.path.join(self.parser.url, repo[2], "repodata")): + repos.append( + dict( + repoid=repo[0], + type=repo[1], + path=repo[2], + ) + ) return repos @@ -1122,52 +1214,55 @@ class TreeInfoRhel5(TreeInfoMixin, Importer): # Used in RHEL5 and all CentOS releases from 5 onwards. # Has image locations but no repo info so we guess that. """ -[general] -family = Red Hat Enterprise Linux Server -timestamp = 1209596791.91 -totaldiscs = 1 -version = 5.2 -discnum = 1 -label = RELEASED -packagedir = Server -arch = ppc - -[images-ppc64] -kernel = ppc/ppc64/vmlinuz -initrd = ppc/ppc64/ramdisk.image.gz -zimage = images/netboot/ppc64.img - -[stage2] -instimage = images/minstg2.img -mainimage = images/stage2.img + [general] + family = Red Hat Enterprise Linux Server + timestamp = 1209596791.91 + totaldiscs = 1 + version = 5.2 + discnum = 1 + label = RELEASED + packagedir = Server + arch = ppc + + [images-ppc64] + kernel = ppc/ppc64/vmlinuz + initrd = ppc/ppc64/ramdisk.image.gz + zimage = images/netboot/ppc64.img + + [stage2] + instimage = images/minstg2.img + mainimage = images/stage2.img """ + @classmethod def is_importer_for(cls, url, options=None): parser = TparserRhel5() if not parser.parse(url): return False for r in cls.required: - if parser.get(r['section'], r['key'], '') == '': + if parser.get(r["section"], r["key"], "") == "": return False for e in cls.excluded: - if parser.get(e['section'], e['key'], '') != '': + if parser.get(e["section"], e["key"], "") != "": return False - if not parser.has_section_startswith('images-') or \ - parser.has_option('general', 'repository') or \ - parser.has_section_startswith('variant-') or \ - parser.has_section_startswith('addon-'): + if ( + not parser.has_section_startswith("images-") + or parser.has_option("general", "repository") + or parser.has_section_startswith("variant-") + or parser.has_section_startswith("addon-") + ): return False # Fedora has a special case below, see TreeInfoFedora - if 'Fedora' in parser.get('general', 'family'): + if "Fedora" in parser.get("general", "family"): return False return parser def get_kernel_path(self): - return self.parser.get('images-%s' % self.tree['arch'],'kernel') + return self.parser.get("images-%s" % self.tree["arch"], "kernel") def get_initrd_path(self): - return self.parser.get('images-%s' % self.tree['arch'],'initrd') + return self.parser.get("images-%s" % self.tree["arch"], "initrd") def find_repos(self): """ @@ -1186,310 +1281,305 @@ def find_repos(self): . 
""" # ppc64 arch uses ppc for the repos - arch = self.tree['arch'].replace('ppc64','ppc') - - repo_paths = [('VT', - 'addon', - 'VT'), - ('Server', - 'addon', - 'Server'), - ('Cluster', - 'addon', - 'Cluster'), - ('ClusterStorage', - 'addon', - 'ClusterStorage'), - ('Client', - 'addon', - 'Client'), - ('Workstation', - 'addon', - 'Workstation'), - ('distro', 'distro', '.'), - ] + arch = self.tree["arch"].replace("ppc64", "ppc") + + repo_paths = [ + ("VT", "addon", "VT"), + ("Server", "addon", "Server"), + ("Cluster", "addon", "Cluster"), + ("ClusterStorage", "addon", "ClusterStorage"), + ("Client", "addon", "Client"), + ("Workstation", "addon", "Workstation"), + ("distro", "distro", "."), + ] repos = [] for repo in repo_paths: - if url_exists(os.path.join(self.parser.url,repo[2],'repodata')): - repos.append(dict( - repoid=repo[0], - type=repo[1], - path=repo[2], - ) - ) + if url_exists(os.path.join(self.parser.url, repo[2], "repodata")): + repos.append( + dict( + repoid=repo[0], + type=repo[1], + path=repo[2], + ) + ) return repos class TreeInfoFedora(TreeInfoMixin, Importer): - # This is basically the same as TreeInfoRHEL5 except that it hardcodes + # This is basically the same as TreeInfoRHEL5 except that it hardcodes # 'Fedora' in the repoids. - """ + """ """ - """ @classmethod def is_importer_for(cls, url, options=None): parser = Tparser() if not parser.parse(url): return False for r in cls.required: - if parser.get(r['section'], r['key'], '') == '': + if parser.get(r["section"], r["key"], "") == "": return False for e in cls.excluded: - if parser.get(e['section'], e['key'], '') != '': + if parser.get(e["section"], e["key"], "") != "": return False - if not parser.get('general', 'family').startswith("Fedora"): + if not parser.get("general", "family").startswith("Fedora"): return False # Arm uses a different importer because of all the kernel types. 
- if parser.get('general', 'arch') in ['arm', 'armhfp']: + if parser.get("general", "arch") in ["arm", "armhfp"]: return False return parser def get_kernel_path(self): - return self.parser.get('images-%s' % self.tree['arch'],'kernel') + return self.parser.get("images-%s" % self.tree["arch"], "kernel") def get_initrd_path(self): - return self.parser.get('images-%s' % self.tree['arch'],'initrd') - + return self.parser.get("images-%s" % self.tree["arch"], "initrd") def find_common_repos(self, repo_base, arch): """ Fedora repos ../debug/repodata """ - repo_paths = [('Fedora-debuginfo', - 'debug', - '../debug'), - ] + repo_paths = [ + ("Fedora-debuginfo", "debug", "../debug"), + ] repos = [] for repo in repo_paths: - if url_exists(os.path.join(repo_base, repo[2], 'repodata')): - repos.append(dict( - repoid=repo[0], - type=repo[1], - path=repo[2], - ) - ) + if url_exists(os.path.join(repo_base, repo[2], "repodata")): + repos.append( + dict( + repoid=repo[0], + type=repo[1], + path=repo[2], + ) + ) return repos - def find_repos(self): """ using info from known locations """ repos = [] - repo_paths = [('Fedora', - 'variant', - '.'), - ('Fedora-Everything', - 'fedora', - '../../../Everything/%s/os' % self.tree['arch'])] + repo_paths = [ + ("Fedora", "variant", "."), + ( + "Fedora-Everything", + "fedora", + "../../../Everything/%s/os" % self.tree["arch"], + ), + ] for repo in repo_paths: - if url_exists(os.path.join(self.parser.url,repo[2],'repodata')): - repos.append(dict( - repoid=repo[0], - type=repo[1], - path=repo[2], - ) - ) + if url_exists(os.path.join(self.parser.url, repo[2], "repodata")): + repos.append( + dict( + repoid=repo[0], + type=repo[1], + path=repo[2], + ) + ) return repos + class TreeInfoFedoraArm(TreeInfoFedora, Importer): - """ + """ """ - """ @classmethod def is_importer_for(cls, url, options=None): parser = Tparser() if not parser.parse(url): return False for r in cls.required: - if parser.get(r['section'], r['key'], '') == '': + if parser.get(r["section"], r["key"], "") == "": return False for e in cls.excluded: - if parser.get(e['section'], e['key'], '') != '': + if parser.get(e["section"], e["key"], "") != "": return False - if not parser.get('general', 'family').startswith("Fedora"): + if not parser.get("general", "family").startswith("Fedora"): return False # Arm uses a different importer because of all the kernel types. 
- if parser.get('general', 'arch') not in ['arm', 'armhfp']: + if parser.get("general", "arch") not in ["arm", "armhfp"]: return False return parser def get_kernel_path(self, kernel_type=None): if kernel_type: - kernel_type = '%s-' % kernel_type + kernel_type = "%s-" % kernel_type else: - kernel_type = '' - return self.parser.get('images-%s%s' % (kernel_type, - self.tree['arch']),'kernel') + kernel_type = "" + return self.parser.get( + "images-%s%s" % (kernel_type, self.tree["arch"]), "kernel" + ) def get_initrd_path(self, kernel_type=None): if kernel_type: - kernel_type = '%s-' % kernel_type + kernel_type = "%s-" % kernel_type else: - kernel_type = '' - return self.parser.get('images-%s%s' % (kernel_type, - self.tree['arch']),'initrd') + kernel_type = "" + return self.parser.get( + "images-%s%s" % (kernel_type, self.tree["arch"]), "initrd" + ) def get_uimage_path(self, kernel_type=None): if kernel_type: - kernel_type = '%s-' % kernel_type + kernel_type = "%s-" % kernel_type else: - kernel_type = '' - return self.parser.get('images-%s%s' % (kernel_type, - self.tree['arch']),'uimage', "") + kernel_type = "" + return self.parser.get( + "images-%s%s" % (kernel_type, self.tree["arch"]), "uimage", "" + ) def get_uinitrd_path(self, kernel_type=None): if kernel_type: - kernel_type = '%s-' % kernel_type + kernel_type = "%s-" % kernel_type else: - kernel_type = '' - return self.parser.get('images-%s%s' % (kernel_type, - self.tree['arch']),'uinitrd', "") + kernel_type = "" + return self.parser.get( + "images-%s%s" % (kernel_type, self.tree["arch"]), "uinitrd", "" + ) def get_images(self): images = [] - images.append(dict(type='kernel', - path=self.get_kernel_path())) - images.append(dict(type='initrd', - path=self.get_initrd_path())) + images.append(dict(type="kernel", path=self.get_kernel_path())) + images.append(dict(type="initrd", path=self.get_initrd_path())) uimage = self.get_uimage_path() if uimage: - images.append(dict(type='uimage', - path=uimage)) + images.append(dict(type="uimage", path=uimage)) uinitrd = self.get_uinitrd_path() if uinitrd: - images.append(dict(type='uinitrd', - path=uinitrd)) - kernel_type_string = self.parser.get(self.tree['arch'], - 'platforms', '') - kernel_types = map(string.strip, - kernel_type_string and - kernel_type_string.split(',') or []) + images.append(dict(type="uinitrd", path=uinitrd)) + kernel_type_string = self.parser.get(self.tree["arch"], "platforms", "") + kernel_types = map( + lambda item: item.strip(), + kernel_type_string and kernel_type_string.split(",") or [], + ) for kernel_type in kernel_types: - images.append(dict(type='kernel', - kernel_type=kernel_type, - path=self.get_kernel_path( - kernel_type=kernel_type - ) - ) - ) - images.append(dict(type='uimage', - kernel_type=kernel_type, - path=self.get_uimage_path( - kernel_type=kernel_type - ) - ) - ) - images.append(dict(type='initrd', - kernel_type=kernel_type, - path=self.get_initrd_path( - kernel_type=kernel_type - ) - ) - ) - images.append(dict(type='uinitrd', - kernel_type=kernel_type, - path=self.get_uinitrd_path( - kernel_type=kernel_type - ) - ) - ) + images.append( + dict( + type="kernel", + kernel_type=kernel_type, + path=self.get_kernel_path(kernel_type=kernel_type), + ) + ) + images.append( + dict( + type="uimage", + kernel_type=kernel_type, + path=self.get_uimage_path(kernel_type=kernel_type), + ) + ) + images.append( + dict( + type="initrd", + kernel_type=kernel_type, + path=self.get_initrd_path(kernel_type=kernel_type), + ) + ) + images.append( + dict( + type="uinitrd", + 
kernel_type=kernel_type, + path=self.get_uinitrd_path(kernel_type=kernel_type), + ) + ) return images + class TreeInfoRhel6(TreeInfoMixin, Importer): # Used in RHS2 and RHEL6. - # variant-* section has a repository key, and an addons key pointing at + # variant-* section has a repository key, and an addons key pointing at # addon-* sections. """ -[addon-ScalableFileSystem] -identity = ScalableFileSystem/ScalableFileSystem.cert -name = Scalable Filesystem Support -repository = ScalableFileSystem - -[addon-ResilientStorage] -identity = ResilientStorage/ResilientStorage.cert -name = Resilient Storage -repository = ResilientStorage - -[images-x86_64] -kernel = images/pxeboot/vmlinuz -initrd = images/pxeboot/initrd.img -boot.iso = images/boot.iso - -[general] -family = Red Hat Enterprise Linux -timestamp = 1328166952.001091 -variant = Server -totaldiscs = 1 -version = 6.3 -discnum = 1 -packagedir = Packages -variants = Server -arch = x86_64 - -[images-xen] -initrd = images/pxeboot/initrd.img -kernel = images/pxeboot/vmlinuz - -[variant-Server] -addons = ResilientStorage,HighAvailability,ScalableFileSystem,LoadBalancer -identity = Server/Server.cert -repository = Server/repodata - -[addon-HighAvailability] -identity = HighAvailability/HighAvailability.cert -name = High Availability -repository = HighAvailability - -[checksums] -images/pxeboot/initrd.img = sha256:4ffa63cd7780ec0715bd1c50b9eda177ecf28c58094ca519cfb6bb6aca5c225a -images/efiboot.img = sha256:d9ba2cc6fd3286ed7081ce0846e9df7093f5d524461580854b7ac42259c574b1 -images/boot.iso = sha256:5e10d6d4e6e22a62cae1475da1599a8dac91ff7c3783fda7684cf780e067604b -images/pxeboot/vmlinuz = sha256:7180f7f46682555cb1e86a9f1fbbfcc193ee0a52501de9a9002c34528c3ef9ab -images/install.img = sha256:85aaf9f90efa4f43475e4828168a3f7755ecc62f6643d92d23361957160dbc69 -images/efidisk.img = sha256:e9bf66f54f85527e595c4f3b5afe03cdcd0bf279b861c7a20898ce980e2ce4ff - -[stage2] -mainimage = images/install.img - -[addon-LoadBalancer] -identity = LoadBalancer/LoadBalancer.cert -name = Load Balancer -repository = LoadBalancer + [addon-ScalableFileSystem] + identity = ScalableFileSystem/ScalableFileSystem.cert + name = Scalable Filesystem Support + repository = ScalableFileSystem + + [addon-ResilientStorage] + identity = ResilientStorage/ResilientStorage.cert + name = Resilient Storage + repository = ResilientStorage + + [images-x86_64] + kernel = images/pxeboot/vmlinuz + initrd = images/pxeboot/initrd.img + boot.iso = images/boot.iso + + [general] + family = Red Hat Enterprise Linux + timestamp = 1328166952.001091 + variant = Server + totaldiscs = 1 + version = 6.3 + discnum = 1 + packagedir = Packages + variants = Server + arch = x86_64 + + [images-xen] + initrd = images/pxeboot/initrd.img + kernel = images/pxeboot/vmlinuz + + [variant-Server] + addons = ResilientStorage,HighAvailability,ScalableFileSystem,LoadBalancer + identity = Server/Server.cert + repository = Server/repodata + + [addon-HighAvailability] + identity = HighAvailability/HighAvailability.cert + name = High Availability + repository = HighAvailability + + [checksums] + images/pxeboot/initrd.img = sha256:4ffa63cd7780ec0715bd1c50b9eda177ecf28c58094ca519cfb6bb6aca5c225a + images/efiboot.img = sha256:d9ba2cc6fd3286ed7081ce0846e9df7093f5d524461580854b7ac42259c574b1 + images/boot.iso = sha256:5e10d6d4e6e22a62cae1475da1599a8dac91ff7c3783fda7684cf780e067604b + images/pxeboot/vmlinuz = sha256:7180f7f46682555cb1e86a9f1fbbfcc193ee0a52501de9a9002c34528c3ef9ab + images/install.img = 
sha256:85aaf9f90efa4f43475e4828168a3f7755ecc62f6643d92d23361957160dbc69 + images/efidisk.img = sha256:e9bf66f54f85527e595c4f3b5afe03cdcd0bf279b861c7a20898ce980e2ce4ff + + [stage2] + mainimage = images/install.img + + [addon-LoadBalancer] + identity = LoadBalancer/LoadBalancer.cert + name = Load Balancer + repository = LoadBalancer """ + @classmethod def is_importer_for(cls, url, options=None): parser = Tparser() if not parser.parse(url): return False for r in cls.required: - if parser.get(r['section'], r['key'], '') == '': + if parser.get(r["section"], r["key"], "") == "": return False for e in cls.excluded: - if parser.get(e['section'], e['key'], '') != '': + if parser.get(e["section"], e["key"], "") != "": return False - if parser.get('images-%s' % parser.get('general','arch'), 'kernel', '') == '': + if parser.get("images-%s" % parser.get("general", "arch"), "kernel", "") == "": return False - if parser.get('images-%s' % parser.get('general','arch'), 'initrd', '') == '': + if parser.get("images-%s" % parser.get("general", "arch"), "initrd", "") == "": return False - if not (parser.has_section_startswith('images-') and - parser.has_section_startswith('variant-')): + if not ( + parser.has_section_startswith("images-") + and parser.has_section_startswith("variant-") + ): return False for section in parser.sections(): - if section.startswith('variant-') and \ - not parser.has_option(section, 'addons'): + if section.startswith("variant-") and not parser.has_option( + section, "addons" + ): return False return parser def get_kernel_path(self): - return self.parser.get('images-%s' % self.tree['arch'],'kernel') + return self.parser.get("images-%s" % self.tree["arch"], "kernel") def get_initrd_path(self): - return self.parser.get('images-%s' % self.tree['arch'],'initrd') + return self.parser.get("images-%s" % self.tree["arch"], "initrd") def find_repos(self): """ @@ -1498,33 +1588,39 @@ def find_repos(self): repos = [] try: - repopath = self.parser.get('variant-%s' % self.tree['variant'], - 'repository') + repopath = self.parser.get( + "variant-%s" % self.tree["variant"], "repository" + ) # remove the /repodata from the entry, this should not be there - repopath = repopath.replace('/repodata','') - repos.append(dict( - repoid=str(self.tree['variant']), - type='variant', - path=repopath, - ) - ) - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError), e: - logging.debug('.treeinfo has no repository for variant %s, %s' % (self.parser.url,e)) + repopath = repopath.replace("/repodata", "") + repos.append( + dict( + repoid=str(self.tree["variant"]), + type="variant", + path=repopath, + ) + ) + except (configparser.NoSectionError, configparser.NoOptionError) as e: + logging.debug( + ".treeinfo has no repository for variant %s, %s" % (self.parser.url, e) + ) try: - addons = self.parser.get('variant-%s' % self.tree['variant'], - 'addons') - addons = addons and addons.split(',') or [] + addons = self.parser.get("variant-%s" % self.tree["variant"], "addons") + addons = addons and addons.split(",") or [] for addon in addons: - repopath = self.parser.get('addon-%s' % addon, 'repository', '') + repopath = self.parser.get("addon-%s" % addon, "repository", "") if repopath: - repos.append(dict( - repoid=addon, - type='addon', - path=repopath, - ) - ) - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError), e: - logging.debug('.treeinfo has no addon repos for %s, %s' % (self.parser.url,e)) + repos.append( + dict( + repoid=addon, + type="addon", + path=repopath, + ) + ) + except 
(configparser.NoSectionError, configparser.NoOptionError) as e: + logging.debug( + ".treeinfo has no addon repos for %s, %s" % (self.parser.url, e) + ) return repos @@ -1534,7 +1630,7 @@ def is_importer_for(cls, url, options=None): parser = Tparser() if not parser.parse(url): return False - if parser.get('general', 'family') != "RHVH": + if parser.get("general", "family") != "RHVH": return False return parser @@ -1545,15 +1641,19 @@ def extend_tree(self): ks_meta = self.tree.get("ks_meta") or "" # RHVH assumes that installation is happening based on 'inst.ks' on kernel cmdline - ks_keyword = 'ks_keyword=inst.ks' - autopart_type = 'autopart_type=thinp liveimg={}'.format(img_rpm) + ks_keyword = "ks_keyword=inst.ks" + autopart_type = "autopart_type=thinp liveimg={}".format(img_rpm) self.tree["ks_meta"] = "{} {} {}".format(ks_meta, autopart_type, ks_keyword) def _find_image_update_rpm(self): base = dnf.Base() base.repos.add_new_repo(uuid.uuid4().hex, base.conf, baseurl=[self.parser.url]) base.fill_sack(load_system_repo=False) - pkgs = base.sack.query().filter(name="redhat-virtualization-host-image-update").run() + pkgs = ( + base.sack.query() + .filter(name="redhat-virtualization-host-image-update") + .run() + ) if not pkgs: raise IncompleteTree("Could not find a valid RHVH rpm") return pkgs[0].relativepath @@ -1562,15 +1662,15 @@ def find_repos(self): return [] def get_kernel_path(self): - return self.parser.get('images-%s' % self.tree['arch'],'kernel') + return self.parser.get("images-%s" % self.tree["arch"], "kernel") def get_initrd_path(self): - return self.parser.get('images-%s' % self.tree['arch'],'initrd') + return self.parser.get("images-%s" % self.tree["arch"], "initrd") class TreeInfoRhel7(TreeInfoMixin, Importer): # Used in RHEL7 GA. - # Main variant-* section has a repository and a variants key pointing at + # Main variant-* section has a repository and a variants key pointing at # addons (represented as additional variants). 
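For reference, the dnf-based lookup that the RHVH importer above performs in _find_image_update_rpm() can be exercised on its own roughly as follows. This is an illustrative sketch, not part of the patch: the helper name, the example URL, and the RuntimeError are placeholders; only the dnf calls mirror what the importer actually does.

    # Illustrative sketch of the dnf lookup used by TreeInfoRhvh above.
    import uuid

    import dnf


    def find_package_path(baseurl, package_name):
        """Return the repo-relative path of the first package matching package_name."""
        base = dnf.Base()
        # Register the tree as a throwaway repository (random id, as in the patch)
        # and load only its metadata, not the system repos.
        base.repos.add_new_repo(uuid.uuid4().hex, base.conf, baseurl=[baseurl])
        base.fill_sack(load_system_repo=False)
        pkgs = base.sack.query().filter(name=package_name).run()
        if not pkgs:
            raise RuntimeError("%s not found in %s" % (package_name, baseurl))
        return pkgs[0].relativepath


    # e.g. find_package_path("http://example.com/rhvh/",
    #                        "redhat-virtualization-host-image-update")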
@classmethod @@ -1579,113 +1679,118 @@ def is_importer_for(cls, url, options=None): if not parser.parse(url): return False for r in cls.required: - if parser.get(r['section'], r['key'], '') == '': + if parser.get(r["section"], r["key"], "") == "": return False for e in cls.excluded: - if parser.get(e['section'], e['key'], '') != '': + if parser.get(e["section"], e["key"], "") != "": return False - if parser.has_option('general', 'addons') or \ - not parser.has_section_startswith('variant-'): + if parser.has_option("general", "addons") or not parser.has_section_startswith( + "variant-" + ): return False return parser def find_repos(self): repos = [] try: - addons = self.parser.get('variant-%s' % self.tree['variant'], 'variants') - addons = addons.split(',') + addons = self.parser.get("variant-%s" % self.tree["variant"], "variants") + addons = addons.split(",") for addon in addons: - addon_section = 'variant-%s' % addon - addon_type = self.parser.get(addon_section, 'type', '') + addon_section = "variant-%s" % addon + addon_type = self.parser.get(addon_section, "type", "") # The type should be self-evident, but let's double check - if addon_type == 'addon': - repopath = self.parser.get(addon_section,'repository', '') + if addon_type == "addon": + repopath = self.parser.get(addon_section, "repository", "") if repopath: - repos.append(dict( - repoid=self.parser.get(addon_section,'id'), - type='addon', - path=repopath,) - ) - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError), e: - logging.debug('no addon repos for %s, %s' % (self.parser.url, e)) + repos.append( + dict( + repoid=self.parser.get(addon_section, "id"), + type="addon", + path=repopath, + ) + ) + except (configparser.NoSectionError, configparser.NoOptionError) as e: + logging.debug("no addon repos for %s, %s" % (self.parser.url, e)) return repos def get_kernel_path(self): - return self.parser.get('images-%s' % self.tree['arch'],'kernel') + return self.parser.get("images-%s" % self.tree["arch"], "kernel") def get_initrd_path(self): - return self.parser.get('images-%s' % self.tree['arch'],'initrd') + return self.parser.get("images-%s" % self.tree["arch"], "initrd") class TreeInfoRhel(TreeInfoMixin, Importer): # Only used in RHEL7 prior to GA?!? - # No variant-* sections, general has repository key, and addons key + # No variant-* sections, general has repository key, and addons key # pointing at addon-* sections. 
""" -[addon-HighAvailability] -id = HighAvailability -name = High Availability -repository = addons/HighAvailability -uid = Server-HighAvailability - -[addon-LoadBalancer] -id = LoadBalancer -name = Load Balancer -repository = addons/LoadBalancer -uid = Server-LoadBalancer - -[addon-ResilientStorage] -id = ResilientStorage -name = Resilient Storage -repository = addons/ResilientStorage -uid = Server-ResilientStorage - -[addon-ScalableFileSystem] -id = ScalableFileSystem -name = Scalable Filesystem Support -repository = addons/ScalableFileSystem -uid = Server-ScalableFileSystem - -[general] -addons = HighAvailability,LoadBalancer,ResilientStorage,ScalableFileSystem -arch = x86_64 -family = Red Hat Enterprise Linux -version = 7.0 -variant = Server -timestamp = -name = RHEL-7.0-20120201.0 -repository = - -[images-x86_64] -boot.iso = images/boot.iso -initrd = images/pxeboot/initrd.img -kernel = images/pxeboot/vmlinuz - -[images-xen] -initrd = images/pxeboot/initrd.img -kernel = images/pxeboot/vmlinuz + [addon-HighAvailability] + id = HighAvailability + name = High Availability + repository = addons/HighAvailability + uid = Server-HighAvailability + + [addon-LoadBalancer] + id = LoadBalancer + name = Load Balancer + repository = addons/LoadBalancer + uid = Server-LoadBalancer + + [addon-ResilientStorage] + id = ResilientStorage + name = Resilient Storage + repository = addons/ResilientStorage + uid = Server-ResilientStorage + + [addon-ScalableFileSystem] + id = ScalableFileSystem + name = Scalable Filesystem Support + repository = addons/ScalableFileSystem + uid = Server-ScalableFileSystem + + [general] + addons = HighAvailability,LoadBalancer,ResilientStorage,ScalableFileSystem + arch = x86_64 + family = Red Hat Enterprise Linux + version = 7.0 + variant = Server + timestamp = + name = RHEL-7.0-20120201.0 + repository = + + [images-x86_64] + boot.iso = images/boot.iso + initrd = images/pxeboot/initrd.img + kernel = images/pxeboot/vmlinuz + + [images-xen] + initrd = images/pxeboot/initrd.img + kernel = images/pxeboot/vmlinuz """ + @classmethod def is_importer_for(cls, url, options=None): parser = Tparser() if not parser.parse(url): return False for r in cls.required: - if parser.get(r['section'], r['key'], '') == '': + if parser.get(r["section"], r["key"], "") == "": return False for e in cls.excluded: - if parser.get(e['section'], e['key'], '') != '': + if parser.get(e["section"], e["key"], "") != "": return False - if parser.get('images-%s' % parser.get('general','arch'), 'kernel', '') == '': + if parser.get("images-%s" % parser.get("general", "arch"), "kernel", "") == "": return False - if parser.get('images-%s' % parser.get('general','arch'), 'initrd', '') == '': + if parser.get("images-%s" % parser.get("general", "arch"), "initrd", "") == "": return False - if not parser.has_option('general', 'repository') or \ - parser.has_section_startswith('variant-'): + if not parser.has_option( + "general", "repository" + ) or parser.has_section_startswith("variant-"): return False # Arm uses a different importer because of all the kernel types. 
- if parser.get('general', 'arch') in ['arm', 'armhfp']: + if parser.get("general", "arch") in ["arm", "armhfp"]: return False return parser @@ -1694,179 +1799,188 @@ def find_repos(self): using info from .treeinfo find addon repos """ repos = [] - repos.append(dict(repoid='distro', type='distro', - path=self.parser.get('general', 'repository'))) + repos.append( + dict( + repoid="distro", + type="distro", + path=self.parser.get("general", "repository"), + ) + ) try: - addons = self.parser.get('general', 'addons') - addons = addons and addons.split(',') or [] + addons = self.parser.get("general", "addons") + addons = addons and addons.split(",") or [] for addon in addons: - repopath = self.parser.get('addon-%s' % addon, 'repository', '') + repopath = self.parser.get("addon-%s" % addon, "repository", "") if repopath: - repos.append(dict( - repoid=addon, - type='addon', - path=repopath, - ) - ) - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError), e: - logging.debug('no addon repos for %s, %s' % (self.parser.url,e)) + repos.append( + dict( + repoid=addon, + type="addon", + path=repopath, + ) + ) + except (configparser.NoSectionError, configparser.NoOptionError) as e: + logging.debug("no addon repos for %s, %s" % (self.parser.url, e)) return repos def get_kernel_path(self): - return self.parser.get('images-%s' % self.tree['arch'],'kernel') + return self.parser.get("images-%s" % self.tree["arch"], "kernel") def get_initrd_path(self): - return self.parser.get('images-%s' % self.tree['arch'],'initrd') + return self.parser.get("images-%s" % self.tree["arch"], "initrd") class TreeInfoRhelArm(TreeInfoRhel, Importer): """ -[addon-HighAvailability] -id = HighAvailability -name = High Availability -repository = addons/HighAvailability -uid = Server-HighAvailability - -[addon-LoadBalancer] -id = LoadBalancer -name = Load Balancer -repository = addons/LoadBalancer -uid = Server-LoadBalancer - -[addon-ResilientStorage] -id = ResilientStorage -name = Resilient Storage -repository = addons/ResilientStorage -uid = Server-ResilientStorage - -[addon-ScalableFileSystem] -id = ScalableFileSystem -name = Scalable Filesystem Support -repository = addons/ScalableFileSystem -uid = Server-ScalableFileSystem - -[general] -addons = HighAvailability,LoadBalancer,ResilientStorage,ScalableFileSystem -arch = x86_64 -family = Red Hat Enterprise Linux -version = 7.0 -variant = Server -timestamp = -name = RHEL-7.0-20120201.0 -repository = - -[images-x86_64] -boot.iso = images/boot.iso -initrd = images/pxeboot/initrd.img -kernel = images/pxeboot/vmlinuz - -[images-xen] -initrd = images/pxeboot/initrd.img -kernel = images/pxeboot/vmlinuz + [addon-HighAvailability] + id = HighAvailability + name = High Availability + repository = addons/HighAvailability + uid = Server-HighAvailability + + [addon-LoadBalancer] + id = LoadBalancer + name = Load Balancer + repository = addons/LoadBalancer + uid = Server-LoadBalancer + + [addon-ResilientStorage] + id = ResilientStorage + name = Resilient Storage + repository = addons/ResilientStorage + uid = Server-ResilientStorage + + [addon-ScalableFileSystem] + id = ScalableFileSystem + name = Scalable Filesystem Support + repository = addons/ScalableFileSystem + uid = Server-ScalableFileSystem + + [general] + addons = HighAvailability,LoadBalancer,ResilientStorage,ScalableFileSystem + arch = x86_64 + family = Red Hat Enterprise Linux + version = 7.0 + variant = Server + timestamp = + name = RHEL-7.0-20120201.0 + repository = + + [images-x86_64] + boot.iso = images/boot.iso + initrd 
= images/pxeboot/initrd.img + kernel = images/pxeboot/vmlinuz + + [images-xen] + initrd = images/pxeboot/initrd.img + kernel = images/pxeboot/vmlinuz """ + @classmethod def is_importer_for(cls, url, options=None): parser = Tparser() if not parser.parse(url): return False for r in cls.required: - if parser.get(r['section'], r['key'], '') == '': + if parser.get(r["section"], r["key"], "") == "": return False for e in cls.excluded: - if parser.get(e['section'], e['key'], '') != '': + if parser.get(e["section"], e["key"], "") != "": return False - if parser.get('images-%s' % parser.get('general','arch'), 'kernel', '') == '': + if parser.get("images-%s" % parser.get("general", "arch"), "kernel", "") == "": return False - if parser.get('images-%s' % parser.get('general','arch'), 'initrd', '') == '': + if parser.get("images-%s" % parser.get("general", "arch"), "initrd", "") == "": return False - if not parser.has_option('general', 'repository') or \ - parser.has_section_startswith('variant-'): + if not parser.has_option( + "general", "repository" + ) or parser.has_section_startswith("variant-"): return False # Arm uses a different importer because of all the kernel types. - if parser.get('general', 'arch') not in ['arm', 'armhfp']: + if parser.get("general", "arch") not in ["arm", "armhfp"]: return False return parser def get_kernel_path(self, kernel_type=None): if kernel_type: - kernel_type = '%s-' % kernel_type + kernel_type = "%s-" % kernel_type else: - kernel_type = '' - return self.parser.get('images-%s%s' % (kernel_type, - self.tree['arch']),'kernel') + kernel_type = "" + return self.parser.get( + "images-%s%s" % (kernel_type, self.tree["arch"]), "kernel" + ) def get_initrd_path(self, kernel_type=None): if kernel_type: - kernel_type = '%s-' % kernel_type + kernel_type = "%s-" % kernel_type else: - kernel_type = '' - return self.parser.get('images-%s%s' % (kernel_type, - self.tree['arch']),'initrd') + kernel_type = "" + return self.parser.get( + "images-%s%s" % (kernel_type, self.tree["arch"]), "initrd" + ) def get_uimage_path(self, kernel_type=None): if kernel_type: - kernel_type = '%s-' % kernel_type + kernel_type = "%s-" % kernel_type else: - kernel_type = '' - return self.parser.get('images-%s%s' % (kernel_type, - self.tree['arch']),'uimage', "") + kernel_type = "" + return self.parser.get( + "images-%s%s" % (kernel_type, self.tree["arch"]), "uimage", "" + ) def get_uinitrd_path(self, kernel_type=None): if kernel_type: - kernel_type = '%s-' % kernel_type + kernel_type = "%s-" % kernel_type else: - kernel_type = '' - return self.parser.get('images-%s%s' % (kernel_type, - self.tree['arch']),'uinitrd', "") + kernel_type = "" + return self.parser.get( + "images-%s%s" % (kernel_type, self.tree["arch"]), "uinitrd", "" + ) + def get_images(self): images = [] - images.append(dict(type='kernel', - path=self.get_kernel_path())) - images.append(dict(type='initrd', - path=self.get_initrd_path())) + images.append(dict(type="kernel", path=self.get_kernel_path())) + images.append(dict(type="initrd", path=self.get_initrd_path())) uimage = self.get_uimage_path() if uimage: - images.append(dict(type='uimage', - path=uimage)) + images.append(dict(type="uimage", path=uimage)) uinitrd = self.get_uinitrd_path() if uinitrd: - images.append(dict(type='uinitrd', - path=uinitrd)) - kernel_type_string = self.parser.get(self.tree['arch'], - 'platforms', '') - kernel_types = map(string.strip, - kernel_type_string and - kernel_type_string.split(',') or []) + images.append(dict(type="uinitrd", path=uinitrd)) + 
kernel_type_string = self.parser.get(self.tree["arch"], "platforms", "") + kernel_types = map( + lambda item: item.strip(), + kernel_type_string and kernel_type_string.split(",") or [], + ) for kernel_type in kernel_types: - images.append(dict(type='kernel', - kernel_type=kernel_type, - path=self.get_kernel_path( - kernel_type=kernel_type - ) - ) - ) - images.append(dict(type='uimage', - kernel_type=kernel_type, - path=self.get_uimage_path( - kernel_type=kernel_type - ) - ) - ) - images.append(dict(type='initrd', - kernel_type=kernel_type, - path=self.get_initrd_path( - kernel_type=kernel_type - ) - ) - ) - images.append(dict(type='uinitrd', - kernel_type=kernel_type, - path=self.get_uinitrd_path( - kernel_type=kernel_type - ) - ) - ) + images.append( + dict( + type="kernel", + kernel_type=kernel_type, + path=self.get_kernel_path(kernel_type=kernel_type), + ) + ) + images.append( + dict( + type="uimage", + kernel_type=kernel_type, + path=self.get_uimage_path(kernel_type=kernel_type), + ) + ) + images.append( + dict( + type="initrd", + kernel_type=kernel_type, + path=self.get_initrd_path(kernel_type=kernel_type), + ) + ) + images.append( + dict( + type="uinitrd", + kernel_type=kernel_type, + path=self.get_uinitrd_path(kernel_type=kernel_type), + ) + ) return images @@ -1897,67 +2011,71 @@ def process(self, urls, options, repos=[]): self.scheduler = SchedulerProxy(options) self.tree = dict() - urls = [os.path.join(url,'') for url in urls] - self.tree['urls'] = urls + urls = [os.path.join(url, "") for url in urls] + self.tree["urls"] = urls if not options.preserve_install_options: - self.tree['kernel_options'] = options.kopts - self.tree['kernel_options_post'] = options.kopts_post - self.tree['ks_meta'] = options.ks_meta - family = options.family + self.tree["kernel_options"] = options.kopts + self.tree["kernel_options_post"] = options.kopts_post + self.tree["ks_meta"] = options.ks_meta + family = options.family version = options.version - self.tree['name'] = options.name + self.tree["name"] = options.name try: - self.tree['variant'] = options.variant[0] + self.tree["variant"] = options.variant[0] except IndexError: - self.tree['variant'] = '' + self.tree["variant"] = "" try: - self.tree['arch'] = options.arch[0] + self.tree["arch"] = options.arch[0] except IndexError: - self.tree['arch'] = '' - self.tree['tree_build_time'] = options.buildtime or \ - time.time() - self.tree['tags'] = options.tags - self.tree['osmajor'] = "%s%s" % (family, version.split('.')[0]) - if version.find('.') != -1: - self.tree['osminor'] = version.split('.')[1] + self.tree["arch"] = "" + self.tree["tree_build_time"] = options.buildtime or time.time() + self.tree["tags"] = options.tags + self.tree["osmajor"] = "%s%s" % (family, version.split(".")[0]) + if version.find(".") != -1: + self.tree["osminor"] = version.split(".")[1] else: - self.tree['osminor'] = '0' + self.tree["osminor"] = "0" - self.tree['arches'] = options.arch - self.tree['repos'] = repos + self.tree["arches"] = options.arch + self.tree["repos"] = repos # Add install images - self.tree['images'] = [] - self.tree['images'].append(dict(type='kernel', - path=options.kernel)) - self.tree['images'].append(dict(type='initrd', - path=options.initrd)) + self.tree["images"] = [] + self.tree["images"].append(dict(type="kernel", path=options.kernel)) + self.tree["images"].append(dict(type="initrd", path=options.initrd)) if options.json: - print json.dumps(self.tree) - logging.debug('\n%s' % pprint.pformat(self.tree)) + print(json.dumps(self.tree)) + 
logging.debug("\n%s" % pprint.pformat(self.tree)) try: self.add_to_beaker() - logging.info('%s added to beaker.' % self.tree['name']) - except (xmlrpclib.Fault, socket.error), e: - raise BX('failed to add %s to beaker: %s' % (self.tree['name'],e)) + logging.info("%s added to beaker." % self.tree["name"]) + except (xmlrpc_client.Fault, socket.error) as e: + raise BX("failed to add %s to beaker: %s" % (self.tree["name"], e)) def add_to_beaker(self): self.scheduler.add_distro(self.tree) + def Build(url, options=None): # Try all other importers before trying NakedTree for cls in Importer.__subclasses__() + [NakedTree]: parser = cls.is_importer_for(url, options) - if parser != False: + if parser: logging.debug("\tImporter %s Matches", cls.__name__) logging.info("Attempting to import: %s", url) return cls(parser) else: logging.debug("\tImporter %s does not match", cls.__name__) - raise BX('No valid importer found for %s' % url) + raise BX("No valid importer found for %s" % url) + + +_primary_methods = [ + "http", + "https", + "ftp", +] -_primary_methods = ['http', 'https', 'ftp',] def _get_primary_url(urls): """Return primary method used to import distro @@ -1967,16 +2085,17 @@ def _get_primary_url(urls): nfs can't be the primary install method. """ for url in urls: - method = url.split(':',1)[0] + method = url.split(":", 1)[0] if method in _primary_methods: primary = url return primary return None + def _get_url_by_scheme(urls, scheme): """Return the first url that matches the given scheme""" for url in urls: - method = url.split(':',1)[0] + method = url.split(":", 1)[0] if method == scheme: return url return None @@ -1987,86 +2106,111 @@ def main(): description = """Imports distro(s) from the given distro_url. Valid distro_urls are nfs://, http:// and ftp://. A primary distro_url of either http:// or ftp:// must be specified. In order for an import to succeed a .treeinfo or a .composeinfo must be present at the distro_url or you can do what is called a "naked" import if you specify the following arguments: --family, --version, --name, --arch, --kernel, --initrd. Only one tree can be imported at a time when doing a naked import.""" parser = OptionParser(usage=usage, description=description) - parser.add_option("-j", "--json", - default=False, - action='store_true', - help="Prints the tree to be imported, in JSON format") - parser.add_option("-c", "--add-distro-cmd", - default="/var/lib/beaker/addDistro.sh", - help="Command to run to add a new distro") - parser.add_option("-n", "--name", - default=None, - help="Alternate name to use, otherwise we read it from .treeinfo") - parser.add_option("-t", "--tag", - default=[], - action="append", - dest="tags", - help="Additional tags to add") - parser.add_option("-r", "--run-jobs", - action='store_true', - default=False, - help="Run automated Jobs") - parser.add_option("-v", "--debug", - action='store_true', - default=False, - help="show debug messages") - parser.add_option('--dry-run', action='store_true', - help='Do not actually add any distros to beaker') - parser.add_option("-q", "--quiet", - action='store_true', - default=False, - help="less messages") - parser.add_option("--family", - default=None, - help="Specify family") - parser.add_option("--variant", - action='append', - default=[], - help="Specify variant. 
Multiple values are valid when importing a compose >=RHEL7") - parser.add_option("--version", - default=None, - help="Specify version") - parser.add_option("--kopts", - default=None, - help="add kernel options to use for install") - parser.add_option("--kopts-post", - default=None, - help="add kernel options to use for after install") - parser.add_option("--ks-meta", - default=None, - help="add variables to use in kickstart templates") - parser.add_option("--preserve-install-options", - action='store_true', - default=False, - help=("Do not overwrite the 'Install Options' (Kickstart " - "Metadata, Kernel Options, & Kernel Options Post) already " - "stored for the distro. This option can not be used with " - "any of --kopts, --kopts-post, or --ks-meta") - ) - parser.add_option("--buildtime", - default=None, - type=float, - help="Specify build time") - parser.add_option("--arch", - action='append', - default=[], - help="Specify arch. Multiple values are valid when importing a compose") - parser.add_option("--ignore-missing-tree-compose", - dest='ignore_missing', - action='store_true', - default=False, - help="If a specific tree within a compose is missing, do not print any errors") - group = OptionGroup(parser, "Naked Tree Options", - "These options only apply when importing without a .treeinfo or .composeinfo") - group.add_option("--kernel", - default=None, - help="Specify path to kernel (relative to distro_url)") - group.add_option("--initrd", - default=None, - help="Specify path to initrd (relative to distro_url)") - group.add_option("--lab-controller", - default="http://localhost:8000", - help="Specify which lab controller to import to. Defaults to http://localhost:8000") + parser.add_option( + "-j", + "--json", + default=False, + action="store_true", + help="Prints the tree to be imported, in JSON format", + ) + parser.add_option( + "-c", + "--add-distro-cmd", + default="/var/lib/beaker/addDistro.sh", + help="Command to run to add a new distro", + ) + parser.add_option( + "-n", + "--name", + default=None, + help="Alternate name to use, otherwise we read it from .treeinfo", + ) + parser.add_option( + "-t", + "--tag", + default=[], + action="append", + dest="tags", + help="Additional tags to add", + ) + parser.add_option( + "-r", + "--run-jobs", + action="store_true", + default=False, + help="Run automated Jobs", + ) + parser.add_option( + "-v", "--debug", action="store_true", default=False, help="show debug messages" + ) + parser.add_option( + "--dry-run", + action="store_true", + help="Do not actually add any distros to beaker", + ) + parser.add_option( + "-q", "--quiet", action="store_true", default=False, help="less messages" + ) + parser.add_option("--family", default=None, help="Specify family") + parser.add_option( + "--variant", + action="append", + default=[], + help="Specify variant. Multiple values are valid when importing a compose >=RHEL7", + ) + parser.add_option("--version", default=None, help="Specify version") + parser.add_option( + "--kopts", default=None, help="add kernel options to use for install" + ) + parser.add_option( + "--kopts-post", default=None, help="add kernel options to use for after install" + ) + parser.add_option( + "--ks-meta", default=None, help="add variables to use in kickstart templates" + ) + parser.add_option( + "--preserve-install-options", + action="store_true", + default=False, + help=( + "Do not overwrite the 'Install Options' (Kickstart " + "Metadata, Kernel Options, & Kernel Options Post) already " + "stored for the distro. 
This option can not be used with " + "any of --kopts, --kopts-post, or --ks-meta" + ), + ) + parser.add_option( + "--buildtime", default=None, type=float, help="Specify build time" + ) + parser.add_option( + "--arch", + action="append", + default=[], + help="Specify arch. Multiple values are valid when importing a compose", + ) + parser.add_option( + "--ignore-missing-tree-compose", + dest="ignore_missing", + action="store_true", + default=False, + help="If a specific tree within a compose is missing, do not print any errors", + ) + group = OptionGroup( + parser, + "Naked Tree Options", + "These options only apply when importing without a .treeinfo or .composeinfo", + ) + group.add_option( + "--kernel", default=None, help="Specify path to kernel (relative to distro_url)" + ) + group.add_option( + "--initrd", default=None, help="Specify path to initrd (relative to distro_url)" + ) + group.add_option( + "--lab-controller", + default="http://localhost:8000", + help="Specify which lab controller to import to. Defaults to http://localhost:8000", + ) parser.add_option_group(group) (opts, urls) = parser.parse_args() @@ -2082,41 +2226,47 @@ def main(): if opts.preserve_install_options: if any([opts.kopts, opts.kopts_post, opts.ks_meta]): - logging.critical("--preserve-install-options can not be used with any of: " - "--kopt, --kopts-post, or --ks-meta") + logging.critical( + "--preserve-install-options can not be used with any of: " + "--kopt, --kopts-post, or --ks-meta" + ) sys.exit(4) if not urls: - logging.critical('No location(s) specified!') + logging.critical("No location(s) specified!") sys.exit(1) primary_url = _get_primary_url(urls) - if primary_url == None: - logging.critical('missing a valid primary installer! %s, are valid install methods' % ' and '.join(_primary_methods)) + if primary_url is None: + logging.critical( + "missing a valid primary installer! %s, are valid install methods" + % " and ".join(_primary_methods) + ) sys.exit(2) if opts.dry_run: - logging.info('Dry Run only, no data will be sent to beaker') + logging.info("Dry Run only, no data will be sent to beaker") exit_status = [] try: build = Build(primary_url, options=opts) try: build.check_input(opts) exit_status.append(build.process(urls, opts)) - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError), e: + except (configparser.NoSectionError, configparser.NoOptionError) as e: logging.critical(str(e)) sys.exit(3) - except (xmlrpclib.Fault,BX), err: + except (xmlrpc_client.Fault, BX) as err: logging.critical(err) sys.exit(127) if opts.run_jobs: - logging.info('running jobs.') + logging.info("running jobs.") build.run_jobs() # if the list of exit_status-es contain any non-zero # value it means that at least one tree failed to import - # correctly, and hence set the exit status of the script + # correctly, and hence set the exit status of the script # accordingly return bool(any(exit_status)) -if __name__ == '__main__': + +if __name__ == "__main__": sys.exit(main()) diff --git a/LabController/src/bkr/labcontroller/expire_distros.py b/LabController/src/bkr/labcontroller/expire_distros.py index 182d802af..3253ca5ec 100644 --- a/LabController/src/bkr/labcontroller/expire_distros.py +++ b/LabController/src/bkr/labcontroller/expire_distros.py @@ -1,20 +1,19 @@ - # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
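# --- Editorial note (not part of the patch) ---------------------------------
# A minimal sketch of the primary-URL selection performed by _get_primary_url()
# in the importer above: only http, https and ftp may act as the primary
# install method, so the nfs URL in this hypothetical list is skipped.
urls = ["nfs://lab.example.com:/distros/RHEL-9/",
        "http://lab.example.com/distros/RHEL-9/"]
primary = next((u for u in urls if u.split(":", 1)[0] in ("http", "https", "ftp")), None)
# primary == "http://lab.example.com/distros/RHEL-9/"
# -----------------------------------------------------------------------------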
-import sys, os -import xmlrpclib -import urllib2 -import urlparse +import os +import sys + +from six.moves import urllib, xmlrpc_client def check_http(url): try: - urllib2.urlopen(url, timeout=120) + urllib.request.urlopen(url, timeout=120) return True - except urllib2.HTTPError as e: + except urllib.error.HTTPError as e: if e.code in (404, 410): return False else: @@ -23,10 +22,10 @@ def check_http(url): def check_ftp(url): try: - urllib2.urlopen(url, timeout=120) + urllib.request.urlopen(url, timeout=120) return True - except urllib2.URLError as e: - if '550' in e.reason: + except urllib.error.URLError as e: + if "550" in e.reason: return False else: raise @@ -41,19 +40,20 @@ def check_nfs(tree): Make sure the tree is accessible, check that the server is up first. """ - _, nfs_server, nfs_path, _, _, _ = urlparse.urlparse(tree) + _, nfs_server, nfs_path, _, _, _ = urllib.parse.urlparse(tree) # Beaker uses a non-standard syntax for NFS URLs, inherited from Cobbler: # nfs://server:/path # so we need to strip a trailing colon from the hostname portion. - nfs_server = nfs_server.rstrip(':') - server_path = os.path.join('/net', nfs_server) - if nfs_path.startswith('/'): + nfs_server = nfs_server.rstrip(":") + server_path = os.path.join("/net", nfs_server) + if nfs_path.startswith("/"): nfs_path = nfs_path[1:] tree_path = os.path.join(server_path, nfs_path) if not os.path.exists(server_path): - raise NFSServerInaccessible('Cannot access NFS server %s ' - 'or autofs not running (%s does not exist)' - % (nfs_server, server_path)) + raise NFSServerInaccessible( + "Cannot access NFS server %s " + "or autofs not running (%s does not exist)" % (nfs_server, server_path) + ) if not os.path.exists(tree_path): return False return True @@ -64,43 +64,53 @@ def check_url(url): Returns True if the given URL exists. 
""" - scheme = urlparse.urlparse(url).scheme - if scheme == 'nfs' or scheme.startswith('nfs+'): + scheme = urllib.parse.urlparse(url).scheme + if scheme == "nfs" or scheme.startswith("nfs+"): return check_nfs(url) - elif scheme == 'http' or scheme == 'https': + elif scheme == "http" or scheme == "https": return check_http(url) - elif scheme == 'ftp': + elif scheme == "ftp": return check_ftp(url) else: - raise ValueError('Unrecognised URL scheme %s for tree %s' % (scheme, url)) + raise ValueError("Unrecognised URL scheme %s for tree %s" % (scheme, url)) -def check_all_trees(ignore_errors=False, - dry_run=False, - lab_controller='http://localhost:8000', - remove_all=False): - proxy = xmlrpclib.ServerProxy(lab_controller, allow_none=True) +def check_all_trees( + ignore_errors=False, + dry_run=False, + lab_controller="http://localhost:8000", + remove_all=False, +): + proxy = xmlrpc_client.ServerProxy(lab_controller, allow_none=True) rdistro_trees = [] distro_trees = proxy.get_distro_trees() if not remove_all: for distro_tree in distro_trees: accessible = False - for lc, url in distro_tree['available']: + for lc, url in distro_tree["available"]: try: if check_url(url): accessible = True else: - print('{0} is missing [Distro Tree ID {1}]'.format( - url, - distro_tree['distro_tree_id'])) - except (urllib2.URLError, urllib2.HTTPError, NFSServerInaccessible) as e: + print( + "{0} is missing [Distro Tree ID {1}]".format( + url, distro_tree["distro_tree_id"] + ) + ) + except ( + urllib.error.URLError, + urllib.error.HTTPError, + NFSServerInaccessible, + ) as e: if ignore_errors: # suppress exception, assume the tree still exists accessible = True else: - sys.stderr.write('Error checking for existence of URL %s ' - 'for distro tree %s:\n%s\n' - % (url, distro_tree['distro_tree_id'], e)) + sys.stderr.write( + "Error checking for existence of URL %s " + "for distro tree %s:\n%s\n" + % (url, distro_tree["distro_tree_id"], e) + ) sys.exit(1) if not accessible: # All methods were inaccessible! @@ -113,39 +123,60 @@ def check_all_trees(ignore_errors=False, if len(distro_trees) != len(rdistro_trees) or remove_all: for distro_tree in rdistro_trees: if dry_run: - print('Distro marked for remove %s:%d' % (distro_tree['distro_name'], - distro_tree['distro_tree_id'])) + print( + "Distro marked for remove %s:%d" + % (distro_tree["distro_name"], distro_tree["distro_tree_id"]) + ) else: - print('Removing distro %s:%d' % (distro_tree['distro_name'], - distro_tree['distro_tree_id'])) - proxy.remove_distro_trees([distro_tree['distro_tree_id']]) + print( + "Removing distro %s:%d" + % (distro_tree["distro_name"], distro_tree["distro_tree_id"]) + ) + proxy.remove_distro_trees([distro_tree["distro_tree_id"]]) else: - sys.stderr.write('All distros are missing! Please check your server!\n') + sys.stderr.write("All distros are missing! Please check your server!\n") sys.exit(1) def main(): from optparse import OptionParser + parser = OptionParser() - parser.add_option('--ignore-errors', default=False, action='store_true', - help='Ignore all network errors when communicating with mirrors.') - parser.add_option('--dry-run', default=False, action='store_true', - help='Prints no longer accessible distro without updating the database.') - parser.add_option('--lab-controller', - default='http://localhost:8000', - help='Specify which lab controller to import to. 
' - 'Defaults to http://localhost:8000.') - parser.add_option('--remove-all', default=False, action='store_true', - help='Remove all distros from lab controller.') + parser.add_option( + "--ignore-errors", + default=False, + action="store_true", + help="Ignore all network errors when communicating with mirrors.", + ) + parser.add_option( + "--dry-run", + default=False, + action="store_true", + help="Prints no longer accessible distro without updating the database.", + ) + parser.add_option( + "--lab-controller", + default="http://localhost:8000", + help="Specify which lab controller to import to. " + "Defaults to http://localhost:8000.", + ) + parser.add_option( + "--remove-all", + default=False, + action="store_true", + help="Remove all distros from lab controller.", + ) options, args = parser.parse_args() try: - check_all_trees(options.ignore_errors, - options.dry_run, - options.lab_controller, - options.remove_all) + check_all_trees( + options.ignore_errors, + options.dry_run, + options.lab_controller, + options.remove_all, + ) except KeyboardInterrupt: pass -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/LabController/src/bkr/labcontroller/log_storage.py b/LabController/src/bkr/labcontroller/log_storage.py index 355be2051..522d0d292 100644 --- a/LabController/src/bkr/labcontroller/log_storage.py +++ b/LabController/src/bkr/labcontroller/log_storage.py @@ -1,4 +1,3 @@ - # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or @@ -6,43 +5,45 @@ # We are talking about job logs here, not logs produced by the daemons. -import os, os.path import errno +import os +import os.path + from bkr.common.helpers import makedirs_ignore -class LogFile(object): +class LogFile(object): def __init__(self, path, register_func, create=True): - self.path = path #: absolute path where the log will be stored - self.register_func = register_func #: called only if the file was created - self.create = create #: create the file if it doesn't exist + self.path = path #: absolute path where the log will be stored + self.register_func = register_func #: called only if the file was created + self.create = create #: create the file if it doesn't exist def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, self.path) + return "%s(%r)" % (self.__class__.__name__, self.path) def open_ro(self): """ If you just want to read the log, call this instead of entering the context manager. 
""" - return open(self.path, 'r') + return open(self.path, "r") def __enter__(self): - makedirs_ignore(os.path.dirname(self.path), 0755) + makedirs_ignore(os.path.dirname(self.path), 0o755) created = False if self.create: try: - # stdio does not have any mode string which corresponds to this + # stdio does not have any mode string which corresponds to this # combination of flags, so we have to use raw os.open :-( - fd = os.open(self.path, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0644) + fd = os.open(self.path, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o644) created = True - except (OSError, IOError), e: + except (OSError, IOError) as e: if e.errno != errno.EEXIST: raise fd = os.open(self.path, os.O_RDWR) else: fd = os.open(self.path, os.O_RDWR) try: - self.f = os.fdopen(fd, 'r+') + self.f = os.fdopen(fd, "r+") except Exception: os.close(fd) raise @@ -65,68 +66,99 @@ def truncate(self, size): def update_chunk(self, data, offset): if offset < 0: - raise ValueError('Offset cannot be negative') + raise ValueError("Offset cannot be negative") self.f.seek(offset, os.SEEK_SET) - # XXX the original uploadFile acquires an exclusive lock while writing, + # XXX the original uploadFile acquires an exclusive lock while writing, # for no reason that I can discern self.f.write(data) self.f.flush() + class LogStorage(object): """ Handles storage of job logs on the local filesystem. - The old XML-RPC API doesn't include the recipe ID with the task or result - upload calls. So for now, everything is stored flat. Eventually it would be + The old XML-RPC API doesn't include the recipe ID with the task or result + upload calls. So for now, everything is stored flat. Eventually it would be nice to arrange things hierarchically with everything under recipe instead. """ def __init__(self, base_dir, base_url, hub): self.base_dir = base_dir - if not base_url.endswith('/'): - base_url += '/' # really it is always a directory + if not base_url.endswith("/"): + base_url += "/" # really it is always a directory self.base_url = base_url self.hub = hub def recipe(self, recipe_id, path, create=True): - path = os.path.normpath(path.lstrip('/')) - if path.startswith('../'): - raise ValueError('Upload path not allowed: %s' % path) - recipe_base_dir = os.path.join(self.base_dir, 'recipes', - (recipe_id[:-3] or '0') + '+', recipe_id, '') - recipe_base_url = '%srecipes/%s+/%s/' % (self.base_url, - recipe_id[:-3] or '0', recipe_id) - return LogFile(os.path.join(recipe_base_dir, path), - lambda: self.hub.recipes.register_file(recipe_base_url, - recipe_id, os.path.dirname(path), os.path.basename(path), - recipe_base_dir), - create=create) + path = os.path.normpath(path.lstrip("/")) + if path.startswith("../"): + raise ValueError("Upload path not allowed: %s" % path) + recipe_base_dir = os.path.join( + self.base_dir, "recipes", (recipe_id[:-3] or "0") + "+", recipe_id, "" + ) + recipe_base_url = "%srecipes/%s+/%s/" % ( + self.base_url, + recipe_id[:-3] or "0", + recipe_id, + ) + return LogFile( + os.path.join(recipe_base_dir, path), + lambda: self.hub.recipes.register_file( + recipe_base_url, + recipe_id, + os.path.dirname(path), + os.path.basename(path), + recipe_base_dir, + ), + create=create, + ) def task(self, task_id, path, create=True): - path = os.path.normpath(path.lstrip('/')) - if path.startswith('../'): - raise ValueError('Upload path not allowed: %s' % path) - task_base_dir = os.path.join(self.base_dir, 'tasks', - (task_id[:-3] or '0') + '+', task_id, '') - task_base_url = '%stasks/%s+/%s/' % (self.base_url, - task_id[:-3] or 
'0', task_id) - return LogFile(os.path.join(task_base_dir, path), - lambda: self.hub.recipes.tasks.register_file(task_base_url, - task_id, os.path.dirname(path), os.path.basename(path), - task_base_dir), - create=create) + path = os.path.normpath(path.lstrip("/")) + if path.startswith("../"): + raise ValueError("Upload path not allowed: %s" % path) + task_base_dir = os.path.join( + self.base_dir, "tasks", (task_id[:-3] or "0") + "+", task_id, "" + ) + task_base_url = "%stasks/%s+/%s/" % ( + self.base_url, + task_id[:-3] or "0", + task_id, + ) + return LogFile( + os.path.join(task_base_dir, path), + lambda: self.hub.recipes.tasks.register_file( + task_base_url, + task_id, + os.path.dirname(path), + os.path.basename(path), + task_base_dir, + ), + create=create, + ) def result(self, result_id, path, create=True): - path = os.path.normpath(path.lstrip('/')) - if path.startswith('../'): - raise ValueError('Upload path not allowed: %s' % path) - result_base_dir = os.path.join(self.base_dir, 'results', - (result_id[:-3] or '0') + '+', result_id, '') - result_base_url = '%sresults/%s+/%s/' % (self.base_url, - result_id[:-3] or '0', result_id) - return LogFile(os.path.join(result_base_dir, path), - lambda: self.hub.recipes.tasks.register_result_file(result_base_url, - result_id, os.path.dirname(path), os.path.basename(path), - result_base_dir), - create=create) + path = os.path.normpath(path.lstrip("/")) + if path.startswith("../"): + raise ValueError("Upload path not allowed: %s" % path) + result_base_dir = os.path.join( + self.base_dir, "results", (result_id[:-3] or "0") + "+", result_id, "" + ) + result_base_url = "%sresults/%s+/%s/" % ( + self.base_url, + result_id[:-3] or "0", + result_id, + ) + return LogFile( + os.path.join(result_base_dir, path), + lambda: self.hub.recipes.tasks.register_result_file( + result_base_url, + result_id, + os.path.dirname(path), + os.path.basename(path), + result_base_dir, + ), + create=create, + ) diff --git a/LabController/src/bkr/labcontroller/main.py b/LabController/src/bkr/labcontroller/main.py index 72d5557e0..15adf4073 100644 --- a/LabController/src/bkr/labcontroller/main.py +++ b/LabController/src/bkr/labcontroller/main.py @@ -1,33 +1,46 @@ - # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
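# --- Editorial note (not part of the patch) ---------------------------------
# A small sketch of the flat log layout built by LogStorage.recipe() above:
# IDs are bucketed by stripping the last three digits, with "0" as the
# fallback bucket for short IDs. The IDs below are hypothetical.
recipe_id = "12345"
bucket = recipe_id[:-3] or "0"                  # "12"
path = "recipes/%s+/%s/" % (bucket, recipe_id)  # "recipes/12+/12345/"
# A recipe id of "7" would land in "recipes/0+/7/".
# -----------------------------------------------------------------------------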
-import os -import time -import sys +import logging import signal +import sys +from datetime import datetime +from optparse import OptionParser + import daemon +import gevent +import gevent.event +import gevent.monkey +import gevent.pool +import gevent.pywsgi from daemon import pidfile -from optparse import OptionParser -from datetime import datetime -from SimpleXMLRPCServer import SimpleXMLRPCDispatcher -from DocXMLRPCServer import XMLRPCDocGenerator from flask.wrappers import Request, Response -from werkzeug.routing import Map as RoutingMap, Rule -from werkzeug.exceptions import HTTPException, NotFound, MethodNotAllowed, \ -BadRequest, RequestEntityTooLarge -import gevent, gevent.pool, gevent.wsgi, gevent.event, gevent.monkey +from six.moves.xmlrpc_server import SimpleXMLRPCDispatcher +from werkzeug.exceptions import ( + BadRequest, + HTTPException, + MethodNotAllowed, + RequestEntityTooLarge, +) +from werkzeug.routing import Map as RoutingMap +from werkzeug.routing import Rule + from bkr.common.helpers import RepeatTimer -from bkr.labcontroller.proxy import Proxy, ProxyHTTP from bkr.labcontroller.config import get_conf, load_conf +from bkr.labcontroller.proxy import Proxy, ProxyHTTP from bkr.log import log_to_stream, log_to_syslog -import logging + +try: + from xmlrpc.server import XMLRPCDocGenerator +except ImportError: + from DocXMLRPCServer import XMLRPCDocGenerator + logger = logging.getLogger(__name__) -class XMLRPCDispatcher(SimpleXMLRPCDispatcher, XMLRPCDocGenerator): +class XMLRPCDispatcher(SimpleXMLRPCDispatcher, XMLRPCDocGenerator): def __init__(self): SimpleXMLRPCDispatcher.__init__(self, allow_none=True) XMLRPCDocGenerator.__init__(self) @@ -41,107 +54,154 @@ def _dispatch(self, method, params): try: result = SimpleXMLRPCDispatcher._dispatch(self, method, params) except: - logger.exception('Error handling XML-RPC call %s', str(method)) - logger.debug('Time: %s %s %s', datetime.utcnow() - start, str(method), str(params)[0:50]) + logger.exception("Error handling XML-RPC call %s", str(method)) + logger.debug( + "Time: %s %s %s", + datetime.utcnow() - start, + str(method), + str(params)[0:50], + ) raise - logger.debug('Time: %s %s %s', datetime.utcnow() - start, str(method), str(params)[0:50]) + logger.debug( + "Time: %s %s %s", datetime.utcnow() - start, str(method), str(params)[0:50] + ) return result + class LimitedRequest(Request): - max_content_length = 10 * 1024 * 1024 # 10MB + max_content_length = 10 * 1024 * 1024 # 10MB -class WSGIApplication(object): +class WSGIApplication(object): def __init__(self, proxy): self.proxy = proxy self.proxy_http = ProxyHTTP(proxy) self.xmlrpc_dispatcher = XMLRPCDispatcher() self.xmlrpc_dispatcher.register_instance(proxy) - self.url_map = RoutingMap([ - # pseudo-XML-RPC calls used in kickstarts: - # (these permit GET to make it more convenient to trigger them using curl) - Rule('/nopxe/', - endpoint=(self.proxy, 'clear_netboot')), - Rule('/install_start/', - endpoint=(self.proxy, 'install_start')), - Rule('/install_done//', - endpoint=(self.proxy, 'install_done')), - Rule('/install_done//', - endpoint=(self.proxy, 'install_done')), - Rule('/postinstall_done/', - endpoint=(self.proxy, 'postinstall_done')), - Rule('/postreboot/', - endpoint=(self.proxy, 'postreboot')), - Rule('/install_fail//', - endpoint=(self.proxy, 'install_fail')), - - # Harness API: - Rule('/recipes//', - methods=['GET'], - endpoint=(self.proxy_http, 'get_recipe')), - Rule('/recipes//watchdog', - methods=['GET'], - endpoint=(self.proxy_http, 'get_watchdog')), - 
Rule('/recipes//watchdog', - methods=['POST'], - endpoint=(self.proxy_http, 'post_watchdog')), - Rule('/recipes//status', - methods=['POST'], - endpoint=(self.proxy_http, 'post_recipe_status')), - Rule('/recipes//tasks//', - methods=['PATCH'], - endpoint=(self.proxy_http, 'patch_task')), - Rule('/recipes//tasks//status', - methods=['POST'], - endpoint=(self.proxy_http, 'post_task_status')), - Rule('/recipes//tasks//results/', - methods=['POST'], - endpoint=(self.proxy_http, 'post_result')), - Rule('/recipes//logs/', - methods=['GET'], - endpoint=(self.proxy_http, 'list_recipe_logs')), - Rule('/recipes//logs/', - methods=['GET', 'PUT'], - endpoint=(self.proxy_http, 'do_recipe_log')), - Rule('/recipes//tasks//logs/', - methods=['GET'], - endpoint=(self.proxy_http, 'list_task_logs')), - Rule('/recipes//tasks//logs/', - methods=['GET', 'PUT'], - endpoint=(self.proxy_http, 'do_task_log')), - Rule('/recipes//tasks//results//logs/', - methods=['GET'], - endpoint=(self.proxy_http, 'list_result_logs')), - Rule('/recipes//tasks//results//logs/', - methods=['GET', 'PUT'], - endpoint=(self.proxy_http, 'do_result_log')), - Rule('/power//', - methods=['PUT'], - endpoint=(self.proxy_http, 'put_power')), - Rule('/healthz/', - methods=['HEAD', 'GET'], - endpoint=(self.proxy_http, 'healthz')) - ]) + self.url_map = RoutingMap( + [ + # pseudo-XML-RPC calls used in kickstarts: + # (these permit GET to make it more convenient to trigger them using curl) + Rule("/nopxe/", endpoint=(self.proxy, "clear_netboot")), + Rule( + "/install_start/", endpoint=(self.proxy, "install_start") + ), + Rule( + "/install_done//", endpoint=(self.proxy, "install_done") + ), + Rule( + "/install_done//", + endpoint=(self.proxy, "install_done"), + ), + Rule( + "/postinstall_done/", + endpoint=(self.proxy, "postinstall_done"), + ), + Rule("/postreboot/", endpoint=(self.proxy, "postreboot")), + Rule( + "/install_fail//", endpoint=(self.proxy, "install_fail") + ), + # Harness API: + Rule( + "/recipes//", + methods=["GET"], + endpoint=(self.proxy_http, "get_recipe"), + ), + Rule( + "/recipes//watchdog", + methods=["GET"], + endpoint=(self.proxy_http, "get_watchdog"), + ), + Rule( + "/recipes//watchdog", + methods=["POST"], + endpoint=(self.proxy_http, "post_watchdog"), + ), + Rule( + "/recipes//status", + methods=["POST"], + endpoint=(self.proxy_http, "post_recipe_status"), + ), + Rule( + "/recipes//tasks//", + methods=["PATCH"], + endpoint=(self.proxy_http, "patch_task"), + ), + Rule( + "/recipes//tasks//status", + methods=["POST"], + endpoint=(self.proxy_http, "post_task_status"), + ), + Rule( + "/recipes//tasks//results/", + methods=["POST"], + endpoint=(self.proxy_http, "post_result"), + ), + Rule( + "/recipes//logs/", + methods=["GET"], + endpoint=(self.proxy_http, "list_recipe_logs"), + ), + Rule( + "/recipes//logs/", + methods=["GET", "PUT"], + endpoint=(self.proxy_http, "do_recipe_log"), + ), + Rule( + "/recipes//tasks//logs/", + methods=["GET"], + endpoint=(self.proxy_http, "list_task_logs"), + ), + Rule( + "/recipes//tasks//logs/", + methods=["GET", "PUT"], + endpoint=(self.proxy_http, "do_task_log"), + ), + Rule( + "/recipes//tasks//results//logs/", + methods=["GET"], + endpoint=(self.proxy_http, "list_result_logs"), + ), + Rule( + "/recipes//tasks//results//logs/", + methods=["GET", "PUT"], + endpoint=(self.proxy_http, "do_result_log"), + ), + Rule( + "/power//", + methods=["PUT"], + endpoint=(self.proxy_http, "put_power"), + ), + Rule( + "/healthz/", + methods=["HEAD", "GET"], + endpoint=(self.proxy_http, "healthz"), + ), + ] 
+ ) @LimitedRequest.application def __call__(self, req): try: # Limit request data in all cases. - if req.max_content_length is not None and \ - req.content_length > req.max_content_length: - raise RequestEntityTooLarge() - if req.path in ('/', '/RPC2', '/server'): - if req.method == 'POST': + if ( + req.max_content_length is not None + and req.content_length > req.max_content_length + ): + raise RequestEntityTooLarge() + if req.path in ("/", "/RPC2", "/server"): + if req.method == "POST": # XML-RPC - if req.mimetype != 'text/xml': - return BadRequest('XML-RPC requests must be text/xml') + if req.mimetype != "text/xml": + return BadRequest("XML-RPC requests must be text/xml") result = self.xmlrpc_dispatcher._marshaled_dispatch(req.data) - return Response(response=result, content_type='text/xml') - elif req.method in ('GET', 'HEAD'): + return Response(response=result, content_type="text/xml") + elif req.method in ("GET", "HEAD"): # XML-RPC docs return Response( - response=self.xmlrpc_dispatcher.generate_html_documentation(), - content_type='text/html') + response=self.xmlrpc_dispatcher.generate_html_documentation(), + content_type="text/html", + ) else: return MethodNotAllowed() else: @@ -149,34 +209,42 @@ def __call__(self, req): if obj is self.proxy: # pseudo-XML-RPC result = getattr(obj, attr)(**args) - return Response(response=repr(result), content_type='text/plain') + return Response(response=repr(result), content_type="text/plain") else: return getattr(obj, attr)(req, **args) - except HTTPException, e: + except HTTPException as e: return e + # Temporary hack to disable keepalive in gevent.wsgi.WSGIServer. This should be easier. -class WSGIHandler(gevent.wsgi.WSGIHandler): +class WSGIHandler(gevent.pywsgi.WSGIHandler): def read_request(self, raw_requestline): result = super(WSGIHandler, self).read_request(raw_requestline) self.close_connection = True return result + # decorator to log uncaught exceptions in the WSGI application def log_failed_requests(func): def _log_failed_requests(environ, start_response): try: return func(environ, start_response) - except Exception, e: - logger.exception('Error handling request %s %s', - environ.get('REQUEST_METHOD'), environ.get('PATH_INFO')) + except Exception as e: + logger.exception( + "Error handling request %s %s", + environ.get("REQUEST_METHOD"), + environ.get("PATH_INFO"), + ) raise + return _log_failed_requests + def daemon_shutdown(signum, frame): - logger.info('Received signal %s, shutting down', signum) + logger.info("Received signal %s, shutting down", signum) shutting_down.set() + def main_loop(proxy=None, conf=None): """infinite daemon loop""" global shutting_down @@ -187,14 +255,18 @@ def main_loop(proxy=None, conf=None): signal.signal(signal.SIGINT, daemon_shutdown) signal.signal(signal.SIGTERM, daemon_shutdown) - login = RepeatTimer(conf['RENEW_SESSION_INTERVAL'], proxy.hub._login, - stop_on_exception=False) + login = RepeatTimer( + conf["RENEW_SESSION_INTERVAL"], proxy.hub._login, stop_on_exception=False + ) login.daemon = True login.start() - server = gevent.wsgi.WSGIServer(('::', 8000), - log_failed_requests(WSGIApplication(proxy)), - handler_class=WSGIHandler, spawn=gevent.pool.Pool()) + server = gevent.pywsgi.WSGIServer( + ("::", 8000), + log_failed_requests(WSGIApplication(proxy)), + handler_class=WSGIHandler, + spawn=gevent.pool.Pool(), + ) server.stop_timeout = None server.start() @@ -204,14 +276,18 @@ def main_loop(proxy=None, conf=None): server.stop() login.stop() + def main(): parser = OptionParser() - 
parser.add_option("-c", "--config", - help="Full path to config file to use") - parser.add_option("-f", "--foreground", default=False, action="store_true", - help="run in foreground (do not spawn a daemon)") - parser.add_option("-p", "--pid-file", - help="specify a pid file") + parser.add_option("-c", "--config", help="Full path to config file to use") + parser.add_option( + "-f", + "--foreground", + default=False, + action="store_true", + help="run in foreground (do not spawn a daemon)", + ) + parser.add_option("-p", "--pid-file", help="specify a pid file") (opts, args) = parser.parse_args() if opts.config: @@ -221,7 +297,9 @@ def main(): pid_file = opts.pid_file if pid_file is None: - pid_file = conf.get("PROXY_PID_FILE", "/var/run/beaker-lab-controller/beaker-proxy.pid") + pid_file = conf.get( + "PROXY_PID_FILE", "/var/run/beaker-lab-controller/beaker-proxy.pid" + ) # HubProxy will try to log some stuff, even though we # haven't configured our logging handlers yet. So we send logs to stderr @@ -229,7 +307,7 @@ def main(): log_to_stream(sys.stderr, level=logging.WARNING) try: proxy = Proxy(conf=conf) - except Exception, ex: + except Exception as ex: sys.stderr.write("Error starting beaker-proxy: %s\n" % ex) sys.exit(1) @@ -239,10 +317,14 @@ def main(): else: # See BZ#977269 proxy.close() - with daemon.DaemonContext(pidfile=pidfile.TimeoutPIDLockFile( - pid_file, acquire_timeout=0), detach_process=True, stderr=sys.stderr): - log_to_syslog('beaker-proxy') + with daemon.DaemonContext( + pidfile=pidfile.TimeoutPIDLockFile(pid_file, acquire_timeout=0), + detach_process=True, + stderr=sys.stderr, + ): + log_to_syslog("beaker-proxy") main_loop(proxy=proxy, conf=conf) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/LabController/src/bkr/labcontroller/netboot.py b/LabController/src/bkr/labcontroller/netboot.py index 5c6c519db..eca310a42 100644 --- a/LabController/src/bkr/labcontroller/netboot.py +++ b/LabController/src/bkr/labcontroller/netboot.py @@ -1,23 +1,28 @@ - # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. -import os, os.path +import collections import errno -import socket import logging -import tempfile +import os +import os.path import shutil -from contextlib import contextmanager -import collections -from cStringIO import StringIO -import urllib -import urllib2 -from bkr.labcontroller.config import get_conf -from bkr.common.helpers import (atomically_replaced_file, makedirs_ignore, - siphon, unlink_ignore, atomic_link, atomic_symlink) +import socket + +import six +from six.moves import cStringIO as StringIO +from six.moves import urllib + +from bkr.common.helpers import ( + atomic_link, + atomic_symlink, + atomically_replaced_file, + makedirs_ignore, + siphon, + unlink_ignore, +) from bkr.labcontroller.config import get_conf logger = logging.getLogger(__name__) @@ -27,14 +32,16 @@ class ImageFetchingError(Exception): """ Raised when an error occurs while fetching netboot images from the network. 
""" + def __init__(self, url, distro_tree_id, cause): super(ImageFetchingError, self).__init__( - 'Error fetching image %s for distro tree %s: %s' - % (url, distro_tree_id, cause)) + "Error fetching image %s for distro tree %s: %s" + % (url, distro_tree_id, cause) + ) def get_tftp_root(): - return get_conf().get('TFTP_ROOT', '/var/lib/tftpboot') + return get_conf().get("TFTP_ROOT", "/var/lib/tftpboot") def copy_ignore(path, source_file): @@ -42,13 +49,15 @@ def copy_ignore(path, source_file): Creates and populates a file by copying from a source file object. The destination file will remain untouched if it already exists. """ + mode = "x" if six.PY3 else "wx" try: - f = open(path, 'wx') # not sure this is portable to Python 3! + f = open(path, mode) except IOError as e: if e.errno == errno.EEXIST: return else: raise + try: logger.debug("%s didn't exist, writing it", path) siphon(source_file, f) @@ -71,7 +80,7 @@ def copy_path_ignore(dest_path, source_path): Nothing will be copied if the source file does not exist. """ try: - source_file = open(source_path, 'rb') + source_file = open(source_path, "rb") except IOError as e: if e.errno == errno.ENOENT: return @@ -96,18 +105,20 @@ def copy_default_loader_images(): # ... the problem is that is either the ia32 version or the x64 version # depending on the architecture of the server, blerg. makedirs_ignore(get_tftp_root(), mode=0o755) - copy_path_ignore(os.path.join(get_tftp_root(), 'pxelinux.0'), - '/usr/share/syslinux/pxelinux.0') - copy_path_ignore(os.path.join(get_tftp_root(), 'menu.c32'), - '/usr/share/syslinux/menu.c32') + copy_path_ignore( + os.path.join(get_tftp_root(), "pxelinux.0"), "/usr/share/syslinux/pxelinux.0" + ) + copy_path_ignore( + os.path.join(get_tftp_root(), "menu.c32"), "/usr/share/syslinux/menu.c32" + ) def fetch_bootloader_image(fqdn, fqdn_dir, distro_tree_id, image_url): - timeout = get_conf().get('IMAGE_FETCH_TIMEOUT') - logger.debug('Fetching bootloader image %s for %s', image_url, fqdn) - with atomically_replaced_file(os.path.join(fqdn_dir, 'image')) as dest: + timeout = get_conf().get("IMAGE_FETCH_TIMEOUT") + logger.debug("Fetching bootloader image %s for %s", image_url, fqdn) + with atomically_replaced_file(os.path.join(fqdn_dir, "image")) as dest: try: - siphon(urllib2.urlopen(image_url, timeout=timeout), dest) + siphon(urllib.request.urlopen(image_url, timeout=timeout), dest) except Exception as e: raise ImageFetchingError(image_url, distro_tree_id, e) @@ -119,56 +130,64 @@ def fetch_images(distro_tree_id, kernel_url, initrd_url, fqdn): /images//kernel /images//initrd """ - images_dir = os.path.join(get_tftp_root(), 'images', fqdn) + images_dir = os.path.join(get_tftp_root(), "images", fqdn) makedirs_ignore(images_dir, 0o755) # Only look for fetched images if distro_tree is registered if distro_tree_id is not None: - distrotree_dir = os.path.join(get_tftp_root(), 'distrotrees', str(distro_tree_id)) + distrotree_dir = os.path.join( + get_tftp_root(), "distrotrees", str(distro_tree_id) + ) # beaker-pxemenu might have already fetched the images, so let's try there # before anywhere else. 
try: - atomic_link(os.path.join(distrotree_dir, 'kernel'), - os.path.join(images_dir, 'kernel')) - atomic_link(os.path.join(distrotree_dir, 'initrd'), - os.path.join(images_dir, 'initrd')) - logger.debug('Using images from distro tree %s for %s', distro_tree_id, fqdn) + atomic_link( + os.path.join(distrotree_dir, "kernel"), + os.path.join(images_dir, "kernel"), + ) + atomic_link( + os.path.join(distrotree_dir, "initrd"), + os.path.join(images_dir, "initrd"), + ) + logger.debug( + "Using images from distro tree %s for %s", distro_tree_id, fqdn + ) return except OSError as e: if e.errno != errno.ENOENT: raise # No luck there, so try something else... - timeout = get_conf().get('IMAGE_FETCH_TIMEOUT') - logger.debug('Fetching kernel %s for %s', kernel_url, fqdn) - with atomically_replaced_file(os.path.join(images_dir, 'kernel')) as dest: + timeout = get_conf().get("IMAGE_FETCH_TIMEOUT") + logger.debug("Fetching kernel %s for %s", kernel_url, fqdn) + with atomically_replaced_file(os.path.join(images_dir, "kernel")) as dest: try: - siphon(urllib2.urlopen(kernel_url, timeout=timeout), dest) + siphon(urllib.request.urlopen(kernel_url, timeout=timeout), dest) except Exception as e: raise ImageFetchingError(kernel_url, distro_tree_id, e) - logger.debug('Fetching initrd %s for %s', initrd_url, fqdn) - with atomically_replaced_file(os.path.join(images_dir, 'initrd')) as dest: + logger.debug("Fetching initrd %s for %s", initrd_url, fqdn) + with atomically_replaced_file(os.path.join(images_dir, "initrd")) as dest: try: - siphon(urllib2.urlopen(initrd_url, timeout=timeout), dest) + siphon(urllib.request.urlopen(initrd_url, timeout=timeout), dest) except Exception as e: raise ImageFetchingError(initrd_url, distro_tree_id, e) def have_images(fqdn): - return os.path.exists(os.path.join(get_tftp_root(), 'images', fqdn)) + return os.path.exists(os.path.join(get_tftp_root(), "images", fqdn)) def clear_images(fqdn): - """Removes kernel and initrd images """ - images_dir = os.path.join(get_tftp_root(), 'images', fqdn) - logger.debug('Removing images for %s', fqdn) + """Removes kernel and initrd images""" + images_dir = os.path.join(get_tftp_root(), "images", fqdn) + logger.debug("Removing images for %s", fqdn) shutil.rmtree(images_dir, ignore_errors=True) def pxe_basename(fqdn): # pxelinux uses upper-case hex IP address for config filename ipaddr = socket.gethostbyname(fqdn) - return '%02X%02X%02X%02X' % tuple(int(octet) for octet in ipaddr.split('.')) + return "%02X%02X%02X%02X" % tuple(int(octet) for octet in ipaddr.split(".")) def extract_arg(arg, kernel_options): @@ -180,28 +199,36 @@ def extract_arg(arg, kernel_options): tokens = [] for token in kernel_options.split(): if token.startswith(arg): - value = token[len(arg):] + value = token[len(arg) :] else: tokens.append(token) if value: - return (value, ' '.join(tokens)) + return (value, " ".join(tokens)) else: return (None, kernel_options) -def configure_grub2(fqdn, default_config_loc, - config_file, kernel_options, devicetree=''): - grub2_postfix, kernel_options = extract_arg('grub2_postfix=', kernel_options) +def configure_grub2( + fqdn, default_config_loc, config_file, kernel_options, devicetree="" +): + grub2_postfix, kernel_options = extract_arg("grub2_postfix=", kernel_options) config = """\ linux%s /images/%s/kernel %s netboot_method=grub2 initrd%s /images/%s/initrd %s boot -""" % (grub2_postfix or '', fqdn, kernel_options, grub2_postfix or '', fqdn, devicetree) +""" % ( + grub2_postfix or "", + fqdn, + kernel_options, + grub2_postfix or "", + fqdn, + 
devicetree, + ) with atomically_replaced_file(config_file) as f: f.write(config) # We also ensure a default config exists that exits - write_ignore(os.path.join(default_config_loc, 'grub.cfg'), 'exit\n') + write_ignore(os.path.join(default_config_loc, "grub.cfg"), "exit\n") def clear_grub2(config): @@ -218,24 +245,24 @@ def configure_aarch64(fqdn, kernel_options, basedir): /EFI/BOOT/grub.cfg """ grub2_conf = "grub.cfg-%s" % pxe_basename(fqdn) - pxe_base = os.path.join(basedir, 'aarch64') + pxe_base = os.path.join(basedir, "aarch64") makedirs_ignore(pxe_base, mode=0o755) - efi_conf_dir = os.path.join(basedir, 'EFI', 'BOOT') + efi_conf_dir = os.path.join(basedir, "EFI", "BOOT") makedirs_ignore(efi_conf_dir, mode=0o755) - devicetree, kernel_options = extract_arg('devicetree=', kernel_options) + devicetree, kernel_options = extract_arg("devicetree=", kernel_options) if devicetree: - devicetree = 'devicetree %s' % devicetree + devicetree = "devicetree %s" % devicetree else: - devicetree = '' + devicetree = "" grub_cfg_file = os.path.join(efi_conf_dir, grub2_conf) - logger.debug('Writing aarch64 config for %s as %s', fqdn, grub_cfg_file) + logger.debug("Writing aarch64 config for %s as %s", fqdn, grub_cfg_file) configure_grub2(fqdn, efi_conf_dir, grub_cfg_file, kernel_options, devicetree) grub_cfg_file = os.path.join(pxe_base, grub2_conf) - logger.debug('Writing aarch64 config for %s as %s', fqdn, grub_cfg_file) + logger.debug("Writing aarch64 config for %s as %s", fqdn, grub_cfg_file) configure_grub2(fqdn, pxe_base, grub_cfg_file, kernel_options, devicetree) @@ -243,9 +270,9 @@ def clear_aarch64(fqdn, basedir): """ Removes PXE bootloader file created by configure_aarch64 """ - pxe_base = os.path.join(basedir, 'aarch64') + pxe_base = os.path.join(basedir, "aarch64") basename = "grub.cfg-%s" % pxe_basename(fqdn) - logger.debug('Removing aarch64 config for %s as %s', fqdn, basename) + logger.debug("Removing aarch64 config for %s as %s", fqdn, basename) clear_grub2(os.path.join(pxe_base, basename)) @@ -264,22 +291,26 @@ def configure_armlinux(fqdn, kernel_options, basedir): This is needed to set a path prefix of arm so that we don't conflict with x86 pxelinux.cfg files. 
""" - pxe_base = os.path.join(basedir, 'arm') + pxe_base = os.path.join(basedir, "arm") makedirs_ignore(pxe_base, mode=0o755) - write_ignore(os.path.join(pxe_base, 'empty'), '') - pxe_dir = os.path.join(pxe_base, 'pxelinux.cfg') + write_ignore(os.path.join(pxe_base, "empty"), "") + pxe_dir = os.path.join(pxe_base, "pxelinux.cfg") makedirs_ignore(pxe_dir, mode=0o755) basename = pxe_basename(fqdn) - config = '''default linux + config = """default linux prompt 0 timeout 100 label linux kernel ../images/%s/kernel initrd ../images/%s/initrd append %s netboot_method=armpxe -''' % (fqdn, fqdn, kernel_options) - logger.debug('Writing armlinux config for %s as %s', fqdn, basename) +""" % ( + fqdn, + fqdn, + kernel_options, + ) + logger.debug("Writing armlinux config for %s as %s", fqdn, basename) with atomically_replaced_file(os.path.join(pxe_dir, basename)) as f: f.write(config) @@ -288,9 +319,9 @@ def clear_armlinux(fqdn, basedir): """ Removes PXE bootloader file created by configure_armlinux """ - pxe_dir = os.path.join(basedir, 'arm', 'pxelinux.cfg') + pxe_dir = os.path.join(basedir, "arm", "pxelinux.cfg") basename = pxe_basename(fqdn) - logger.debug('Removing armlinux config for %s as %s', fqdn, basename) + logger.debug("Removing armlinux config for %s as %s", fqdn, basename) unlink_ignore(os.path.join(pxe_dir, basename)) # XXX Should we save a default config, the way we do for non-ARM PXE? @@ -301,27 +332,31 @@ def _configure_pxelinux_config(basedir, fqdn, initrd_defined, kernel_options, sy Kernel and initrd has to be relative to image location """ - kernel = os.path.join('images', fqdn, 'kernel') - initrd = os.path.join('images', fqdn, 'initrd') + kernel = os.path.join("images", fqdn, "kernel") + initrd = os.path.join("images", fqdn, "initrd") path_diff = os.path.relpath(get_tftp_root(), basedir) - kernel = os.path.join(path_diff if symlink else '/', kernel) - initrd = os.path.join(path_diff if symlink else '/', initrd) + kernel = os.path.join(path_diff if symlink else "/", kernel) + initrd = os.path.join(path_diff if symlink else "/", initrd) # Unfortunately the initrd kernel arg needs some special handling. It can be # supplied from the Beaker side (e.g. a system-specific driver disk) but we # also supply the main initrd here which we have fetched from the distro. 
if initrd_defined: - initrd = '{0},{1}'.format(initrd, initrd_defined) + initrd = "{0},{1}".format(initrd, initrd_defined) - return '''default linux + return """default linux prompt 0 timeout 100 label linux kernel %s ipappend 2 append initrd=%s %s netboot_method=pxe -''' % (kernel, initrd, kernel_options) +""" % ( + kernel, + initrd, + kernel_options, + ) # Bootloader config: PXE Linux @@ -335,34 +370,37 @@ def configure_pxelinux(fqdn, kernel_options, basedir, symlink=False): /pxelinux.cfg/default """ - pxe_dir = os.path.join(basedir, 'pxelinux.cfg') + pxe_dir = os.path.join(basedir, "pxelinux.cfg") makedirs_ignore(pxe_dir, mode=0o755) basename = pxe_basename(fqdn) - initrd, kernel_options = extract_arg('initrd=', kernel_options) + initrd, kernel_options = extract_arg("initrd=", kernel_options) config = _configure_pxelinux_config(basedir, fqdn, initrd, kernel_options, symlink) - logger.debug('Writing pxelinux config for %s as %s', fqdn, basename) + logger.debug("Writing pxelinux config for %s as %s", fqdn, basename) with atomically_replaced_file(os.path.join(pxe_dir, basename)) as f: f.write(config) # We also ensure a default config exists that falls back to local boot - write_ignore(os.path.join(pxe_dir, 'default'), '''default local + write_ignore( + os.path.join(pxe_dir, "default"), + """default local prompt 0 timeout 0 label local localboot 0 -''') +""", + ) def clear_pxelinux(fqdn, basedir): """ Removes PXE bootloader file created by configure_pxelinux """ - pxe_dir = os.path.join(basedir, 'pxelinux.cfg') + pxe_dir = os.path.join(basedir, "pxelinux.cfg") basename = pxe_basename(fqdn) configname = os.path.join(pxe_dir, basename) - logger.debug('Removing pxelinux config for %s as %s', fqdn, basename) + logger.debug("Removing pxelinux config for %s as %s", fqdn, basename) unlink_ignore(configname) @@ -377,43 +415,52 @@ def configure_ipxe(fqdn, kernel_options, basedir): /ipxe/default """ - ipxe_dir = os.path.join(basedir, 'ipxe') + ipxe_dir = os.path.join(basedir, "ipxe") makedirs_ignore(ipxe_dir, mode=0o755) basename = pxe_basename(fqdn).lower() # Unfortunately the initrd kernel arg needs some special handling. It can be # supplied from the Beaker side (e.g. a system-specific driver disk) but we # also supply the main initrd here which we have fetched from the distro. 
- initrd, kernel_options = extract_arg('initrd=', kernel_options) + initrd, kernel_options = extract_arg("initrd=", kernel_options) if initrd: - initrd = '/images/%s/initrd\ninitrd %s' % (fqdn, initrd) + initrd = "/images/%s/initrd\ninitrd %s" % (fqdn, initrd) else: - initrd = '/images/%s/initrd' % fqdn - config = '''#!ipxe + initrd = "/images/%s/initrd" % fqdn + config = """#!ipxe kernel /images/%s/kernel initrd %s imgargs kernel initrd=initrd %s netboot_method=ipxe BOOTIF=01-${netX/mac:hexhyp} boot || exit 1 -''' % (fqdn, initrd, kernel_options) - logger.debug('Writing ipxe config for %s as %s', fqdn, basename) +""" % ( + fqdn, + initrd, + kernel_options, + ) + logger.debug("Writing ipxe config for %s as %s", fqdn, basename) with atomically_replaced_file(os.path.join(ipxe_dir, basename)) as f: f.write(config) # We also ensure a default config exists that falls back to local boot - write_ignore(os.path.join(ipxe_dir, 'default'), '''#!ipxe + write_ignore( + os.path.join(ipxe_dir, "default"), + """#!ipxe iseq ${builtin/platform} pcbios && sanboot --no-describe --drive 0x80 || exit 1 -''') +""", + ) + def clear_ipxe(fqdn, basedir): """ Removes iPXE bootloader file created by configure_ipxe """ - ipxe_dir = os.path.join(basedir, 'ipxe') + ipxe_dir = os.path.join(basedir, "ipxe") basename = pxe_basename(fqdn).lower() configname = os.path.join(ipxe_dir, basename) - logger.debug('Removing iPXE config for %s as %s', fqdn, basename) + logger.debug("Removing iPXE config for %s as %s", fqdn, basename) unlink_ignore(configname) + ### Bootloader config: EFI GRUB def configure_efigrub(fqdn, kernel_options, basedir): """ @@ -425,27 +472,32 @@ def configure_efigrub(fqdn, kernel_options, basedir): /grub/images -> /images """ - grub_dir = os.path.join(basedir, 'grub') + grub_dir = os.path.join(basedir, "grub") makedirs_ignore(grub_dir, mode=0o755) - atomic_symlink('../images', os.path.join(grub_dir, 'images')) + atomic_symlink("../images", os.path.join(grub_dir, "images")) basename = pxe_basename(fqdn) # Unfortunately the initrd kernel arg needs some special handling. It can be # supplied from the Beaker side (e.g. a system-specific driver disk) but we # also supply the main initrd here which we have fetched from the distro. 
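# --- Editorial note (not part of the patch) ---------------------------------
# Each writer merges a Beaker-supplied driver-disk initrd (passed as "initrd="
# in the kernel options) with the distro initrd in its own syntax; "dd.img" is
# a hypothetical value:
extra = "dd.img"
pxelinux_initrd = "/images/host/initrd," + extra                       # comma-separated list
efigrub_initrd = " ".join(["/images/host/initrd"] + extra.split(","))  # space-separated
# iPXE, in contrast, emits a separate "initrd" command line per image
# (see configure_ipxe above).
# -----------------------------------------------------------------------------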
- initrd, kernel_options = extract_arg('initrd=', kernel_options) + initrd, kernel_options = extract_arg("initrd=", kernel_options) if initrd: - initrd = ' '.join(['/images/%s/initrd' % fqdn] + initrd.split(',')) + initrd = " ".join(["/images/%s/initrd" % fqdn] + initrd.split(",")) else: - initrd = '/images/%s/initrd' % fqdn - config = '''default 0 + initrd = "/images/%s/initrd" % fqdn + config = """default 0 timeout 10 title Beaker scheduled job for %s root (nd) kernel /images/%s/kernel %s netboot_method=efigrub initrd %s -''' % (fqdn, fqdn, kernel_options, initrd) - logger.debug('Writing grub config for %s as %s', fqdn, basename) +""" % ( + fqdn, + fqdn, + kernel_options, + initrd, + ) + logger.debug("Writing grub config for %s as %s", fqdn, basename) with atomically_replaced_file(os.path.join(grub_dir, basename)) as f: f.write(config) @@ -454,9 +506,9 @@ def clear_efigrub(fqdn, basedir): """ Removes bootloader file created by configure_efigrub """ - grub_dir = os.path.join(basedir, 'grub') + grub_dir = os.path.join(basedir, "grub") basename = pxe_basename(fqdn) - logger.debug('Removing grub config for %s as %s', fqdn, basename) + logger.debug("Removing grub config for %s as %s", fqdn, basename) unlink_ignore(os.path.join(grub_dir, basename)) @@ -469,31 +521,33 @@ def configure_zpxe(fqdn, kernel_url, initrd_url, kernel_options, basedir): /s390x/s__parm /s390x/s__conf """ - zpxe_dir = os.path.join(basedir, 's390x') + zpxe_dir = os.path.join(basedir, "s390x") makedirs_ignore(zpxe_dir, mode=0o755) kernel_options = "%s netboot_method=zpxe" % kernel_options # The structure of these files is dictated by zpxe.rexx, # Cobbler's "pseudo-PXE" for zVM on s390(x). # XXX I don't think multiple initrds are supported? - logger.debug('Writing zpxe index file for %s', fqdn) - with atomically_replaced_file(os.path.join(zpxe_dir, 's_%s' % fqdn)) as f: - if get_conf().get('ZPXE_USE_FTP', True): - if not kernel_url.startswith('ftp://') or not initrd_url.startswith('ftp://'): - raise ValueError('zPXE only supports FTP for downloading images') - f.write('%s\n%s\n\n' % (kernel_url, initrd_url)) + logger.debug("Writing zpxe index file for %s", fqdn) + with atomically_replaced_file(os.path.join(zpxe_dir, "s_%s" % fqdn)) as f: + if get_conf().get("ZPXE_USE_FTP", True): + if not kernel_url.startswith("ftp://") or not initrd_url.startswith( + "ftp://" + ): + raise ValueError("zPXE only supports FTP for downloading images") + f.write("%s\n%s\n\n" % (kernel_url, initrd_url)) else: - f.write('/images/%s/kernel\n/images/%s/initrd\n\n' % (fqdn, fqdn)) - logger.debug('Writing zpxe parm file for %s', fqdn) - with atomically_replaced_file(os.path.join(zpxe_dir, 's_%s_parm' % fqdn)) as f: + f.write("/images/%s/kernel\n/images/%s/initrd\n\n" % (fqdn, fqdn)) + logger.debug("Writing zpxe parm file for %s", fqdn) + with atomically_replaced_file(os.path.join(zpxe_dir, "s_%s_parm" % fqdn)) as f: # must be wrapped at 80 columns rest = kernel_options while rest: - f.write(rest[:80] + '\n') + f.write(rest[:80] + "\n") rest = rest[80:] - logger.debug('Writing zpxe conf file for %s', fqdn) - with atomically_replaced_file(os.path.join(zpxe_dir, 's_%s_conf' % fqdn)) as f: - pass # unused, but zpxe.rexx fetches it anyway + logger.debug("Writing zpxe conf file for %s", fqdn) + with atomically_replaced_file(os.path.join(zpxe_dir, "s_%s_conf" % fqdn)) as f: + pass # unused, but zpxe.rexx fetches it anyway def clear_zpxe(fqdn, basedir): @@ -504,8 +558,8 @@ def clear_zpxe(fqdn, basedir): Removed: /s390x/s__parm Removed: /s390x/s__conf """ - 
zpxe_dir = os.path.join(basedir, 's390x') - configname = os.path.join(zpxe_dir, 's_%s' % fqdn) + zpxe_dir = os.path.join(basedir, "s390x") + configname = os.path.join(zpxe_dir, "s_%s" % fqdn) if not os.path.exists(configname): # Don't create a default zpxe config if we didn't create # a zpxe config for this system @@ -513,11 +567,11 @@ def clear_zpxe(fqdn, basedir): logger.debug('Writing "local" zpxe index file for %s', fqdn) with atomically_replaced_file(configname) as f: - f.write('local\n') # XXX or should we just delete it?? - logger.debug('Removing zpxe parm file for %s', fqdn) - unlink_ignore(os.path.join(zpxe_dir, 's_%s_parm' % fqdn)) - logger.debug('Removing zpxe conf file for %s', fqdn) - unlink_ignore(os.path.join(zpxe_dir, 's_%s_conf' % fqdn)) + f.write("local\n") # XXX or should we just delete it?? + logger.debug("Removing zpxe parm file for %s", fqdn) + unlink_ignore(os.path.join(zpxe_dir, "s_%s_parm" % fqdn)) + logger.debug("Removing zpxe conf file for %s", fqdn) + unlink_ignore(os.path.join(zpxe_dir, "s_%s_conf" % fqdn)) # Bootloader config: EFI Linux (ELILO) @@ -527,9 +581,9 @@ def configure_elilo(fqdn, kernel_options, basedir): /.conf """ - basename = '%s.conf' % pxe_basename(fqdn) + basename = "%s.conf" % pxe_basename(fqdn) # XXX I don't think multiple initrds are supported? - config = '''relocatable + config = """relocatable image=/images/%s/kernel label=netinstall @@ -537,8 +591,12 @@ def configure_elilo(fqdn, kernel_options, basedir): initrd=/images/%s/initrd read-only root=/dev/ram -''' % (fqdn, kernel_options, fqdn) - logger.debug('Writing elilo config for %s as %s', fqdn, basename) +""" % ( + fqdn, + kernel_options, + fqdn, + ) + logger.debug("Writing elilo config for %s as %s", fqdn, basename) with atomically_replaced_file(os.path.join(basedir, basename)) as f: f.write(config) @@ -547,7 +605,7 @@ def clear_elilo(fqdn, basedir): """ Removes bootloader file created by configure_elilo """ - basename = '%s.conf' % pxe_basename(fqdn) + basename = "%s.conf" % pxe_basename(fqdn) unlink_ignore(os.path.join(basedir, basename)) @@ -559,12 +617,12 @@ def configure_yaboot(fqdn, kernel_options, basedir, yaboot_symlink=True): /etc/ /ppc/ -> ../yaboot """ - yaboot_conf_dir = os.path.join(basedir, 'etc') + yaboot_conf_dir = os.path.join(basedir, "etc") makedirs_ignore(yaboot_conf_dir, mode=0o755) basename = pxe_basename(fqdn).lower() # XXX I don't think multiple initrds are supported? 
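# --- Editorial note (not part of the patch) ---------------------------------
# The zpxe parm-file writer above must wrap the kernel options at 80 columns;
# the while-loop in configure_zpxe() is equivalent to chunking the string like
# this (the options value is hypothetical):
opts = "ks=http://lab.example.com/ks.cfg console=ttyS0 netboot_method=zpxe"
lines = [opts[i:i + 80] for i in range(0, len(opts), 80)]
# -----------------------------------------------------------------------------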
- config = '''init-message="Beaker scheduled job for %s" + config = """init-message="Beaker scheduled job for %s" timeout=80 delay=10 default=linux @@ -573,15 +631,20 @@ def configure_yaboot(fqdn, kernel_options, basedir, yaboot_symlink=True): label=linux initrd=/images/%s/initrd append="%s netboot_method=yaboot" -''' % (fqdn, fqdn, fqdn, kernel_options) - logger.debug('Writing yaboot config for %s as %s', fqdn, basename) +""" % ( + fqdn, + fqdn, + fqdn, + kernel_options, + ) + logger.debug("Writing yaboot config for %s as %s", fqdn, basename) with atomically_replaced_file(os.path.join(yaboot_conf_dir, basename)) as f: f.write(config) if yaboot_symlink: - ppc_dir = os.path.join(basedir, 'ppc') + ppc_dir = os.path.join(basedir, "ppc") makedirs_ignore(ppc_dir, mode=0o755) - logger.debug('Creating yaboot symlink for %s as %s', fqdn, basename) - atomic_symlink('../yaboot', os.path.join(ppc_dir, basename)) + logger.debug("Creating yaboot symlink for %s as %s", fqdn, basename) + atomic_symlink("../yaboot", os.path.join(ppc_dir, basename)) def clear_yaboot(fqdn, basedir, yaboot_symlink=True): @@ -589,11 +652,11 @@ def clear_yaboot(fqdn, basedir, yaboot_symlink=True): Removes bootloader file created by configure_yaboot """ basename = pxe_basename(fqdn).lower() - logger.debug('Removing yaboot config for %s as %s', fqdn, basename) - unlink_ignore(os.path.join(basedir, 'etc', basename)) + logger.debug("Removing yaboot config for %s as %s", fqdn, basename) + unlink_ignore(os.path.join(basedir, "etc", basename)) if yaboot_symlink: - logger.debug('Removing yaboot symlink for %s as %s', fqdn, basename) - unlink_ignore(os.path.join(basedir, 'ppc', basename)) + logger.debug("Removing yaboot symlink for %s as %s", fqdn, basename) + unlink_ignore(os.path.join(basedir, "ppc", basename)) # Bootloader config for X86_64 @@ -612,24 +675,24 @@ def configure_x86_64(fqdn, kernel_options, basedir): grub2_conf = "grub.cfg-%s" % pxe_basename(fqdn) - efi_conf_dir = os.path.join(basedir, 'EFI', 'BOOT') + efi_conf_dir = os.path.join(basedir, "EFI", "BOOT") makedirs_ignore(efi_conf_dir, mode=0o755) grub_cfg_file = os.path.join(efi_conf_dir, grub2_conf) - logger.debug('Writing grub2/x86_64 config for %s as %s', fqdn, grub_cfg_file) + logger.debug("Writing grub2/x86_64 config for %s as %s", fqdn, grub_cfg_file) configure_grub2(fqdn, efi_conf_dir, grub_cfg_file, kernel_options) - x86_64_dir = os.path.join(basedir, 'x86_64') + x86_64_dir = os.path.join(basedir, "x86_64") makedirs_ignore(x86_64_dir, mode=0o755) grub_cfg_file = os.path.join(x86_64_dir, grub2_conf) - logger.debug('Writing grub2/x86_64 config for %s as %s', fqdn, grub_cfg_file) + logger.debug("Writing grub2/x86_64 config for %s as %s", fqdn, grub_cfg_file) configure_grub2(fqdn, x86_64_dir, grub_cfg_file, kernel_options) # Location can be used as fallback # Mostly old GRUB2 relying on this location - grub2_conf_dir = os.path.join(basedir, 'boot', 'grub2') + grub2_conf_dir = os.path.join(basedir, "boot", "grub2") makedirs_ignore(grub2_conf_dir, mode=0o755) grub_cfg_file = os.path.join(grub2_conf_dir, grub2_conf) - logger.debug('Writing grub2/x86_64 config for %s as %s', fqdn, grub_cfg_file) + logger.debug("Writing grub2/x86_64 config for %s as %s", fqdn, grub_cfg_file) configure_grub2(fqdn, grub2_conf_dir, grub_cfg_file, kernel_options) @@ -638,19 +701,18 @@ def clear_x86_64(fqdn, basedir): Calls clear_grub2() to remove the machine config file and symlink to the grub2 boot loader """ - x86_64_dir = os.path.join(basedir, 'x86_64') + x86_64_dir = os.path.join(basedir, 
"x86_64") grub2_config = "grub.cfg-%s" % pxe_basename(fqdn) - logger.debug('Removing grub2/x86_64 config for %s as %s', fqdn, grub2_config) + logger.debug("Removing grub2/x86_64 config for %s as %s", fqdn, grub2_config) clear_grub2(os.path.join(x86_64_dir, grub2_config)) - grub2_conf_dir = os.path.join(basedir, 'boot', 'grub2') + grub2_conf_dir = os.path.join(basedir, "boot", "grub2") - logger.debug('Removing grub2/x86_64 config for %s as %s', fqdn, grub2_config) + logger.debug("Removing grub2/x86_64 config for %s as %s", fqdn, grub2_config) clear_grub2(os.path.join(grub2_conf_dir, grub2_config)) - # Bootloader config for PPC64 def configure_ppc64(fqdn, kernel_options, basedir): """ @@ -667,11 +729,11 @@ def configure_ppc64(fqdn, kernel_options, basedir): """ - ppc_dir = os.path.join(basedir, 'ppc') + ppc_dir = os.path.join(basedir, "ppc") makedirs_ignore(ppc_dir, mode=0o755) grub_cfg_file = os.path.join(ppc_dir, "grub.cfg-%s" % pxe_basename(fqdn)) - logger.debug('Writing grub2/ppc64 config for %s as %s', fqdn, grub_cfg_file) + logger.debug("Writing grub2/ppc64 config for %s as %s", fqdn, grub_cfg_file) configure_grub2(fqdn, ppc_dir, grub_cfg_file, kernel_options) # The following two hacks are to accommodate the differences in behavior @@ -681,21 +743,22 @@ def configure_ppc64(fqdn, kernel_options, basedir): # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1144106 # hack for older grub - grub2_conf_dir = os.path.join(basedir, 'boot', 'grub2') + grub2_conf_dir = os.path.join(basedir, "boot", "grub2") makedirs_ignore(grub2_conf_dir, mode=0o755) grub_cfg_file = os.path.join(grub2_conf_dir, "grub.cfg-%s" % pxe_basename(fqdn)) - logger.debug('Writing grub2/ppc64 config for %s as %s', fqdn, grub_cfg_file) + logger.debug("Writing grub2/ppc64 config for %s as %s", fqdn, grub_cfg_file) configure_grub2(fqdn, grub2_conf_dir, grub_cfg_file, kernel_options) # hack for power VMs grub_cfg_file = os.path.join(basedir, "grub.cfg-%s" % pxe_basename(fqdn)) - logger.debug('Writing grub2/ppc64 config for %s as %s', fqdn, grub_cfg_file) + logger.debug("Writing grub2/ppc64 config for %s as %s", fqdn, grub_cfg_file) configure_grub2(fqdn, ppc_dir, grub_cfg_file, kernel_options) - grub2_symlink = '%s-grub2' % pxe_basename(fqdn).lower() - logger.debug('Creating grub2 symlink for %s as %s', fqdn, grub2_symlink) - atomic_symlink('../boot/grub2/powerpc-ieee1275/core.elf', - os.path.join(ppc_dir, grub2_symlink)) + grub2_symlink = "%s-grub2" % pxe_basename(fqdn).lower() + logger.debug("Creating grub2 symlink for %s as %s", fqdn, grub2_symlink) + atomic_symlink( + "../boot/grub2/powerpc-ieee1275/core.elf", os.path.join(ppc_dir, grub2_symlink) + ) def clear_ppc64(fqdn, basedir): @@ -703,22 +766,22 @@ def clear_ppc64(fqdn, basedir): Calls clear_grub2() to remove the machine config file and symlink to the grub2 boot loader """ - ppc_dir = os.path.join(basedir, 'ppc') + ppc_dir = os.path.join(basedir, "ppc") grub2_config = "grub.cfg-%s" % pxe_basename(fqdn) - logger.debug('Removing grub2/ppc64 config for %s as %s', fqdn, grub2_config) + logger.debug("Removing grub2/ppc64 config for %s as %s", fqdn, grub2_config) clear_grub2(os.path.join(ppc_dir, grub2_config)) - grub2_symlink = '%s-grub2' % pxe_basename(fqdn).lower() - logger.debug('Removing grub2 symlink for %s as %s', fqdn, grub2_symlink) + grub2_symlink = "%s-grub2" % pxe_basename(fqdn).lower() + logger.debug("Removing grub2 symlink for %s as %s", fqdn, grub2_symlink) clear_grub2(os.path.join(ppc_dir, grub2_symlink)) # clear the files which were created as a result of 
the hacks # mentioned in configure_ppc64() - grub2_conf_dir = os.path.join(basedir, 'boot', 'grub2') + grub2_conf_dir = os.path.join(basedir, "boot", "grub2") grub2_config = "grub.cfg-%s" % pxe_basename(fqdn) - logger.debug('Removing grub2/ppc64 config for %s as %s', fqdn, grub2_config) + logger.debug("Removing grub2/ppc64 config for %s as %s", fqdn, grub2_config) clear_grub2(os.path.join(grub2_conf_dir, grub2_config)) grub2_config = "grub.cfg-%s" % pxe_basename(fqdn) - logger.debug('Removing grub2/ppc64 config for %s as %s', fqdn, grub2_config) + logger.debug("Removing grub2/ppc64 config for %s as %s", fqdn, grub2_config) clear_grub2(os.path.join(basedir, grub2_config)) @@ -728,20 +791,31 @@ def configure_petitboot(fqdn, ko, basedir): basedir/bootloader//petitboot.cfg """ - config = '''default Beaker scheduled job for %s + config = """default Beaker scheduled job for %s label Beaker scheduled job for %s kernel ::/images/%s/kernel initrd ::/images/%s/initrd append %s netboot_method=petitboot -''' % (fqdn, fqdn, fqdn, fqdn, ko) +""" % ( + fqdn, + fqdn, + fqdn, + fqdn, + ko, + ) if not basedir: basedir = get_tftp_root() - petitboot_conf_dir = os.path.join(basedir, 'bootloader', fqdn) + petitboot_conf_dir = os.path.join(basedir, "bootloader", fqdn) makedirs_ignore(petitboot_conf_dir, mode=0o755) - logger.debug('Writing petitboot config for %s as %s', fqdn, - os.path.join(petitboot_conf_dir, 'petitboot.cfg')) - with atomically_replaced_file(os.path.join(petitboot_conf_dir, 'petitboot.cfg')) as f: + logger.debug( + "Writing petitboot config for %s as %s", + fqdn, + os.path.join(petitboot_conf_dir, "petitboot.cfg"), + ) + with atomically_replaced_file( + os.path.join(petitboot_conf_dir, "petitboot.cfg") + ) as f: f.write(config) @@ -751,8 +825,9 @@ def clear_petitboot(fqdn, basedir): """ if not basedir: basedir = get_tftp_root() - petitboot_conf_dir = os.path.join(basedir, 'bootloader', fqdn) - unlink_ignore(os.path.join(petitboot_conf_dir, 'petitboot.cfg')) + petitboot_conf_dir = os.path.join(basedir, "bootloader", fqdn) + unlink_ignore(os.path.join(petitboot_conf_dir, "petitboot.cfg")) + # Mass configuration @@ -767,8 +842,7 @@ def clear_petitboot(fqdn, basedir): # clutter on the TFTP server. 
-class Bootloader(collections.namedtuple("Bootloader", - "name configure clear arches")): +class Bootloader(collections.namedtuple("Bootloader", "name configure clear arches")): def __repr__(self): return "Bootloader(%r)" % self.name @@ -787,10 +861,8 @@ def add_bootloader(name, configure, clear, arches=None): add_bootloader("ipxe", configure_ipxe, clear_ipxe) add_bootloader("efigrub", configure_efigrub, clear_efigrub) add_bootloader("yaboot", configure_yaboot, clear_yaboot) -add_bootloader("grub2", configure_ppc64, clear_ppc64, - set(["ppc64", "ppc64le"])) -add_bootloader("grub2_x86_64", configure_x86_64, clear_x86_64, - set(["x86_64"])) +add_bootloader("grub2", configure_ppc64, clear_ppc64, set(["ppc64", "ppc64le"])) +add_bootloader("grub2_x86_64", configure_x86_64, clear_x86_64, set(["x86_64"])) add_bootloader("elilo", configure_elilo, clear_elilo) add_bootloader("armlinux", configure_armlinux, clear_armlinux) add_bootloader("aarch64", configure_aarch64, clear_aarch64, set(["aarch64"])) @@ -800,9 +872,9 @@ def add_bootloader(name, configure, clear, arches=None): # Custom bootloader stuff def configure_netbootloader_directory(fqdn, fqdn_dir, kernel_options): - logger.debug('Creating custom netbootloader tree for %s in %s', fqdn, fqdn_dir) + logger.debug("Creating custom netbootloader tree for %s in %s", fqdn, fqdn_dir) makedirs_ignore(fqdn_dir, mode=0o755) - grub2_cfg_file = os.path.join(fqdn_dir, 'grub.cfg-%s' % pxe_basename(fqdn)) + grub2_cfg_file = os.path.join(fqdn_dir, "grub.cfg-%s" % pxe_basename(fqdn)) configure_grub2(fqdn, fqdn_dir, grub2_cfg_file, kernel_options) configure_pxelinux(fqdn, kernel_options, fqdn_dir, symlink=True) configure_ipxe(fqdn, kernel_options, fqdn_dir) @@ -810,36 +882,42 @@ def configure_netbootloader_directory(fqdn, fqdn_dir, kernel_options): def clear_netbootloader_directory(fqdn): - fqdn_dir = os.path.join(get_tftp_root(), 'bootloader', fqdn) - logger.debug('Removing custom netbootloader config for %s from %s', fqdn, fqdn_dir) - unlink_ignore(os.path.join(fqdn_dir, 'image')) - grub2_cfg_file = os.path.join(fqdn_dir, 'grub.cfg-%s'%pxe_basename(fqdn)) + fqdn_dir = os.path.join(get_tftp_root(), "bootloader", fqdn) + logger.debug("Removing custom netbootloader config for %s from %s", fqdn, fqdn_dir) + unlink_ignore(os.path.join(fqdn_dir, "image")) + grub2_cfg_file = os.path.join(fqdn_dir, "grub.cfg-%s" % pxe_basename(fqdn)) clear_grub2(grub2_cfg_file) clear_pxelinux(fqdn, fqdn_dir) clear_ipxe(fqdn, fqdn_dir) clear_yaboot(fqdn, fqdn_dir, yaboot_symlink=False) -def configure_all(fqdn, arch, distro_tree_id, - kernel_url, initrd_url, kernel_options, - image_url, basedir=None): +def configure_all( + fqdn, + arch, + distro_tree_id, + kernel_url, + initrd_url, + kernel_options, + image_url, + basedir=None, +): """Configure images and all bootloader files for given fqdn""" fetch_images(distro_tree_id, kernel_url, initrd_url, fqdn) if not basedir: basedir = get_tftp_root() - netbootloader, kernel_options = extract_arg('netbootloader=', - kernel_options) + netbootloader, kernel_options = extract_arg("netbootloader=", kernel_options) for bootloader in BOOTLOADERS.values(): if bootloader.arches and arch not in bootloader.arches: # Arch constrained bootloader and this system doesn't match continue bootloader.configure(fqdn, kernel_options, basedir) - if arch == 's390' or arch == 's390x': + if arch == "s390" or arch == "s390x": configure_zpxe(fqdn, kernel_url, initrd_url, kernel_options, basedir) # Custom boot loader code tftp_root = get_tftp_root() - fqdn_dir = 
os.path.join(tftp_root, 'bootloader', fqdn) + fqdn_dir = os.path.join(tftp_root, "bootloader", fqdn) if image_url or netbootloader: configure_netbootloader_directory(fqdn, fqdn_dir, kernel_options) diff --git a/LabController/src/bkr/labcontroller/provision.py b/LabController/src/bkr/labcontroller/provision.py index bb8ff924a..c24d85cf5 100644 --- a/LabController/src/bkr/labcontroller/provision.py +++ b/LabController/src/bkr/labcontroller/provision.py @@ -1,48 +1,54 @@ - # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. -import sys -import os, os.path +import datetime import errno import logging -import time +import os +import os.path import random import signal +import subprocess +import sys +import time +from optparse import OptionParser + import daemon -import datetime +import gevent +import gevent.event +import gevent.hub +import gevent.monkey +import gevent.socket import pkg_resources -import subprocess -import xmlrpclib +import six from daemon import pidfile -from optparse import OptionParser -import gevent, gevent.hub, gevent.socket, gevent.event, gevent.monkey -from bkr.labcontroller.exceptions import ShutdownException -from bkr.log import log_to_stream, log_to_syslog -from bkr.common.helpers import SensitiveUnicode, total_seconds -from bkr.labcontroller.config import load_conf, get_conf -from bkr.labcontroller.proxy import ProxyHelper +from six.moves import xmlrpc_client + +from bkr.common.helpers import SensitiveUnicode from bkr.labcontroller import netboot -import utils +from bkr.labcontroller.config import get_conf, load_conf +from bkr.labcontroller.proxy import ProxyHelper +from bkr.labcontroller.utils import get_console_files +from bkr.log import log_to_stream, log_to_syslog logger = logging.getLogger(__name__) -class CommandQueuePoller(ProxyHelper): +class CommandQueuePoller(ProxyHelper): def __init__(self, *args, **kwargs): super(CommandQueuePoller, self).__init__(*args, **kwargs) - self.commands = {} #: dict of (id -> command info) for running commands - self.greenlets = {} #: dict of (command id -> greenlet which is running it) - self.last_command_datetime = {} # Last time a command was run against a system. + self.commands = {} #: dict of (id -> command info) for running commands + self.greenlets = {} #: dict of (command id -> greenlet which is running it) + self.last_command_datetime = {} # Last time a command was run against a system. 
def get_queued_commands(self): try: commands = self.hub.labcontrollers.get_queued_command_details() - except xmlrpclib.Fault as fault: - if 'Anonymous access denied' in fault.faultString: - logger.debug('Session expired, re-authenticating') + except xmlrpc_client.Fault as fault: + if "Anonymous access denied" in fault.faultString: + logger.debug("Session expired, re-authenticating") self.hub._login() commands = self.hub.labcontrollers.get_queued_command_details() else: @@ -50,17 +56,22 @@ def get_queued_commands(self): for command in commands: # The 'is not None' check is important as we do not want to # stringify the None type - if 'power' in command and 'passwd' in command['power'] and \ - command['power']['passwd'] is not None: - command['power']['passwd'] = SensitiveUnicode(command['power']['passwd']) + if ( + "power" in command + and "passwd" in command["power"] + and command["power"]["passwd"] is not None + ): + command["power"]["passwd"] = SensitiveUnicode( + command["power"]["passwd"] + ) return commands def get_running_command_ids(self): try: ids = self.hub.labcontrollers.get_running_command_ids() - except xmlrpclib.Fault as fault: - if 'Anonymous access denied' in fault.faultString: - logger.debug('Session expired, re-authenticating') + except xmlrpc_client.Fault as fault: + if "Anonymous access denied" in fault.faultString: + logger.debug("Session expired, re-authenticating") self.hub._login() ids = self.hub.labcontrollers.get_running_command_ids() else: @@ -89,133 +100,172 @@ def clear_orphaned_commands(self): self.mark_command_aborted(id, "Command orphaned, aborting") def poll(self): - logger.debug('Clearing orphaned commands') + logger.debug("Clearing orphaned commands") self.clear_orphaned_commands() - logger.debug('Polling for queued commands') + logger.debug("Polling for queued commands") for command in self.get_queued_commands(): - if command['id'] in self.commands: + if command["id"] in self.commands: # We've already seen it, ignore continue # This command has to wait for any other existing commands against the # same system, to prevent collisions - predecessors = [self.greenlets[c['id']] - for c in self.commands.itervalues() - if c['fqdn'] == command['fqdn']] - if 'power' in command and command['power'].get('address'): + predecessors = [ + self.greenlets[c["id"]] + for c in six.itervalues(self.commands) + if c["fqdn"] == command["fqdn"] + ] + if "power" in command and command["power"].get("address"): # Also wait for other commands running against the same power address - predecessors.extend(self.greenlets[c['id']] - for c in self.commands.itervalues() - if 'power' in c and c['power'].get('address') - == command['power']['address']) + predecessors.extend( + self.greenlets[c["id"]] + for c in six.itervalues(self.commands) + if "power" in c + and c["power"].get("address") == command["power"]["address"] + ) self.spawn_handler(command, predecessors) def spawn_handler(self, command, predecessors): - self.commands[command['id']] = command + self.commands[command["id"]] = command greenlet = gevent.spawn(self.handle, command, predecessors) - self.greenlets[command['id']] = greenlet + self.greenlets[command["id"]] = greenlet + def completion_callback(greenlet): if greenlet.exception: - logger.error('Command handler %r had unhandled exception: %r', - greenlet, greenlet.exception) - del self.commands[command['id']] - del self.greenlets[command['id']] + logger.error( + "Command handler %r had unhandled exception: %r", + greenlet, + greenlet.exception, + ) + del 
self.commands[command["id"]] + del self.greenlets[command["id"]] + greenlet.link(completion_callback) def handle(self, command, predecessors): - if command.get('delay'): + if command.get("delay"): # Before anything else, we need to wait for our delay period. # Instead of just doing time.sleep we do a timed wait on # shutting_down, so that our delay doesn't hold up the shutdown. - logger.debug('Delaying %s seconds for command %s', - command['delay'], command['id']) - if shutting_down.wait(timeout=command['delay']): + logger.debug( + "Delaying %s seconds for command %s", command["delay"], command["id"] + ) + if shutting_down.wait(timeout=command["delay"]): return gevent.joinall(predecessors) if shutting_down.is_set(): return - quiescent_period = command.get('quiescent_period') + quiescent_period = command.get("quiescent_period") if quiescent_period: - system_fqdn = command.get('fqdn') + system_fqdn = command.get("fqdn") last_command_finished_at = self.last_command_datetime.get(system_fqdn) if last_command_finished_at: # Get the difference between the time now and the number of # seconds until we can run another command - seconds_to_wait = total_seconds((last_command_finished_at + - datetime.timedelta(seconds=quiescent_period)) - - datetime.datetime.utcnow()) + seconds_to_wait = ( + ( + last_command_finished_at + + datetime.timedelta(seconds=quiescent_period) + ) + - datetime.datetime.utcnow() + ).total_seconds() else: # Play it safe, wait for the whole period. seconds_to_wait = quiescent_period if seconds_to_wait > 0: - logger.debug('Entering quiescent period, delaying %s seconds for' - ' command %s' % (seconds_to_wait, command['id'])) + logger.debug( + "Entering quiescent period, delaying %s seconds for" + " command %s" % (seconds_to_wait, command["id"]) + ) if shutting_down.wait(timeout=seconds_to_wait): return - logger.debug('Handling command %r', command) - self.mark_command_running(command['id']) + logger.debug("Handling command %r", command) + self.mark_command_running(command["id"]) try: - if command['action'] in (u'on', u'off', 'interrupt'): + if command["action"] in ("on", "off", "interrupt"): handle_power(self.conf, command) - elif command['action'] == u'reboot': - # For backwards compatibility only. The server now splits + elif command["action"] == "reboot": + # For backwards compatibility only. The server now splits # reboots into 'off' followed by 'on'. - handle_power(self.conf, dict(command.items() + [('action', u'off')])) + handle_power( + self.conf, dict(list(command.items()) + [("action", "off")]) + ) time.sleep(5) - handle_power(self.conf, dict(command.items() + [('action', u'on')])) - elif command['action'] == u'clear_logs': + handle_power( + self.conf, dict(list(command.items()) + [("action", "on")]) + ) + elif command["action"] == "clear_logs": handle_clear_logs(self.conf, command) - elif command['action'] == u'configure_netboot': + elif command["action"] == "configure_netboot": handle_configure_netboot(command) - elif command['action'] == u'clear_netboot': + elif command["action"] == "clear_netboot": handle_clear_netboot(command) else: - raise ValueError('Unrecognised action %s' % command['action']) + raise ValueError("Unrecognised action %s" % command["action"]) # XXX or should we just ignore it and leave it queued? 
except netboot.ImageFetchingError as e: - logger.exception('Error processing command %s', command['id']) + logger.exception("Error processing command %s", command["id"]) # It's not the system's fault so don't mark it as broken - self.mark_command_failed(command['id'], unicode(e), False) - except Exception, e: - logger.exception('Error processing command %s', command['id']) - self.mark_command_failed(command['id'], - '%s: %s' % (e.__class__.__name__, e), True) + self.mark_command_failed(command["id"], six.text_type(e), False) + except Exception as e: + logger.exception("Error processing command %s", command["id"]) + self.mark_command_failed( + command["id"], "%s: %s" % (e.__class__.__name__, e), True + ) else: - self.mark_command_completed(command['id']) + self.mark_command_completed(command["id"]) finally: if quiescent_period: - self.last_command_datetime[command['fqdn']] = datetime.datetime.utcnow() - logger.debug('Finished handling command %s', command['id']) + self.last_command_datetime[command["fqdn"]] = datetime.datetime.utcnow() + logger.debug("Finished handling command %s", command["id"]) + def find_power_script(power_type): - customised = '/etc/beaker/power-scripts/%s' % power_type + customised = "/etc/beaker/power-scripts/%s" % power_type if os.path.exists(customised) and os.access(customised, os.X_OK): return customised - resource = 'power-scripts/%s' % power_type - if pkg_resources.resource_exists('bkr.labcontroller', resource): - return pkg_resources.resource_filename('bkr.labcontroller', resource) - raise ValueError('Invalid power type %r' % power_type) + resource = "power-scripts/%s" % power_type + if pkg_resources.resource_exists("bkr.labcontroller", resource): + return pkg_resources.resource_filename("bkr.labcontroller", resource) + raise ValueError("Invalid power type %r" % power_type) + + +def _decode(value): + # Decode if we are running python2 and value is unicode + if six.PY2 and isinstance(value, six.text_type): + return value.encode("utf8") + return value + def build_power_env(command): env = dict(os.environ) - env['power_address'] = (command['power'].get('address') or u'').encode('utf8') - env['power_id'] = (command['power'].get('id') or u'').encode('utf8') - env['power_user'] = (command['power'].get('user') or u'').encode('utf8') - env['power_pass'] = (command['power'].get('passwd') or u'').encode('utf8') - env['power_mode'] = command['action'].encode('utf8') + power_mapping = { + "address": "power_address", + "id": "power_id", + "user": "power_user", + "passwd": "power_pass", + } + + for k, v in six.iteritems(power_mapping): + env[v] = _decode(command["power"].get(k, "")) + + env["power_mode"] = _decode(command["action"]) + return env + def handle_clear_logs(conf, command): - for filename, _ in utils.get_console_files( - console_logs_directory=conf['CONSOLE_LOGS'], system_name=command['fqdn']): + for filename, _ in get_console_files( + console_logs_directory=conf["CONSOLE_LOGS"], system_name=command["fqdn"] + ): truncate_logfile(filename) def truncate_logfile(console_log): - logger.debug('Truncating console log %s', console_log) + logger.debug("Truncating console log %s", console_log) try: - f = open(console_log, 'r+') - except IOError, e: + f = open(console_log, "r+") + except IOError as e: if e.errno != errno.ENOENT: raise else: @@ -223,59 +273,75 @@ def truncate_logfile(console_log): def handle_configure_netboot(command): - netboot.configure_all(command['fqdn'], - command['netboot']['arch'], - command['netboot']['distro_tree_id'], - 
command['netboot']['kernel_url'], - command['netboot']['initrd_url'], - command['netboot']['kernel_options'], - command['netboot']['image_url']) + netboot.configure_all( + command["fqdn"], + command["netboot"]["arch"], + command["netboot"]["distro_tree_id"], + command["netboot"]["kernel_url"], + command["netboot"]["initrd_url"], + command["netboot"]["kernel_options"], + command["netboot"]["image_url"], + ) + def handle_clear_netboot(command): - netboot.clear_all(command['fqdn']) + netboot.clear_all(command["fqdn"]) + def handle_power(conf, command): - from bkr.labcontroller.async import MonitoredSubprocess - script = find_power_script(command['power']['type']) + from bkr.labcontroller.concurrency import MonitoredSubprocess + + script = find_power_script(command["power"]["type"]) env = build_power_env(command) # We try the command up to 5 times, because some power commands # are flakey (apparently)... - for attempt in range(1, conf['POWER_ATTEMPTS'] + 1): + for attempt in range(1, conf["POWER_ATTEMPTS"] + 1): if attempt > 1: # After the first attempt fails we do a randomised exponential # backoff in the style of Ethernet. # Instead of just doing time.sleep we do a timed wait on # shutting_down, so that our delay doesn't hold up the shutdown. delay = random.uniform(attempt, 2**attempt) - logger.debug('Backing off %0.3f seconds for power command %s', - delay, command['id']) + logger.debug( + "Backing off %0.3f seconds for power command %s", delay, command["id"] + ) if shutting_down.wait(timeout=delay): break - logger.debug('Launching power script %s (attempt %s) with env %r', - script, attempt, env) + logger.debug( + "Launching power script %s (attempt %s) with env %r", script, attempt, env + ) # N.B. the timeout value used here affects daemon shutdown time, # make sure the init script is kept up to date! - p = MonitoredSubprocess([script], env=env, - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - timeout=300) - logger.debug('Waiting on power script pid %s', p.pid) + p = MonitoredSubprocess( + [script], + env=env, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + timeout=300, + ) + logger.debug("Waiting on power script pid %s", p.pid) p.dead.wait() output = p.stdout_reader.get() if p.returncode == 0 or shutting_down.is_set(): break if p.returncode != 0: sanitised_output = output[:150].strip() - if command['power'].get('passwd'): + if command["power"].get("passwd"): sanitised_output = sanitised_output.replace( - command['power']['passwd'], '********') - raise ValueError('Power script %s failed after %s attempts with exit status %s:\n%s' - % (script, attempt, p.returncode, sanitised_output)) + command["power"]["passwd"], "********" + ) + raise ValueError( + "Power script %s failed after %s attempts with exit status %s:\n%s" + % (script, attempt, p.returncode, sanitised_output) + ) # TODO submit complete stdout and stderr? 
+ def shutdown_handler(signum, frame): - logger.info('Received signal %s, shutting down', signum) + logger.info("Received signal %s, shutting down", signum) shutting_down.set() + def main_loop(poller=None, conf=None): global shutting_down shutting_down = gevent.event.Event() @@ -285,31 +351,35 @@ def main_loop(poller=None, conf=None): signal.signal(signal.SIGINT, shutdown_handler) signal.signal(signal.SIGTERM, shutdown_handler) - logger.debug('Copying default boot loader images') + logger.debug("Copying default boot loader images") netboot.copy_default_loader_images() - logger.debug('Clearing old running commands') - poller.clear_running_commands(u'Stale command cleared on startup') + logger.debug("Clearing old running commands") + poller.clear_running_commands("Stale command cleared on startup") - logger.debug('Entering main provision loop') + logger.debug("Entering main provision loop") while True: try: poller.poll() - except: - logger.exception('Failed to poll for queued commands') - if shutting_down.wait(timeout=conf.get('SLEEP_TIME', 20)): - gevent.hub.get_hub().join() # let running greenlets terminate + except: # noqa + logger.exception("Failed to poll for queued commands") + if shutting_down.wait(timeout=conf.get("SLEEP_TIME", 20)): + gevent.hub.get_hub().join() # let running greenlets terminate break - logger.debug('Exited main provision loop') + logger.debug("Exited main provision loop") + def main(): parser = OptionParser() - parser.add_option("-c", "--config", - help="Full path to config file to use") - parser.add_option("-f", "--foreground", default=False, action="store_true", - help="run in foreground (do not spawn a daemon)") - parser.add_option("-p", "--pid-file", - help="specify a pid file") + parser.add_option("-c", "--config", help="Full path to config file to use") + parser.add_option( + "-f", + "--foreground", + default=False, + action="store_true", + help="run in foreground (do not spawn a daemon)", + ) + parser.add_option("-p", "--pid-file", help="specify a pid file") (opts, args) = parser.parse_args() if opts.config: load_conf(opts.config) @@ -318,16 +388,18 @@ def main(): conf = get_conf() pid_file = opts.pid_file if pid_file is None: - pid_file = conf.get("PROVISION_PID_FILE", "/var/run/beaker-lab-controller/beaker-provision.pid") + pid_file = conf.get( + "PROVISION_PID_FILE", "/var/run/beaker-lab-controller/beaker-provision.pid" + ) - # HubProxy will try to log some stuff, even though we - # haven't configured our logging handlers yet. So we send logs to stderr + # HubProxy will try to log some stuff, even though we + # haven't configured our logging handlers yet. So we send logs to stderr # temporarily here, and configure it again below. 
log_to_stream(sys.stderr, level=logging.WARNING) try: poller = CommandQueuePoller(conf=conf) - except Exception, ex: - sys.stderr.write('Error starting beaker-provision: %s\n' % ex) + except Exception as ex: + sys.stderr.write("Error starting beaker-provision: %s\n" % ex) sys.exit(1) if opts.foreground: @@ -336,14 +408,17 @@ def main(): else: # See BZ#977269 poller.close() - with daemon.DaemonContext(pidfile=pidfile.TimeoutPIDLockFile( - pid_file, acquire_timeout=0),detach_process=True): - log_to_syslog('beaker-provision') + with daemon.DaemonContext( + pidfile=pidfile.TimeoutPIDLockFile(pid_file, acquire_timeout=0), + detach_process=True, + ): + log_to_syslog("beaker-provision") try: main_loop(poller=poller, conf=conf) except Exception: - logger.exception('Unhandled exception in main_loop') + logger.exception("Unhandled exception in main_loop") raise -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/LabController/src/bkr/labcontroller/proxy.py b/LabController/src/bkr/labcontroller/proxy.py index 24d8c1d14..b1dbf5c1d 100644 --- a/LabController/src/bkr/labcontroller/proxy.py +++ b/LabController/src/bkr/labcontroller/proxy.py @@ -1,50 +1,52 @@ - # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. +import base64 import errno -import os -import sys +import json import logging -import time -import base64 -import lxml.etree +import os import re -import json +import shlex import shutil -import tempfile -import xmlrpclib import subprocess +import tempfile +import time +from xml.sax.saxutils import escape as xml_escape +from xml.sax.saxutils import quoteattr as xml_quoteattr + +import lxml.etree import pkg_resources -import shlex -from xml.sax.saxutils import escape as xml_escape, quoteattr as xml_quoteattr -from werkzeug.wrappers import Response -from werkzeug.exceptions import BadRequest, NotAcceptable, NotFound, \ - LengthRequired, UnsupportedMediaType, Conflict -from werkzeug.utils import redirect +from six.moves import xmlrpc_client +from werkzeug.exceptions import ( + BadRequest, + Conflict, + LengthRequired, + NotAcceptable, + NotFound, + UnsupportedMediaType, +) from werkzeug.http import parse_content_range_header +from werkzeug.utils import redirect +from werkzeug.wrappers import Response from werkzeug.wsgi import wrap_file + from bkr.common.hub import HubProxy from bkr.labcontroller.config import get_conf from bkr.labcontroller.log_storage import LogStorage -import utils -try: - #pylint: disable=E0611 - from subprocess import check_output -except ImportError: - from utils import check_output +from bkr.labcontroller.utils import get_console_files + logger = logging.getLogger(__name__) + def replace_with_blanks(match): - return ' ' * (match.end() - match.start() - 1) + '\n' + return " " * (match.end() - match.start() - 1) + "\n" class ProxyHelper(object): - - def __init__(self, conf=None, hub=None, **kwargs): self.conf = get_conf() @@ -62,92 +64,86 @@ def __init__(self, conf=None, hub=None, **kwargs): # self.hub is created here self.hub = hub if self.hub is None: - self.hub = HubProxy(logger=logging.getLogger('bkr.common.hub.HubProxy'), conf=self.conf, - **kwargs) - self.log_storage = LogStorage(self.conf.get("CACHEPATH"), - "%s://%s/beaker/logs" % (self.conf.get('URL_SCHEME', - 'http'), self.conf.get_url_domain()), - self.hub) + self.hub = HubProxy( + 
logger=logging.getLogger("bkr.common.hub.HubProxy"), + conf=self.conf, + **kwargs + ) + self.log_storage = LogStorage( + self.conf.get("CACHEPATH"), + "%s://%s/beaker/logs" + % (self.conf.get("URL_SCHEME", "http"), self.conf.get_url_domain()), + self.hub, + ) def close(self): - if sys.version_info >= (2, 7): - self.hub._hub('close')() - - def recipe_upload_file(self, - recipe_id, - path, - name, - size, - md5sum, - offset, - data): - """ Upload a file in chunks - path: the relative path to upload to - name: the name of the file - size: size of the contents (bytes) - md5: md5sum (hex digest) of contents - data: base64 encoded file contents - offset: the offset of the chunk - Files can be uploaded in chunks, if so the md5 and the size - describe the chunk rather than the whole file. The offset - indicates where the chunk belongs + self.hub._hub("close")() + + def recipe_upload_file(self, recipe_id, path, name, size, md5sum, offset, data): + """Upload a file in chunks + path: the relative path to upload to + name: the name of the file + size: size of the contents (bytes) + md5: md5sum (hex digest) of contents + data: base64 encoded file contents + offset: the offset of the chunk + Files can be uploaded in chunks, if so the md5 and the size + describe the chunk rather than the whole file. The offset + indicates where the chunk belongs """ # Originally offset=-1 had special meaning, but that was unused - logger.debug("recipe_upload_file recipe_id:%s name:%s offset:%s size:%s", - recipe_id, name, offset, size) - with self.log_storage.recipe(str(recipe_id), os.path.join(path, name)) as log_file: + logger.debug( + "recipe_upload_file recipe_id:%s name:%s offset:%s size:%s", + recipe_id, + name, + offset, + size, + ) + with self.log_storage.recipe( + str(recipe_id), os.path.join(path, name) + ) as log_file: log_file.update_chunk(base64.decodestring(data), int(offset or 0)) return True - def task_result(self, - task_id, - result_type, - result_path=None, - result_score=None, - result_summary=None): - """ report a result to the scheduler """ + def task_result( + self, + task_id, + result_type, + result_path=None, + result_score=None, + result_summary=None, + ): + """report a result to the scheduler""" logger.debug("task_result %s", task_id) - return self.hub.recipes.tasks.result(task_id, - result_type, - result_path, - result_score, - result_summary) - - def task_info(self, - qtask_id): - """ accepts qualified task_id J:213 RS:1234 R:312 T:1234 etc.. Returns dict with status """ + return self.hub.recipes.tasks.result( + task_id, result_type, result_path, result_score, result_summary + ) + + def task_info(self, qtask_id): + """accepts qualified task_id J:213 RS:1234 R:312 T:1234 etc.. 
Returns dict with status""" logger.debug("task_info %s", qtask_id) return self.hub.taskactions.task_info(qtask_id) - def recipe_stop(self, - recipe_id, - stop_type, - msg=None): - """ tell the scheduler that we are stopping this recipe - stop_type = ['abort', 'cancel'] - msg to record + def recipe_stop(self, recipe_id, stop_type, msg=None): + """tell the scheduler that we are stopping this recipe + stop_type = ['abort', 'cancel'] + msg to record """ logger.debug("recipe_stop %s", recipe_id) return self.hub.recipes.stop(recipe_id, stop_type, msg) - def recipeset_stop(self, - recipeset_id, - stop_type, - msg=None): - """ tell the scheduler that we are stopping this recipeset - stop_type = ['abort', 'cancel'] - msg to record + def recipeset_stop(self, recipeset_id, stop_type, msg=None): + """tell the scheduler that we are stopping this recipeset + stop_type = ['abort', 'cancel'] + msg to record """ logger.debug("recipeset_stop %s", recipeset_id) return self.hub.recipesets.stop(recipeset_id, stop_type, msg) - def job_stop(self, - job_id, - stop_type, - msg=None): - """ tell the scheduler that we are stopping this job - stop_type = ['abort', 'cancel'] - msg to record + def job_stop(self, job_id, stop_type, msg=None): + """tell the scheduler that we are stopping this job + stop_type = ['abort', 'cancel'] + msg to record """ logger.debug("job_stop %s", job_id) return self.hub.jobs.stop(job_id, stop_type, msg) @@ -157,23 +153,21 @@ def get_my_recipe(self, request): Accepts a dict with key 'recipe_id'. Returns an XML document for the recipe with that id. """ - if 'recipe_id' in request: - logger.debug("get_recipe recipe_id:%s", request['recipe_id']) - return self.hub.recipes.to_xml(request['recipe_id']) + if "recipe_id" in request: + logger.debug("get_recipe recipe_id:%s", request["recipe_id"]) + return self.hub.recipes.to_xml(request["recipe_id"]) def get_peer_roles(self, task_id): - logger.debug('get_peer_roles %s', task_id) + logger.debug("get_peer_roles %s", task_id) return self.hub.recipes.tasks.peer_roles(task_id) def extend_watchdog(self, task_id, kill_time): - """ tell the scheduler to extend the watchdog by kill_time seconds - """ + """tell the scheduler to extend the watchdog by kill_time seconds""" logger.debug("extend_watchdog %s %s", task_id, kill_time) return self.hub.recipes.tasks.extend(task_id, kill_time) def task_to_dict(self, task_name): - """ returns metadata about task_name from the TaskLibrary - """ + """returns metadata about task_name from the TaskLibrary""" return self.hub.tasks.to_dict(task_name) def get_console_log(self, recipe_id, length=None): @@ -182,25 +176,27 @@ def get_console_log(self, recipe_id, length=None): """ return self.hub.recipes.console_output(recipe_id, length) + class ConsoleLogHelper(object): """ Helper class to watch console log outputs and upload them to Scheduler """ + blocksize = 65536 def __init__(self, watchdog, proxy, panic, logfile_name=None): self.watchdog = watchdog self.proxy = proxy self.logfile_name = logfile_name if logfile_name is not None else "console.log" - self.strip_ansi = re.compile("(\033\[[0-9;\?]*[ABCDHfsnuJKmhr])") - ascii_control_chars = map(chr, range(0, 32) + [127]) - keep_chars = '\t\n' + self.strip_ansi = re.compile(r"(\033\[[0-9;\?]*[ABCDHfsnuJKmhr])") + ascii_control_chars = list(map(chr, list(range(0, 32)) + [127])) + keep_chars = "\t\n" strip_control_chars = [c for c in ascii_control_chars if c not in keep_chars] - self.strip_cntrl = re.compile('[%s]' % re.escape(''.join(strip_control_chars))) + self.strip_cntrl = 
re.compile("[%s]" % re.escape("".join(strip_control_chars))) self.panic_detector = PanicDetector(panic) self.install_failure_detector = InstallFailureDetector() self.where = 0 - self.incomplete_line = '' + self.incomplete_line = "" def process_log(self, block): # Sanitize control characters @@ -210,18 +206,18 @@ def process_log(self, block): if self.strip_ansi: block = self.strip_ansi.sub(replace_with_blanks, block) if self.strip_cntrl: - block = self.strip_cntrl.sub(' ', block) + block = self.strip_cntrl.sub(" ", block) # Check for panics # Only feed the panic detector complete lines. If we have read a part # of a line, store it in self.incomplete_line and it will be prepended # to the subsequent block. - lines = (self.incomplete_line + block).split('\n') + lines = (self.incomplete_line + block).split("\n") self.incomplete_line = lines.pop() # Guard against a pathological case of the console filling up with # bytes but no newlines. Avoid buffering them into memory forever. if len(self.incomplete_line) > self.blocksize * 2: lines.append(self.incomplete_line) - self.incomplete_line = '' + self.incomplete_line = "" if self.panic_detector: for line in lines: panic_found = self.panic_detector.feed(line) @@ -233,19 +229,22 @@ def process_log(self, block): # Store block try: log_file = self.proxy.log_storage.recipe( - str(self.watchdog['recipe_id']), - self.logfile_name, create=(self.where == 0)) + str(self.watchdog["recipe_id"]), + self.logfile_name, + create=(self.where == 0), + ) with log_file: log_file.update_chunk(block, self.where) - except (OSError, IOError), e: + except (OSError, IOError) as e: if e.errno == errno.ENOENT: - pass # someone has removed our log, discard the update + pass # someone has removed our log, discard the update else: raise class ConsoleWatchLogFiles(object): - """ Monitor a directory for log files and upload them """ + """Monitor a directory for log files and upload them""" + def __init__(self, logdir, system_name, watchdog, proxy, panic): self.logdir = os.path.abspath(logdir) self.system_name = system_name @@ -254,24 +253,40 @@ def __init__(self, logdir, system_name, watchdog, proxy, panic): self.panic = panic self.logfiles = {} - for filename, logfile_name in utils.get_console_files( - console_logs_directory=self.logdir, system_name=self.system_name): - logger.info('Watching console log file %s for recipe %s', - filename, self.watchdog['recipe_id']) + for filename, logfile_name in get_console_files( + console_logs_directory=self.logdir, system_name=self.system_name + ): + logger.info( + "Watching console log file %s for recipe %s", + filename, + self.watchdog["recipe_id"], + ) self.logfiles[filename] = ConsoleWatchFile( - log=filename, watchdog=self.watchdog, proxy=self.proxy, - panic=self.panic, logfile_name=logfile_name) + log=filename, + watchdog=self.watchdog, + proxy=self.proxy, + panic=self.panic, + logfile_name=logfile_name, + ) def update(self): # Check for any new log files - for filename, logfile_name in utils.get_console_files( - console_logs_directory=self.logdir, system_name=self.system_name): + for filename, logfile_name in get_console_files( + console_logs_directory=self.logdir, system_name=self.system_name + ): if filename not in self.logfiles: - logger.info('Watching console log file %s for recipe %s', - filename, self.watchdog['recipe_id']) + logger.info( + "Watching console log file %s for recipe %s", + filename, + self.watchdog["recipe_id"], + ) self.logfiles[filename] = ConsoleWatchFile( - log=filename, watchdog=self.watchdog, 
proxy=self.proxy, - panic=self.panic, logfile_name=logfile_name) + log=filename, + watchdog=self.watchdog, + proxy=self.proxy, + panic=self.panic, + logfile_name=logfile_name, + ) # Update all of our log files. If any had updated data return True updated = False @@ -281,11 +296,11 @@ def update(self): class ConsoleWatchFile(ConsoleLogHelper): - def __init__(self, log, watchdog, proxy, panic, logfile_name=None): self.log = log super(ConsoleWatchFile, self).__init__( - watchdog, proxy, panic, logfile_name=logfile_name) + watchdog, proxy, panic, logfile_name=logfile_name + ) def update(self): """ @@ -293,9 +308,9 @@ def update(self): """ try: file = open(self.log, "r") - except (OSError, IOError), e: + except (OSError, IOError) as e: if e.errno == errno.ENOENT: - return False # doesn't exist + return False # doesn't exist else: raise try: @@ -305,15 +320,15 @@ def update(self): finally: file.close() if not block: - return False # nothing new has been read + return False # nothing new has been read self.process_log(block) self.where = now return True def truncate(self): try: - f = open(self.log, 'r+') - except IOError, e: + f = open(self.log, "r+") + except IOError as e: if e.errno != errno.ENOENT: raise else: @@ -325,15 +340,16 @@ class ConsoleWatchVirt(ConsoleLogHelper): """ Watch console logs from virtual machines """ + def update(self): - output = self.proxy.get_console_log(self.watchdog['recipe_id']) + output = self.proxy.get_console_log(self.watchdog["recipe_id"]) # OpenStack returns the console output as unicode, although it just # replaces all non-ASCII bytes with U+FFFD REPLACEMENT CHARACTER. # But Beaker normally deals in raw bytes for the consoles. # We can't get back the original bytes that OpenStack discarded so # let's just convert to UTF-8 so that the U+FFFD characters are written # properly at least. - output = output.encode('utf8') + output = output.encode("utf8") if len(output) >= 102400: # If the console log is more than 100KB OpenStack only returns the *last* 100KB. # https://bugs.launchpad.net/nova/+bug/1081436 @@ -343,7 +359,7 @@ def update(self): now = len(block) self.where = 0 else: - block = output[self.where:] + block = output[self.where :] now = self.where + len(block) if not block: return False @@ -353,7 +369,6 @@ def update(self): class PanicDetector(object): - def __init__(self, pattern): self.pattern = re.compile(pattern) self.fired = False @@ -368,8 +383,8 @@ def feed(self, line): self.fired = True return match.group() -class InstallFailureDetector(object): +class InstallFailureDetector(object): def __init__(self): self.patterns = [] for raw_pattern in self._load_patterns(): @@ -377,22 +392,23 @@ def __init__(self): # If the pattern is empty, it is either a mistake or the admin is # trying to override a package pattern to disable it. Either way, # exclude it from the list. 
- if pattern.search(''): + if pattern.search(""): continue self.patterns.append(pattern) self.fired = False def _load_patterns(self): - site_dir = '/etc/beaker/install-failure-patterns' + site_dir = "/etc/beaker/install-failure-patterns" try: site_patterns = os.listdir(site_dir) - except OSError, e: + except OSError as e: if e.errno == errno.ENOENT: site_patterns = [] else: raise - package_patterns = pkg_resources.resource_listdir('bkr.labcontroller', - 'install-failure-patterns') + package_patterns = pkg_resources.resource_listdir( + "bkr.labcontroller", "install-failure-patterns" + ) # site patterns override package patterns of the same name for p in site_patterns: if p in package_patterns: @@ -400,15 +416,18 @@ def _load_patterns(self): patterns = [] for p in site_patterns: try: - patterns.append(open(os.path.join(site_dir, p), 'r').read().strip()) - except OSError, e: + patterns.append(open(os.path.join(site_dir, p), "r").read().strip()) + except OSError as e: if e.errno == errno.ENOENT: - pass # readdir race + pass # readdir race else: raise for p in package_patterns: - patterns.append(pkg_resources.resource_string('bkr.labcontroller', - 'install-failure-patterns/' + p)) + patterns.append( + pkg_resources.resource_string( + "bkr.labcontroller", "install-failure-patterns/" + p + ) + ) return patterns def feed(self, line): @@ -422,16 +441,15 @@ def feed(self, line): class LogArchiver(ProxyHelper): - def transfer_logs(self): transfered = False server = self.conf.get_url_domain() - logger.debug('Polling for recipes to be transferred') + logger.debug("Polling for recipes to be transferred") try: recipe_ids = self.hub.recipes.by_log_server(server) - except xmlrpclib.Fault as fault: - if 'Anonymous access denied' in fault.faultString: - logger.debug('Session expired, re-authenticating') + except xmlrpc_client.Fault as fault: + if "Anonymous access denied" in fault.faultString: + logger.debug("Session expired, re-authenticating") self.hub._login() recipe_ids = self.hub.recipes.by_log_server(server) else: @@ -442,44 +460,61 @@ def transfer_logs(self): return transfered def transfer_recipe_logs(self, recipe_id): - """ If Cache is turned on then move the recipes logs to their final place - """ + """If Cache is turned on then move the recipes logs to their final place""" tmpdir = tempfile.mkdtemp(dir=self.conf.get("CACHEPATH")) try: # Move logs to tmp directory layout - logger.debug('Fetching files list for recipe %s', recipe_id) + logger.debug("Fetching files list for recipe %s", recipe_id) mylogs = self.hub.recipes.files(recipe_id) trlogs = [] - logger.debug('Building temporary log tree for transfer under %s', tmpdir) + logger.debug("Building temporary log tree for transfer under %s", tmpdir) for mylog in mylogs: - mysrc = '%s/%s/%s' % (mylog['basepath'], mylog['path'], mylog['filename']) - mydst = '%s/%s/%s/%s' % (tmpdir, mylog['filepath'], - mylog['path'], mylog['filename']) + mysrc = "%s/%s/%s" % ( + mylog["basepath"], + mylog["path"], + mylog["filename"], + ) + mydst = "%s/%s/%s/%s" % ( + tmpdir, + mylog["filepath"], + mylog["path"], + mylog["filename"], + ) if os.path.exists(mysrc): if not os.path.exists(os.path.dirname(mydst)): os.makedirs(os.path.dirname(mydst)) try: - os.link(mysrc,mydst) + os.link(mysrc, mydst) trlogs.append(mylog) - except OSError, e: - logger.exception('Error hard-linking %s to %s', mysrc, mydst) + except OSError: + logger.exception("Error hard-linking %s to %s", mysrc, mydst) return else: - logger.warn('Recipe %s file %s missing on disk, ignoring', - recipe_id, 
mysrc) + logger.warning( + "Recipe %s file %s missing on disk, ignoring", recipe_id, mysrc + ) # rsync the logs to their new home - rsync_succeeded = self.rsync('%s/' % tmpdir, '%s' % self.conf.get("ARCHIVE_RSYNC")) + rsync_succeeded = self.rsync( + "%s/" % tmpdir, "%s" % self.conf.get("ARCHIVE_RSYNC") + ) if not rsync_succeeded: return # if the logs have been transferred then tell the server the new location - logger.debug('Updating recipe %s file locations on the server', recipe_id) - self.hub.recipes.change_files(recipe_id, self.conf.get("ARCHIVE_SERVER"), - self.conf.get("ARCHIVE_BASEPATH")) + logger.debug("Updating recipe %s file locations on the server", recipe_id) + self.hub.recipes.change_files( + recipe_id, + self.conf.get("ARCHIVE_SERVER"), + self.conf.get("ARCHIVE_BASEPATH"), + ) for mylog in trlogs: - mysrc = '%s/%s/%s' % (mylog['basepath'], mylog['path'], mylog['filename']) + mysrc = "%s/%s/%s" % ( + mylog["basepath"], + mylog["path"], + mylog["filename"], + ) self.rm(mysrc) try: - self.removedirs('%s/%s' % (mylog['basepath'], mylog['path'])) + self.removedirs("%s/%s" % (mylog["basepath"], mylog["path"])) except OSError: # It's ok if it fails, dir may not be empty yet pass @@ -488,29 +523,31 @@ def transfer_recipe_logs(self, recipe_id): shutil.rmtree(tmpdir) def rm(self, src): - """ remove src - """ + """remove src""" if os.path.exists(src): return os.unlink(src) return True def removedirs(self, path): - """ remove empty dirs - """ + """remove empty dirs""" if os.path.exists(path): return os.removedirs(path) return True def rsync(self, src, dst): - """ Run system rsync command to move files - """ - args = ['rsync'] + shlex.split(self.conf.get('RSYNC_FLAGS', '')) + [src, dst] - logger.debug('Invoking rsync as %r', args) + """Run system rsync command to move files""" + args = ["rsync"] + shlex.split(self.conf.get("RSYNC_FLAGS", "")) + [src, dst] + logger.debug("Invoking rsync as %r", args) p = subprocess.Popen(args, stderr=subprocess.PIPE) out, err = p.communicate() if p.returncode != 0: - logger.error('Failed to rsync recipe logs from %s to %s\nExit status: %s\n%s', - src, dst, p.returncode, err) + logger.error( + "Failed to rsync recipe logs from %s to %s\nExit status: %s\n%s", + src, + dst, + p.returncode, + err, + ) return False return True @@ -518,121 +555,134 @@ def sleep(self): # Sleep between polling time.sleep(self.conf.get("SLEEP_TIME", 20)) + class Monitor(ProxyHelper): - """ Upload console log if present to Scheduler - and look for panic/bug/etc.. + """Upload console log if present to Scheduler + and look for panic/bug/etc.. 
""" def __init__(self, watchdog, obj, *args, **kwargs): - """ Monitor system - """ + """Monitor system""" self.watchdog = watchdog self.conf = obj.conf self.hub = obj.hub self.log_storage = obj.log_storage - if(self.watchdog['is_virt_recipe']): - logger.info('Watching OpenStack console for recipe %s', self.watchdog['recipe_id']) + if self.watchdog["is_virt_recipe"]: + logger.info( + "Watching OpenStack console for recipe %s", self.watchdog["recipe_id"] + ) self.console_watch = ConsoleWatchVirt( - self.watchdog, self, self.conf["PANIC_REGEX"]) + self.watchdog, self, self.conf["PANIC_REGEX"] + ) else: self.console_watch = ConsoleWatchLogFiles( - logdir=self.conf['CONSOLE_LOGS'], - system_name=self.watchdog['system'], watchdog=self.watchdog, - proxy=self, panic=self.conf["PANIC_REGEX"]) + logdir=self.conf["CONSOLE_LOGS"], + system_name=self.watchdog["system"], + watchdog=self.watchdog, + proxy=self, + panic=self.conf["PANIC_REGEX"], + ) def run(self): - """ check the logs for new data to upload/or cp - """ + """check the logs for new data to upload/or cp""" return self.console_watch.update() def report_panic(self, watchdog, panic_message): - logger.info('Panic detected for recipe %s on system %s: ' - 'console log contains string %r', watchdog['recipe_id'], - watchdog['system'], panic_message) - job = lxml.etree.fromstring(self.get_my_recipe( - dict(recipe_id=watchdog['recipe_id']))) - recipe = job.find('recipeSet/guestrecipe') + logger.info( + "Panic detected for recipe %s on system %s: " + "console log contains string %r", + watchdog["recipe_id"], + watchdog["system"], + panic_message, + ) + job = lxml.etree.fromstring( + self.get_my_recipe(dict(recipe_id=watchdog["recipe_id"])) + ) + recipe = job.find("recipeSet/guestrecipe") if recipe is None: - recipe = job.find('recipeSet/recipe') - if recipe.find('watchdog').get('panic') == 'ignore': + recipe = job.find("recipeSet/recipe") + if recipe.find("watchdog").get("panic") == "ignore": # Don't Report the panic - logger.info('Not reporting panic due to panic=ignore') - elif recipe.get('status') == 'Reserved': - logger.info('Not reporting panic as recipe is reserved') + logger.info("Not reporting panic due to panic=ignore") + elif recipe.get("status") == "Reserved": + logger.info("Not reporting panic as recipe is reserved") else: # Report the panic # Look for active task, worst case it records it on the last task - for task in recipe.iterfind('task'): - if task.get('status') == 'Running': + for task in recipe.iterfind("task"): + if task.get("status") == "Running": break - self.task_result(task.get('id'), 'panic', '/', 0, panic_message) + self.task_result(task.get("id"), "panic", "/", 0, panic_message) # set the watchdog timeout to 10 minutes, gives some time for all data to # print out on the serial console # this may abort the recipe depending on what the recipeSets # watchdog behaviour is set to. 
- self.extend_watchdog(task.get('id'), 60 * 10) + self.extend_watchdog(task.get("id"), 60 * 10) def report_install_failure(self, watchdog, failure_message): - logger.info('Install failure detected for recipe %s on system %s: ' - 'console log contains string %r', watchdog['recipe_id'], - watchdog['system'], failure_message) - job = lxml.etree.fromstring(self.get_my_recipe( - dict(recipe_id=watchdog['recipe_id']))) - recipe = job.find('recipeSet/guestrecipe') + logger.info( + "Install failure detected for recipe %s on system %s: " + "console log contains string %r", + watchdog["recipe_id"], + watchdog["system"], + failure_message, + ) + job = lxml.etree.fromstring( + self.get_my_recipe(dict(recipe_id=watchdog["recipe_id"])) + ) + recipe = job.find("recipeSet/guestrecipe") if recipe is None: - recipe = job.find('recipeSet/recipe') + recipe = job.find("recipeSet/recipe") # For now we are re-using the same panic="" attribute which is used to # control panic detection, bug 1055320 is an RFE to change this - if recipe.find('watchdog').get('panic') == 'ignore': - logger.info('Not reporting install failure due to panic=ignore') - elif recipe.find('installation') is not None and recipe.find('installation').get('install_finished'): - logger.info('Not reporting install failure for finished installation') + if recipe.find("watchdog").get("panic") == "ignore": + logger.info("Not reporting install failure due to panic=ignore") + elif recipe.find("installation") is not None and recipe.find( + "installation" + ).get("install_finished"): + logger.info("Not reporting install failure for finished installation") else: # Ideally we would record it against the Installation entity for # the recipe, but that's not a thing yet, so we just add a result # to the first task (which is typically /distribution/install) - first_task = recipe.findall('task')[0] - self.task_result(first_task.get('id'), 'fail', '/', 0, failure_message) - self.recipe_stop(recipe.get('id'), 'abort', 'Installation failed') + first_task = recipe.findall("task")[0] + self.task_result(first_task.get("id"), "fail", "/", 0, failure_message) + self.recipe_stop(recipe.get("id"), "abort", "Installation failed") + class Proxy(ProxyHelper): - def task_upload_file(self, - task_id, - path, - name, - size, - md5sum, - offset, - data): - """ Upload a file in chunks - path: the relative path to upload to - name: the name of the file - size: size of the contents (bytes) - md5: md5sum (hex digest) of contents - data: base64 encoded file contents - offset: the offset of the chunk - Files can be uploaded in chunks, if so the md5 and the size - describe the chunk rather than the whole file. The offset - indicates where the chunk belongs + def task_upload_file(self, task_id, path, name, size, md5sum, offset, data): + """Upload a file in chunks + path: the relative path to upload to + name: the name of the file + size: size of the contents (bytes) + md5: md5sum (hex digest) of contents + data: base64 encoded file contents + offset: the offset of the chunk + Files can be uploaded in chunks, if so the md5 and the size + describe the chunk rather than the whole file. 
The offset + indicates where the chunk belongs """ # Originally offset=-1 had special meaning, but that was unused - logger.debug("task_upload_file task_id:%s name:%s offset:%s size:%s", - task_id, name, offset, size) + logger.debug( + "task_upload_file task_id:%s name:%s offset:%s size:%s", + task_id, + name, + offset, + size, + ) with self.log_storage.task(str(task_id), os.path.join(path, name)) as log_file: log_file.update_chunk(base64.decodestring(data), int(offset or 0)) return True - def task_start(self, - task_id, - kill_time=None): - """ tell the scheduler that we are starting a task - default watchdog time can be overridden with kill_time seconds """ + def task_start(self, task_id, kill_time=None): + """tell the scheduler that we are starting a task + default watchdog time can be overridden with kill_time seconds""" logger.debug("task_start %s", task_id) return self.hub.recipes.tasks.start(task_id, kill_time) - def install_start(self, recipe_id=None): - """ Called from %pre of the test machine. We call + """Called from %pre of the test machine. We call the server's install_start() """ _debug_id = "(unspecified recipe)" if recipe_id is None else recipe_id @@ -640,29 +690,36 @@ def install_start(self, recipe_id=None): return self.hub.recipes.install_start(recipe_id) def clear_netboot(self, fqdn): - ''' Called from %post section to remove netboot entry ''' - logger.debug('clear_netboot %s', fqdn) - p = subprocess.Popen(["sudo", "/usr/bin/beaker-clear-netboot", fqdn], - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + """Called from %post section to remove netboot entry""" + logger.debug("clear_netboot %s", fqdn) + p = subprocess.Popen( + ["sudo", "/usr/bin/beaker-clear-netboot", fqdn], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) output, _ = p.communicate() if p.returncode: - raise RuntimeError('sudo beaker-clear-netboot failed: %s' % output.strip()) - logger.debug('clear_netboot %s completed', fqdn) + raise RuntimeError("sudo beaker-clear-netboot failed: %s" % output.strip()) + logger.debug("clear_netboot %s completed", fqdn) return self.hub.labcontrollers.add_completed_command(fqdn, "clear_netboot") def postreboot(self, recipe_id): # XXX would be nice if we could limit this so that systems could only # reboot themselves, instead of accepting any arbitrary recipe id - logger.debug('postreboot %s', recipe_id) + logger.debug("postreboot %s", recipe_id) return self.hub.recipes.postreboot(recipe_id) def power(self, hostname, action): # XXX this should also be authenticated and # restricted to systems in the same recipeset as the caller - logger.debug('power %s %s', hostname, action) - return self.hub.systems.power(action, hostname, False, - # force=True because we are not the system's user - True) + logger.debug("power %s %s", hostname, action) + return self.hub.systems.power( + action, + hostname, + False, + # force=True because we are not the system's user + True, + ) def install_done(self, recipe_id=None, fqdn=None): logger.debug("install_done recipe_id=%s fqdn=%s", recipe_id, fqdn) @@ -678,83 +735,77 @@ def postinstall_done(self, recipe_id=None): return self.hub.recipes.postinstall_done(recipe_id) def status_watchdog(self, task_id): - """ Ask the scheduler how many seconds are left on a watchdog for this task - """ + """Ask the scheduler how many seconds are left on a watchdog for this task""" logger.debug("status_watchdog %s", task_id) return self.hub.recipes.tasks.watchdog(task_id) - def task_stop(self, - task_id, - stop_type, - msg=None): - """ tell the scheduler 
that we are stoping a task - stop_type = ['stop', 'abort', 'cancel'] - msg to record if issuing Abort or Cancel """ + def task_stop(self, task_id, stop_type, msg=None): + """tell the scheduler that we are stoping a task + stop_type = ['stop', 'abort', 'cancel'] + msg to record if issuing Abort or Cancel""" logger.debug("task_stop %s", task_id) return self.hub.recipes.tasks.stop(task_id, stop_type, msg) - def result_upload_file(self, - result_id, - path, - name, - size, - md5sum, - offset, - data): - """ Upload a file in chunks - path: the relative path to upload to - name: the name of the file - size: size of the contents (bytes) - md5: md5sum (hex digest) of contents - data: base64 encoded file contents - offset: the offset of the chunk - Files can be uploaded in chunks, if so the md5 and the size - describe the chunk rather than the whole file. The offset - indicates where the chunk belongs + def result_upload_file(self, result_id, path, name, size, md5sum, offset, data): + """Upload a file in chunks + path: the relative path to upload to + name: the name of the file + size: size of the contents (bytes) + md5: md5sum (hex digest) of contents + data: base64 encoded file contents + offset: the offset of the chunk + Files can be uploaded in chunks, if so the md5 and the size + describe the chunk rather than the whole file. The offset + indicates where the chunk belongs """ # Originally offset=-1 had special meaning, but that was unused - logger.debug("result_upload_file result_id:%s name:%s offset:%s size:%s", - result_id, name, offset, size) - with self.log_storage.result(str(result_id), os.path.join(path, name)) as log_file: + logger.debug( + "result_upload_file result_id:%s name:%s offset:%s size:%s", + result_id, + name, + offset, + size, + ) + with self.log_storage.result( + str(result_id), os.path.join(path, name) + ) as log_file: log_file.update_chunk(base64.decodestring(data), int(offset or 0)) return True def push(self, fqdn, inventory): - """ Push inventory data to Scheduler - """ + """Push inventory data to Scheduler""" return self.hub.push(fqdn, inventory) def legacypush(self, fqdn, inventory): - """ Push legacy inventory data to Scheduler - """ + """Push legacy inventory data to Scheduler""" return self.hub.legacypush(fqdn, inventory) def updateDistro(self, distro, arch): - """ This proxy method allows the installed machine - to report that the distro was successfully installed - The Scheduler will add an INSTALLS tag to this - distro/arch, and if all distro/arch combo's - contain an INSTALLS tag then it will also add - a STABLE tag signifying that it successfully installed - on all applicable arches. + """This proxy method allows the installed machine + to report that the distro was successfully installed + The Scheduler will add an INSTALLS tag to this + distro/arch, and if all distro/arch combo's + contain an INSTALLS tag then it will also add + a STABLE tag signifying that it successfully installed + on all applicable arches. """ return self.hub.tags.updateDistro(distro, arch) def add_distro_tree(self, distro): - """ This proxy method allows the lab controller to add new - distros to the Scheduler/Inventory server. + """This proxy method allows the lab controller to add new + distros to the Scheduler/Inventory server. """ return self.hub.labcontrollers.add_distro_tree(distro) def remove_distro_trees(self, distro_tree_ids): - """ This proxy method allows the lab controller to remove - distro_tree_ids from the Scheduler/Inventory server. 
+ """This proxy method allows the lab controller to remove + distro_tree_ids from the Scheduler/Inventory server. """ return self.hub.labcontrollers.remove_distro_trees(distro_tree_ids) def get_distro_trees(self, filter=None): - """ This proxy method allows the lab controller to query - for all distro_trees that are associated to it. + """This proxy method allows the lab controller to query + for all distro_trees that are associated to it. """ return self.hub.labcontrollers.get_distro_trees(filter) @@ -765,83 +816,95 @@ def get_installation_for_system(self, fqdn): """ return self.hub.labcontrollers.get_installation_for_system(fqdn) -class ProxyHTTP(object): +class ProxyHTTP(object): def __init__(self, proxy): self.hub = proxy.hub self.log_storage = proxy.log_storage def get_recipe(self, req, recipe_id): - if req.accept_mimetypes.provided and \ - 'application/xml' not in req.accept_mimetypes: + if ( + req.accept_mimetypes.provided + and "application/xml" not in req.accept_mimetypes + ): raise NotAcceptable() - return Response(self.hub.recipes.to_xml(recipe_id), - content_type='application/xml') - - _result_types = { # maps from public API names to internal Beaker names - 'pass': 'pass_', - 'warn': 'warn', - 'fail': 'fail', - 'none': 'result_none', - 'skip': 'skip', + return Response( + self.hub.recipes.to_xml(recipe_id), content_type="application/xml" + ) + + _result_types = { # maps from public API names to internal Beaker names + "pass": "pass_", + "warn": "warn", + "fail": "fail", + "none": "result_none", + "skip": "skip", } + def post_result(self, req, recipe_id, task_id): - if 'result' not in req.form: + if "result" not in req.form: raise BadRequest('Missing "result" parameter') - result = req.form['result'].lower() + result = req.form["result"].lower() if result not in self._result_types: - raise BadRequest('Unknown result type %r' % req.form['result']) + raise BadRequest("Unknown result type %r" % req.form["result"]) try: - result_id = self.hub.recipes.tasks.result(task_id, - self._result_types[result], - req.form.get('path'), req.form.get('score'), - req.form.get('message')) - except xmlrpclib.Fault, fault: + result_id = self.hub.recipes.tasks.result( + task_id, + self._result_types[result], + req.form.get("path"), + req.form.get("score"), + req.form.get("message"), + ) + except xmlrpc_client.Fault as fault: # XXX need to find a less fragile way to do this - if 'Cannot record result for finished task' in fault.faultString: - return Response(status=409, response=fault.faultString, - content_type='text/plain') - elif 'Too many results in recipe' in fault.faultString: - return Response(status=403, response=fault.faultString, - content_type='text/plain') + if "Cannot record result for finished task" in fault.faultString: + return Response( + status=409, response=fault.faultString, content_type="text/plain" + ) + elif "Too many results in recipe" in fault.faultString: + return Response( + status=403, response=fault.faultString, content_type="text/plain" + ) else: raise - return redirect('/recipes/%s/tasks/%s/results/%s' % ( - recipe_id, task_id, result_id), code=201) + return redirect( + "/recipes/%s/tasks/%s/results/%s" % (recipe_id, task_id, result_id), + code=201, + ) def post_recipe_status(self, req, recipe_id): - if 'status' not in req.form: + if "status" not in req.form: raise BadRequest('Missing "status" parameter') - status = req.form['status'].lower() - if status != 'aborted': - raise BadRequest('Unknown status %r' % req.form['status']) - self.hub.recipes.stop(recipe_id, 
'abort', - req.form.get('message')) + status = req.form["status"].lower() + if status != "aborted": + raise BadRequest("Unknown status %r" % req.form["status"]) + self.hub.recipes.stop(recipe_id, "abort", req.form.get("message")) return Response(status=204) def post_task_status(self, req, recipe_id, task_id): - if 'status' not in req.form: + if "status" not in req.form: raise BadRequest('Missing "status" parameter') - self._update_status(task_id, req.form['status'], req.form.get('message')) + self._update_status(task_id, req.form["status"], req.form.get("message")) return Response(status=204) def _update_status(self, task_id, status, message): status = status.lower() - if status not in ['running', 'completed', 'aborted']: - raise BadRequest('Unknown status %r' % status) + if status not in ["running", "completed", "aborted"]: + raise BadRequest("Unknown status %r" % status) try: - if status == 'running': + if status == "running": self.hub.recipes.tasks.start(task_id) - elif status == 'completed': - self.hub.recipes.tasks.stop(task_id, 'stop') - elif status == 'aborted': - self.hub.recipes.tasks.stop(task_id, 'abort', message) - except xmlrpclib.Fault as fault: + elif status == "completed": + self.hub.recipes.tasks.stop(task_id, "stop") + elif status == "aborted": + self.hub.recipes.tasks.stop(task_id, "abort", message) + except xmlrpc_client.Fault as fault: # XXX This has to be completely replaced with JSON response in next major release # We don't want to blindly return 500 because of opposite side # will try to retry request - which is almost in all situation wrong - if ('Cannot restart finished task' in fault.faultString - or 'Cannot change status for finished task' in fault.faultString): + if ( + "Cannot restart finished task" in fault.faultString + or "Cannot change status for finished task" in fault.faultString + ): raise Conflict(fault.faultString) else: raise @@ -853,30 +916,33 @@ def patch_task(self, request, recipe_id, task_id): data = request.form.to_dict() else: raise UnsupportedMediaType - if 'status' in data: - status = data.pop('status') - self._update_status(task_id, status, data.pop('message', None)) + if "status" in data: + status = data.pop("status") + self._update_status(task_id, status, data.pop("message", None)) # If the caller only wanted to update the status and nothing else, # we will avoid making a second XML-RPC call. 
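# --- Illustrative sketch, not part of the patch -------------------------------
# Shows the status-update contract enforced above: "status" must be one of
# running/completed/aborted, and a request carrying only "status" is satisfied
# with a single XML-RPC call to the scheduler. The base URL and route layout
# below are assumptions for illustration only; consult the real routing table
# for the exact paths.
import requests

PROXY = "http://lab.example.invalid:8000"  # assumed lab controller proxy URL

def abort_task(recipe_id, task_id, message=None):
    # Roughly equivalent to _update_status(task_id, "aborted", message) server-side.
    resp = requests.post(
        "%s/recipes/%s/tasks/%s/status" % (PROXY, recipe_id, task_id),
        data={"status": "aborted", "message": message or ""},
    )
    resp.raise_for_status()  # server returns 204 on success, 409 if the task already finished
# -------------------------------------------------------------------------------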
- updated = {'status': status} + updated = {"status": status} if data: updated = self.hub.recipes.tasks.update(task_id, data) - return Response(status=200, response=json.dumps(updated), - content_type='application/json') + return Response( + status=200, response=json.dumps(updated), content_type="application/json" + ) def get_watchdog(self, req, recipe_id): seconds = self.hub.recipes.watchdog(recipe_id) - return Response(status=200, - response=json.dumps({'seconds': seconds}), - content_type='application/json') + return Response( + status=200, + response=json.dumps({"seconds": seconds}), + content_type="application/json", + ) def post_watchdog(self, req, recipe_id): - if 'seconds' not in req.form: + if "seconds" not in req.form: raise BadRequest('Missing "seconds" parameter') try: - seconds = int(req.form['seconds']) + seconds = int(req.form["seconds"]) except ValueError: - raise BadRequest('Invalid "seconds" parameter %r' % req.form['seconds']) + raise BadRequest('Invalid "seconds" parameter %r' % req.form["seconds"]) self.hub.recipes.extend(recipe_id, seconds) return Response(status=204) @@ -886,17 +952,17 @@ def post_watchdog(self, req, recipe_id): def _put_log(self, log_file, req): if req.content_length is None: raise LengthRequired() - content_range = parse_content_range_header(req.headers.get('Content-Range')) + content_range = parse_content_range_header(req.headers.get("Content-Range")) if content_range: # a few sanity checks if req.content_length != (content_range.stop - content_range.start): - raise BadRequest('Content length does not match range length') + raise BadRequest("Content length does not match range length") if content_range.length and content_range.length < content_range.stop: - raise BadRequest('Total length is smaller than range end') + raise BadRequest("Total length is smaller than range end") try: with log_file: if content_range: - if content_range.length: # length may be '*' meaning unspecified + if content_range.length: # length may be '*' meaning unspecified log_file.truncate(content_range.length) log_file.update_chunk(req.data, content_range.start) else: @@ -904,13 +970,15 @@ def _put_log(self, log_file, req): log_file.truncate(req.content_length) log_file.update_chunk(req.data, 0) # XXX need to find a less fragile way to do this - except xmlrpclib.Fault, fault: - if 'Cannot register file for finished ' in fault.faultString: - return Response(status=409, response=fault.faultString, - content_type='text/plain') - elif 'Too many ' in fault.faultString: - return Response(status=403, response=fault.faultString, - content_type='text/plain') + except xmlrpc_client.Fault as fault: + if "Cannot register file for finished " in fault.faultString: + return Response( + status=409, response=fault.faultString, content_type="text/plain" + ) + elif "Too many " in fault.faultString: + return Response( + status=403, response=fault.faultString, content_type="text/plain" + ) else: raise return Response(status=204) @@ -918,97 +986,112 @@ def _put_log(self, log_file, req): def _get_log(self, log_file, req): try: f = log_file.open_ro() - except IOError, e: + except IOError as e: if e.errno == errno.ENOENT: raise NotFound() else: raise - return Response(status=200, response=wrap_file(req.environ, f), - content_type='text/plain', direct_passthrough=True) + return Response( + status=200, + response=wrap_file(req.environ, f), + content_type="text/plain", + direct_passthrough=True, + ) def do_recipe_log(self, req, recipe_id, path): log_file = self.log_storage.recipe(recipe_id, path) - if 
req.method == 'GET': + if req.method == "GET": return self._get_log(log_file, req) - elif req.method == 'PUT': + elif req.method == "PUT": return self._put_log(log_file, req) def do_task_log(self, req, recipe_id, task_id, path): log_file = self.log_storage.task(task_id, path) - if req.method == 'GET': + if req.method == "GET": return self._get_log(log_file, req) - elif req.method == 'PUT': + elif req.method == "PUT": return self._put_log(log_file, req) def do_result_log(self, req, recipe_id, task_id, result_id, path): log_file = self.log_storage.result(result_id, path) - if req.method == 'GET': + if req.method == "GET": return self._get_log(log_file, req) - elif req.method == 'PUT': + elif req.method == "PUT": return self._put_log(log_file, req) # XXX use real templates here, make the Atom feed valid def _html_log_index(self, logs): - hrefs = [os.path.join((log['path'] or '').lstrip('/'), log['filename']) - for log in logs] - lis = ['
<li><a href=%s>%s</a></li>' % (xml_quoteattr(href), xml_escape(href)) - for href in hrefs] - html = '<!DOCTYPE html><html><body><ul>%s</ul></body></html>' % ''.join(lis) - return Response(status=200, content_type='text/html', response=html) + hrefs = [ + os.path.join((log["path"] or "").lstrip("/"), log["filename"]) + for log in logs + ] + lis = [ + "<li><a href=%s>%s</a></li>" % (xml_quoteattr(href), xml_escape(href)) + for href in hrefs + ] + html = "<!DOCTYPE html><html><body><ul>%s</ul></body></html>
    " % "".join(lis) + return Response(status=200, content_type="text/html", response=html) def _atom_log_index(self, logs): - hrefs = [os.path.join((log['path'] or '').lstrip('/'), log['filename']) - for log in logs] - entries = ['%s' - % (xml_quoteattr(href), xml_escape(href)) for href in hrefs] - atom = '%s' % ''.join(entries) - return Response(status=200, content_type='application/atom+xml', response=atom) + hrefs = [ + os.path.join((log["path"] or "").lstrip("/"), log["filename"]) + for log in logs + ] + entries = [ + '%s' + % (xml_quoteattr(href), xml_escape(href)) + for href in hrefs + ] + atom = '%s' % "".join(entries) + return Response(status=200, content_type="application/atom+xml", response=atom) def _log_index(self, req, logs): if not req.accept_mimetypes.provided: - response_type = 'text/html' + response_type = "text/html" else: - response_type = req.accept_mimetypes.best_match(['text/html', 'application/atom+xml']) + response_type = req.accept_mimetypes.best_match( + ["text/html", "application/atom+xml"] + ) if not response_type: raise NotAcceptable() - if response_type == 'text/html': + if response_type == "text/html": return self._html_log_index(logs) - elif response_type == 'application/atom+xml': + elif response_type == "application/atom+xml": return self._atom_log_index(logs) def list_recipe_logs(self, req, recipe_id): try: - logs = self.hub.taskactions.files('R:%s' % recipe_id) - except xmlrpclib.Fault, fault: + logs = self.hub.taskactions.files("R:%s" % recipe_id) + except xmlrpc_client.Fault as fault: # XXX need to find a less fragile way to do this - if 'is not a valid Recipe id' in fault.faultString: + if "is not a valid Recipe id" in fault.faultString: raise NotFound() else: raise # The server includes all sub-elements' logs, filter them out - logs = [log for log in logs if log['tid'].startswith('R:')] + logs = [log for log in logs if log["tid"].startswith("R:")] return self._log_index(req, logs) def list_task_logs(self, req, recipe_id, task_id): try: - logs = self.hub.taskactions.files('T:%s' % task_id) - except xmlrpclib.Fault, fault: + logs = self.hub.taskactions.files("T:%s" % task_id) + except xmlrpc_client.Fault as fault: # XXX need to find a less fragile way to do this - if 'is not a valid RecipeTask id' in fault.faultString: + if "is not a valid RecipeTask id" in fault.faultString: raise NotFound() else: raise # The server includes all sub-elements' logs, filter them out - logs = [log for log in logs if log['tid'].startswith('T:')] + logs = [log for log in logs if log["tid"].startswith("T:")] return self._log_index(req, logs) def list_result_logs(self, req, recipe_id, task_id, result_id): try: - logs = self.hub.taskactions.files('TR:%s' % result_id) - except xmlrpclib.Fault, fault: + logs = self.hub.taskactions.files("TR:%s" % result_id) + except xmlrpc_client.Fault as fault: # XXX need to find a less fragile way to do this - if 'is not a valid RecipeTaskResult id' in fault.faultString: + if "is not a valid RecipeTaskResult id" in fault.faultString: raise NotFound() else: raise @@ -1029,11 +1112,11 @@ def put_power(self, req, fqdn): else: raise UnsupportedMediaType - if 'action' not in payload: + if "action" not in payload: raise BadRequest('Missing "action" parameter') - action = payload['action'] - if action not in ['on', 'off', 'reboot']: - raise BadRequest('Unknown action {}'.format(action)) + action = payload["action"] + if action not in ["on", "off", "reboot"]: + raise BadRequest("Unknown action {}".format(action)) self.hub.systems.power(action, fqdn, 
False, True) return Response(status=204) @@ -1045,6 +1128,6 @@ def healthz(self, req): :param req: request """ # HEAD is identical to GET except that it MUST NOT return a body in the response - response = "We are healthy!" if req.method == 'GET' else None + response = "We are healthy!" if req.method == "GET" else None return Response(status=200, response=response) diff --git a/LabController/src/bkr/labcontroller/pxemenu.py b/LabController/src/bkr/labcontroller/pxemenu.py index fa7e7ffeb..ac3f57588 100644 --- a/LabController/src/bkr/labcontroller/pxemenu.py +++ b/LabController/src/bkr/labcontroller/pxemenu.py @@ -9,51 +9,62 @@ import re import shutil import sys -import urllib2 -import urlparse -import xmlrpclib from optparse import OptionParser from jinja2 import Environment, PackageLoader - -from bkr.common.helpers import atomically_replaced_file, siphon, makedirs_ignore, atomic_symlink +from six.moves import urllib, xmlrpc_client + +from bkr.common.helpers import ( + atomic_symlink, + atomically_replaced_file, + makedirs_ignore, + siphon, +) from bkr.labcontroller.config import get_conf def _get_url(available): for lc, url in available: # We prefer http - if url.startswith('http:') or url.startswith('https:'): + if url.startswith("http:") or url.startswith("https:"): return url for lc, url in available: - if url.startswith('ftp:'): + if url.startswith("ftp:"): return url - raise ValueError('Unrecognised URL scheme found in distro tree URL(s) %s' % - [url for lc, url in available]) + raise ValueError( + "Unrecognised URL scheme found in distro tree URL(s) %s" + % [url for lc, url in available] + ) def _group_distro_trees(distro_trees): grouped = {} for dt in distro_trees: - grouped.setdefault(dt['distro_osmajor'], {}) \ - .setdefault(dt['distro_osversion'], []) \ - .append(dt) + grouped.setdefault(dt["distro_osmajor"], {}).setdefault( + dt["distro_osversion"], [] + ).append(dt) return grouped def _get_images(tftp_root, distro_tree_id, url, images): - dest_dir = os.path.join(tftp_root, 'distrotrees', str(distro_tree_id)) + dest_dir = os.path.join(tftp_root, "distrotrees", str(distro_tree_id)) makedirs_ignore(dest_dir, mode=0o755) for image_type, path in images: - if image_type in ('kernel', 'initrd'): + if image_type in ("kernel", "initrd"): dest_path = os.path.join(dest_dir, image_type) if os.path.isfile(dest_path): - print('Skipping existing %s for distro tree %s' % (image_type, distro_tree_id)) + print( + "Skipping existing %s for distro tree %s" + % (image_type, distro_tree_id) + ) else: - image_url = urlparse.urljoin(url, path) - print('Fetching %s %s for distro tree %s' % (image_type, image_url, distro_tree_id)) + image_url = urllib.parse.urljoin(url, path) + print( + "Fetching %s %s for distro tree %s" + % (image_type, image_url, distro_tree_id) + ) with atomically_replaced_file(dest_path) as dest: - siphon(urllib2.urlopen(image_url), dest) + siphon(urllib.request.urlopen(image_url), dest) def _get_all_images(tftp_root, distro_trees): @@ -63,28 +74,32 @@ def _get_all_images(tftp_root, distro_trees): """ trees = [] for distro_tree in distro_trees: - url = _get_url(distro_tree['available']) + url = _get_url(distro_tree["available"]) try: - _get_images(tftp_root, distro_tree['distro_tree_id'], - url, distro_tree['images']) + _get_images( + tftp_root, distro_tree["distro_tree_id"], url, distro_tree["images"] + ) trees.append(distro_tree) except IOError as e: - sys.stderr.write('Error fetching images for distro tree %s: %s\n' % - (distro_tree['distro_tree_id'], e)) + sys.stderr.write( + 
"Error fetching images for distro tree %s: %s\n" + % (distro_tree["distro_tree_id"], e) + ) return trees # configure Jinja2 to load menu templates -template_env = Environment(loader=PackageLoader('bkr.labcontroller', 'pxemenu-templates'), - trim_blocks=True) -template_env.filters['get_url'] = _get_url +template_env = Environment( + loader=PackageLoader("bkr.labcontroller", "pxemenu-templates"), trim_blocks=True +) +template_env.filters["get_url"] = _get_url def write_menu(menu, template_name, distro_trees): osmajors = _group_distro_trees(distro_trees) with menu as menu: template = template_env.get_template(template_name) - menu.write(template.render({'osmajors': osmajors})) + menu.write(template.render({"osmajors": osmajors})) def write_menus(tftp_root, tags, xml_filter): @@ -96,109 +111,158 @@ def write_menus(tftp_root, tags, xml_filter): # then fetch the list of trees, # and then remove any which aren't in the list. try: - existing_tree_ids = os.listdir(os.path.join(tftp_root, 'distrotrees')) + existing_tree_ids = os.listdir(os.path.join(tftp_root, "distrotrees")) except OSError as e: if e.errno != errno.ENOENT: raise existing_tree_ids = [] - proxy = xmlrpclib.ServerProxy('http://localhost:8000', allow_none=True) - distro_trees = proxy.get_distro_trees({ - 'arch': ['x86_64', 'i386', 'aarch64', 'ppc64', 'ppc64le'], - 'tags': tags, - 'xml': xml_filter, - }) - current_tree_ids = set(str(dt['distro_tree_id']) - for dt in distro_trees) + proxy = xmlrpc_client.ServerProxy("http://localhost:8000", allow_none=True) + distro_trees = proxy.get_distro_trees( + { + "arch": ["x86_64", "i386", "aarch64", "ppc64", "ppc64le"], + "tags": tags, + "xml": xml_filter, + } + ) + current_tree_ids = set(str(dt["distro_tree_id"]) for dt in distro_trees) obsolete_tree_ids = set(existing_tree_ids).difference(current_tree_ids) - print('Removing images for %s obsolete distro trees' % len(obsolete_tree_ids)) + print("Removing images for %s obsolete distro trees" % len(obsolete_tree_ids)) for obs in obsolete_tree_ids: - shutil.rmtree(os.path.join(tftp_root, 'distrotrees', obs), ignore_errors=True) + shutil.rmtree(os.path.join(tftp_root, "distrotrees", obs), ignore_errors=True) # Fetch images for all the distro trees first. 
- print('Fetching images for all the distro trees') + print("Fetching images for all the distro trees") distro_trees = _get_all_images(tftp_root, distro_trees) - x86_distrotrees = [distro for distro in distro_trees if distro['arch'] in ['x86_64', 'i386']] - print('Generating PXELINUX menus for %s distro trees' % len(x86_distrotrees)) - makedirs_ignore(os.path.join(tftp_root, 'pxelinux.cfg'), mode=0o755) - pxe_menu = atomically_replaced_file(os.path.join(tftp_root, 'pxelinux.cfg', 'beaker_menu')) - write_menu(pxe_menu, u'pxelinux-menu', x86_distrotrees) - - ipxe_distrotrees = [distro for distro in distro_trees if distro['arch'] in ['x86_64', 'i386', 'aarch64']] - print('Generating iPXE menus for %s distro trees' % len(ipxe_distrotrees)) - makedirs_ignore(os.path.join(tftp_root, 'ipxe'), mode=0o755) - pxe_menu = atomically_replaced_file(os.path.join(tftp_root, 'ipxe', 'beaker_menu')) - write_menu(pxe_menu, u'ipxe-menu', ipxe_distrotrees) - - x86_efi_distrotrees = [distro for distro in distro_trees if distro['arch'] == 'x86_64'] + x86_distrotrees = [ + distro for distro in distro_trees if distro["arch"] in ["x86_64", "i386"] + ] + print("Generating PXELINUX menus for %s distro trees" % len(x86_distrotrees)) + makedirs_ignore(os.path.join(tftp_root, "pxelinux.cfg"), mode=0o755) + pxe_menu = atomically_replaced_file( + os.path.join(tftp_root, "pxelinux.cfg", "beaker_menu") + ) + write_menu(pxe_menu, "pxelinux-menu", x86_distrotrees) + + ipxe_distrotrees = [ + distro + for distro in distro_trees + if distro["arch"] in ["x86_64", "i386", "aarch64"] + ] + print("Generating iPXE menus for %s distro trees" % len(ipxe_distrotrees)) + makedirs_ignore(os.path.join(tftp_root, "ipxe"), mode=0o755) + pxe_menu = atomically_replaced_file(os.path.join(tftp_root, "ipxe", "beaker_menu")) + write_menu(pxe_menu, "ipxe-menu", ipxe_distrotrees) + + x86_efi_distrotrees = [ + distro for distro in distro_trees if distro["arch"] == "x86_64" + ] # Regardless of any filtering options selected by the admin, we always # filter out certain distros which are known not to have EFI support. This # is a space saving measure for the EFI GRUB menu, which can't be nested so # we try to keep it as small possible. 
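# --- Illustrative sketch, not part of the patch -------------------------------
# How the exclusion just described works: distro trees whose osmajor matches
# conf["EFI_EXCLUDED_OSMAJORS_REGEX"] are dropped before the EFI GRUB menu is
# rendered. The regex value below is a made-up example; the real pattern comes
# from the lab controller configuration.
import re

EFI_EXCLUDED_OSMAJORS_REGEX = r"RedHatEnterpriseLinux(3|4)"  # example value only
osmajors = ["RedHatEnterpriseLinux4", "RedHatEnterpriseLinux9", "Fedora38"]
efi_capable = [o for o in osmajors if not re.match(EFI_EXCLUDED_OSMAJORS_REGEX, o)]
assert efi_capable == ["RedHatEnterpriseLinux9", "Fedora38"]
# -------------------------------------------------------------------------------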
- x86_efi_distrotrees = [distro for distro in x86_efi_distrotrees - if not re.match(conf['EFI_EXCLUDED_OSMAJORS_REGEX'], - distro['distro_osmajor'])] - - print('Generating EFI GRUB menus for %s distro trees' % len(x86_efi_distrotrees)) - makedirs_ignore(os.path.join(tftp_root, 'grub'), mode=0o755) - atomic_symlink('../distrotrees', os.path.join(tftp_root, 'grub', 'distrotrees')) - efi_grub_menu = atomically_replaced_file(os.path.join(tftp_root, 'grub', 'efidefault')) - write_menu(efi_grub_menu, u'efi-grub-menu', x86_efi_distrotrees) - - print('Generating GRUB2 menus for x86 EFI for %s distro trees' % len(x86_efi_distrotrees)) - makedirs_ignore(os.path.join(tftp_root, 'boot', 'grub2'), mode=0o755) - x86_grub2_menu = atomically_replaced_file(os.path.join(tftp_root, 'boot', 'grub2', - 'beaker_menu_x86.cfg')) - write_menu(x86_grub2_menu, u'grub2-menu', x86_efi_distrotrees) - - ppc64_distrotrees = [distro for distro in distro_trees if distro['arch'] == 'ppc64'] + x86_efi_distrotrees = [ + distro + for distro in x86_efi_distrotrees + if not re.match(conf["EFI_EXCLUDED_OSMAJORS_REGEX"], distro["distro_osmajor"]) + ] + + print("Generating EFI GRUB menus for %s distro trees" % len(x86_efi_distrotrees)) + makedirs_ignore(os.path.join(tftp_root, "grub"), mode=0o755) + atomic_symlink("../distrotrees", os.path.join(tftp_root, "grub", "distrotrees")) + efi_grub_menu = atomically_replaced_file( + os.path.join(tftp_root, "grub", "efidefault") + ) + write_menu(efi_grub_menu, "efi-grub-menu", x86_efi_distrotrees) + + print( + "Generating GRUB2 menus for x86 EFI for %s distro trees" + % len(x86_efi_distrotrees) + ) + makedirs_ignore(os.path.join(tftp_root, "boot", "grub2"), mode=0o755) + x86_grub2_menu = atomically_replaced_file( + os.path.join(tftp_root, "boot", "grub2", "beaker_menu_x86.cfg") + ) + write_menu(x86_grub2_menu, "grub2-menu", x86_efi_distrotrees) + + ppc64_distrotrees = [distro for distro in distro_trees if distro["arch"] == "ppc64"] if ppc64_distrotrees: - print('Generating GRUB2 menus for PPC64 EFI for %s distro trees' % len(ppc64_distrotrees)) - makedirs_ignore(os.path.join(tftp_root, 'boot', 'grub2'), mode=0o755) - ppc64_grub2_menu = atomically_replaced_file(os.path.join(tftp_root, 'boot', 'grub2', - 'beaker_menu_ppc64.cfg')) - write_menu(ppc64_grub2_menu, u'grub2-menu', ppc64_distrotrees) - - ppc64le_distrotrees = [distro for distro in distro_trees if distro['arch'] == 'ppc64le'] + print( + "Generating GRUB2 menus for PPC64 EFI for %s distro trees" + % len(ppc64_distrotrees) + ) + makedirs_ignore(os.path.join(tftp_root, "boot", "grub2"), mode=0o755) + ppc64_grub2_menu = atomically_replaced_file( + os.path.join(tftp_root, "boot", "grub2", "beaker_menu_ppc64.cfg") + ) + write_menu(ppc64_grub2_menu, "grub2-menu", ppc64_distrotrees) + + ppc64le_distrotrees = [ + distro for distro in distro_trees if distro["arch"] == "ppc64le" + ] if ppc64le_distrotrees: - print('Generating GRUB2 menus for PPC64LE EFI for %s distro trees' % len(ppc64_distrotrees)) - makedirs_ignore(os.path.join(tftp_root, 'boot', 'grub2'), mode=0o755) - ppc64le_grub2_menu = atomically_replaced_file(os.path.join(tftp_root, 'boot', 'grub2', - 'beaker_menu_ppc64le.cfg')) - write_menu(ppc64le_grub2_menu, u'grub2-menu', ppc64le_distrotrees) + print( + "Generating GRUB2 menus for PPC64LE EFI for %s distro trees" + % len(ppc64_distrotrees) + ) + makedirs_ignore(os.path.join(tftp_root, "boot", "grub2"), mode=0o755) + ppc64le_grub2_menu = atomically_replaced_file( + os.path.join(tftp_root, "boot", "grub2", "beaker_menu_ppc64le.cfg") + 
) + write_menu(ppc64le_grub2_menu, "grub2-menu", ppc64le_distrotrees) # XXX: would be nice if we can find a good time to move this into boot/grub2 - aarch64_distrotrees = [distro for distro in distro_trees if distro['arch'] == 'aarch64'] + aarch64_distrotrees = [ + distro for distro in distro_trees if distro["arch"] == "aarch64" + ] if aarch64_distrotrees: - print('Generating GRUB2 menus for aarch64 for %s distro trees' % len(aarch64_distrotrees)) - makedirs_ignore(os.path.join(tftp_root, 'aarch64'), mode=0o755) + print( + "Generating GRUB2 menus for aarch64 for %s distro trees" + % len(aarch64_distrotrees) + ) + makedirs_ignore(os.path.join(tftp_root, "aarch64"), mode=0o755) aarch64_menu = atomically_replaced_file( - os.path.join(tftp_root, 'aarch64', 'beaker_menu.cfg')) - write_menu(aarch64_menu, u'grub2-menu', aarch64_distrotrees) + os.path.join(tftp_root, "aarch64", "beaker_menu.cfg") + ) + write_menu(aarch64_menu, "grub2-menu", aarch64_distrotrees) def main(): - parser = OptionParser(description='''Writes a netboot menu to the TFTP root -directory, containing distros from Beaker.''') - parser.add_option('--tag', metavar='TAG', action='append', dest='tags', - help='Only include distros tagged with TAG') - parser.add_option('--xml-filter', metavar='XML', - help='Only include distro trees which match the given ' - 'XML filter criteria, as in ') - parser.add_option('--tftp-root', metavar='DIR', - default='/var/lib/tftpboot', - help='Path to TFTP root directory [default: %default]') - parser.add_option('-q', '--quiet', action='store_true', - help='Suppress informational output') + parser = OptionParser( + description="""Writes a netboot menu to the TFTP root +directory, containing distros from Beaker.""" + ) + parser.add_option( + "--tag", + metavar="TAG", + action="append", + dest="tags", + help="Only include distros tagged with TAG", + ) + parser.add_option( + "--xml-filter", + metavar="XML", + help="Only include distro trees which match the given " + "XML filter criteria, as in ", + ) + parser.add_option( + "--tftp-root", + metavar="DIR", + default="/var/lib/tftpboot", + help="Path to TFTP root directory [default: %default]", + ) + parser.add_option( + "-q", "--quiet", action="store_true", help="Suppress informational output" + ) (opts, args) = parser.parse_args() if args: - parser.error('This command does not accept any arguments') + parser.error("This command does not accept any arguments") if opts.quiet: - os.dup2(os.open('/dev/null', os.O_WRONLY), 1) + os.dup2(os.open("/dev/null", os.O_WRONLY), 1) write_menus(opts.tftp_root, opts.tags, opts.xml_filter) return 0 -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/LabController/src/bkr/labcontroller/test_async.py b/LabController/src/bkr/labcontroller/test_concurrency.py similarity index 52% rename from LabController/src/bkr/labcontroller/test_async.py rename to LabController/src/bkr/labcontroller/test_concurrency.py index 3f8ec340e..f0f66da77 100644 --- a/LabController/src/bkr/labcontroller/test_async.py +++ b/LabController/src/bkr/labcontroller/test_concurrency.py @@ -4,72 +4,117 @@ # (at your option) any later version. 
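# --- Illustrative sketch, not part of the patch -------------------------------
# Basic usage pattern for MonitoredSubprocess as exercised by the tests below:
# spawn the child inside a greenlet, wait on the "dead" event, then collect the
# return code and the (possibly truncated) captured output.
import subprocess

import gevent

from bkr.labcontroller.concurrency import MonitoredSubprocess

def _demo():
    p = MonitoredSubprocess(["echo", "hello"], stdout=subprocess.PIPE, timeout=5)
    p.dead.wait()  # set once the child has been reaped
    return p.returncode, p.stdout_reader.get()

greenlet = gevent.spawn(_demo)
gevent.wait()
print(greenlet.get(block=False))  # expected: (0, 'hello\n')
# -------------------------------------------------------------------------------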
import os -import errno -import unittest -import subprocess import signal -from time import sleep +import subprocess +import time +import unittest + import gevent +import psutil + +from bkr.labcontroller.concurrency import ( + MonitoredSubprocess, + _kill_process_group, + _read_from_pipe, +) try: - # pylint: disable=E0611 - from gevent import wait as gevent_wait + from unittest.mock import MagicMock, patch except ImportError: - # gevent.wait was gevent.run in 1.0 beta - # pylint: disable=E0611 - from gevent import run as gevent_wait -from bkr.labcontroller.async import MonitoredSubprocess + from mock import MagicMock, patch -class SubprocessTest(unittest.TestCase): +class TestReadFromPipe(unittest.TestCase): + @patch("fcntl.fcntl", MagicMock()) + @patch("gevent.socket.wait_read", MagicMock()) + def test_read_normal(self): + mock_file = MagicMock() + mock_file.read.side_effect = [b"foo", b"bar", b""] + + result = _read_from_pipe(mock_file) + expected_result = "foobar" + self.assertEqual(result, expected_result) + + @patch("fcntl.fcntl", MagicMock()) + @patch("gevent.socket.wait_read", MagicMock()) + def test_read_discarding(self): + mock_file = MagicMock() + mock_file.read.side_effect = ( + [b"a" * 4096] * 1001 + [b"This line shouldn't be in output"] + [b""] # EOT + ) + + result = _read_from_pipe(mock_file) + expected_result = "a" * 4096 * 1000 + "+++ DISCARDED" + self.assertEqual(result, expected_result) + + +class TestKillProcessGroup(unittest.TestCase): + @patch("os.killpg") + @patch("gevent.sleep", MagicMock()) + def test_kill_process_group(self, mock_killpg): + t_pgid = 12345 + + _kill_process_group(t_pgid) + + # Verify that SIGTERM and SIGKILL are both called + expected_calls = [((t_pgid, signal.SIGTERM),), ((t_pgid, signal.SIGKILL),)] + self.assertEqual(mock_killpg.call_args_list, expected_calls) + + +class SubprocessTest(unittest.TestCase): def _assert_child_is_process_group_leader(self, p): self.assertEqual(os.getpgid(p.pid), p.pid) def _assert_process_group_is_removed(self, pgid): - try: - # There's seems to sometimes be a delay from when the process is killed - # and when os.killpg believes it is killed - for _ in range(1, 6): - os.killpg(pgid, signal.SIGKILL) - sleep(0.5) - self.fail("The process group should've already been removed") - except OSError, e: - if e.errno != errno.ESRCH: - self.fail("The process group should've already been removed") + processes_in_group = [] + + for proc in psutil.process_iter(["pid", "name"]): + try: + if os.getpgid(proc.pid) == pgid: + processes_in_group.append(proc) + except (psutil.NoSuchProcess, psutil.AccessDenied, OSError): + pass + + gone, alive = psutil.wait_procs(processes_in_group, timeout=10) + + self.assertEqual([], alive) def test_runaway_output_is_discarded(self): def _test(): - p = MonitoredSubprocess(['seq', '--format=%0.0f cans of spam on the wall', - str(1024 * 1024)], stdout=subprocess.PIPE, - timeout=5) + p = MonitoredSubprocess( + ["seq", "--format=%0.0f cans of spam on the wall", str(8096 * 8096)], + stdout=subprocess.PIPE, + timeout=5, + ) p.dead.wait() out = p.stdout_reader.get() - self.assert_(len(out) <= 4096013, len(out)) - self.assert_(out.endswith('+++ DISCARDED'), out[:-10240]) + self.assertEqual(p.returncode, -signal.SIGTERM) + self.assertTrue(len(out) <= 4096013, len(out)) + self.assertTrue(out.endswith("+++ DISCARDED"), out[:-10240]) greenlet = gevent.spawn(_test) - gevent_wait() + gevent.wait() greenlet.get(block=False) def test_timeout_is_enforced(self): def _test(): - p = MonitoredSubprocess(['sleep', '10'], 
timeout=1) + p = MonitoredSubprocess(["sleep", "10"], timeout=1) p.dead.wait() - self.assertEquals(p.returncode, -signal.SIGTERM) + self.assertEqual(p.returncode, -signal.SIGTERM) greenlet = gevent.spawn(_test) - gevent_wait() + gevent.wait() greenlet.get(block=False) def test_child_is_process_group_leader(self): def _test(): - p = MonitoredSubprocess(['sleep', '1'], timeout=2) + p = MonitoredSubprocess(["sleep", "1"], timeout=2) self._assert_child_is_process_group_leader(p) p.dead.wait() greenlet = gevent.spawn(_test) - gevent_wait() + gevent.wait() greenlet.get(block=False) def test_process_group_is_killed_on_leader_timeout(self): @@ -90,7 +135,9 @@ def test_process_group_is_killed_on_leader_timeout(self): # The process group leader should timeout, and everything in the # process group should be terminated/killed. def _test(): - p = MonitoredSubprocess(['bash', '-c', '{ sleep 30 ; } & sleep 10'], timeout=1) + p = MonitoredSubprocess( + ["bash", "-c", "{ sleep 30 ; } & sleep 10"], timeout=1 + ) # The rest of this test hinges on this assertion self._assert_child_is_process_group_leader(p) p.dead.wait() @@ -98,7 +145,7 @@ def _test(): self._assert_process_group_is_removed(p.pid) greenlet = gevent.spawn(_test) - gevent_wait() + gevent.wait() greenlet.get(block=False) def test_orphan_child_is_killed_when_parent_exits(self): @@ -119,25 +166,26 @@ def test_orphan_child_is_killed_when_parent_exits(self): # These should all be in the same process group and should # all be killed when the process group leader exits normally. def _test(): - p = MonitoredSubprocess(['bash', '-c', '{ sleep 60 ; } & sleep 1'], timeout=10) + p = MonitoredSubprocess( + ["bash", "-c", "{ sleep 60 ; } & sleep 1"], timeout=10 + ) # The rest of this test hinges on this assertion self._assert_child_is_process_group_leader(p) p.dead.wait() - self.assertEquals(p.returncode, 0) + self.assertEqual(p.returncode, 0) self._assert_process_group_is_removed(p.pid) greenlet = gevent.spawn(_test) - gevent_wait() + gevent.wait() greenlet.get(block=False) # https://bugzilla.redhat.com/show_bug.cgi?id=832250 def test_reaper_race(self): def _test(): - procs = [MonitoredSubprocess(['true'], timeout=10) - for _ in xrange(600)] + procs = [MonitoredSubprocess(["true"], timeout=10) for _ in range(600)] for p in procs: p.dead.wait() greenlet = gevent.spawn(_test) - gevent_wait() + gevent.wait() greenlet.get(block=False) diff --git a/LabController/src/bkr/labcontroller/test_log_storage.py b/LabController/src/bkr/labcontroller/test_log_storage.py index 033ac6387..31f203dc3 100644 --- a/LabController/src/bkr/labcontroller/test_log_storage.py +++ b/LabController/src/bkr/labcontroller/test_log_storage.py @@ -1,31 +1,31 @@ - # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
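# --- Illustrative sketch, not part of the patch -------------------------------
# The expected paths in the test below imply that LogStorage buckets entries by
# thousands: id 1 ends up under "0+/1", id 1001 under "1+/1001". A rough
# stand-in for that mapping (the real logic lives in
# bkr.labcontroller.log_storage and may differ in detail):
def _bucketed_path(base, entity_id, filename):
    return "%s/%s+/%s/%s" % (base, int(entity_id) // 1000, entity_id, filename)

assert _bucketed_path("/dummy/recipes", "1", "console.log") == "/dummy/recipes/0+/1/console.log"
assert _bucketed_path("/dummy/recipes", "1001", "console.log") == "/dummy/recipes/1+/1001/console.log"
# -------------------------------------------------------------------------------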
-import unittest + from bkr.labcontroller.log_storage import LogStorage + def test_log_storage_paths(): - log_storage = LogStorage('/dummy', 'http://dummy/', object()) + log_storage = LogStorage("/dummy", "http://dummy/", object()) cases = [ - ('recipe', '1', 'console.log', '/dummy/recipes/0+/1/console.log'), - ('recipe', '1', '/console.log', '/dummy/recipes/0+/1/console.log'), - ('recipe', '1', '//console.log', '/dummy/recipes/0+/1/console.log'), - ('recipe', '1', 'debug/beah_raw', '/dummy/recipes/0+/1/debug/beah_raw'), - ('recipe', '1001', 'console.log', '/dummy/recipes/1+/1001/console.log'), - ('task', '1', 'TESTOUT.log', '/dummy/tasks/0+/1/TESTOUT.log'), - ('task', '1', '/TESTOUT.log', '/dummy/tasks/0+/1/TESTOUT.log'), - ('task', '1', '//TESTOUT.log', '/dummy/tasks/0+/1/TESTOUT.log'), - ('task', '1', 'debug/beah_raw', '/dummy/tasks/0+/1/debug/beah_raw'), - ('task', '1001', 'TESTOUT.log', '/dummy/tasks/1+/1001/TESTOUT.log'), - ('result', '1', 'TESTOUT.log', '/dummy/results/0+/1/TESTOUT.log'), - ('result', '1', '/TESTOUT.log', '/dummy/results/0+/1/TESTOUT.log'), - ('result', '1', '//TESTOUT.log', '/dummy/results/0+/1/TESTOUT.log'), - ('result', '1', 'debug/beah_raw', '/dummy/results/0+/1/debug/beah_raw'), - ('result', '1001', 'TESTOUT.log', '/dummy/results/1+/1001/TESTOUT.log'), + ("recipe", "1", "console.log", "/dummy/recipes/0+/1/console.log"), + ("recipe", "1", "/console.log", "/dummy/recipes/0+/1/console.log"), + ("recipe", "1", "//console.log", "/dummy/recipes/0+/1/console.log"), + ("recipe", "1", "debug/beah_raw", "/dummy/recipes/0+/1/debug/beah_raw"), + ("recipe", "1001", "console.log", "/dummy/recipes/1+/1001/console.log"), + ("task", "1", "TESTOUT.log", "/dummy/tasks/0+/1/TESTOUT.log"), + ("task", "1", "/TESTOUT.log", "/dummy/tasks/0+/1/TESTOUT.log"), + ("task", "1", "//TESTOUT.log", "/dummy/tasks/0+/1/TESTOUT.log"), + ("task", "1", "debug/beah_raw", "/dummy/tasks/0+/1/debug/beah_raw"), + ("task", "1001", "TESTOUT.log", "/dummy/tasks/1+/1001/TESTOUT.log"), + ("result", "1", "TESTOUT.log", "/dummy/results/0+/1/TESTOUT.log"), + ("result", "1", "/TESTOUT.log", "/dummy/results/0+/1/TESTOUT.log"), + ("result", "1", "//TESTOUT.log", "/dummy/results/0+/1/TESTOUT.log"), + ("result", "1", "debug/beah_raw", "/dummy/results/0+/1/debug/beah_raw"), + ("result", "1001", "TESTOUT.log", "/dummy/results/1+/1001/TESTOUT.log"), ] - for log_type, id, path, expected in cases: - actual = getattr(log_storage, log_type)(id, path).path + for log_type, entity_id, path, expected in cases: + actual = getattr(log_storage, log_type)(entity_id, path).path assert actual == expected, actual diff --git a/LabController/src/bkr/labcontroller/test_netboot.py b/LabController/src/bkr/labcontroller/test_netboot.py index 2bb3b3ce1..d1db3b1f0 100644 --- a/LabController/src/bkr/labcontroller/test_netboot.py +++ b/LabController/src/bkr/labcontroller/test_netboot.py @@ -3,18 +3,22 @@ # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
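# --- Illustrative sketch, not part of the patch -------------------------------
# The fixtures below hinge on netboot.pxe_basename() mapping the system's IPv4
# address to the upper-case hex filename PXELINUX expects (127.0.0.255 ->
# "7F0000FF"); iPXE and yaboot use the lower-cased form. A rough equivalent for
# illustration:
import socket
import struct

def _pxe_basename_sketch(ipaddr):
    return "%08X" % struct.unpack(">I", socket.inet_aton(ipaddr))[0]

assert _pxe_basename_sketch("127.0.0.255") == "7F0000FF"
# -------------------------------------------------------------------------------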
-import os, os.path -import socket -import unittest -import tempfile +import os +import os.path import random import shutil -from bkr.labcontroller import netboot +import socket +import tempfile +import unittest + +import six + from bkr.common.helpers import makedirs_ignore +from bkr.labcontroller import netboot # This FQDN is embedded in a lot of the expected output for test cases -TEST_FQDN = 'fqdn.example.invalid' -TEST_ADDRESS = '127.0.0.255' +TEST_FQDN = "fqdn.example.invalid" +TEST_ADDRESS = "127.0.0.255" # Path maps CONFIGURED_PATHS = { @@ -23,25 +27,15 @@ ("images", TEST_FQDN, "kernel"), ("images", TEST_FQDN, "initrd"), ), - "armlinux": ( - ("arm", "pxelinux.cfg", netboot.pxe_basename(TEST_ADDRESS)), - ), - "pxelinux": ( - ("pxelinux.cfg", netboot.pxe_basename(TEST_ADDRESS)), - ), - "ipxe": ( - ("ipxe", netboot.pxe_basename(TEST_ADDRESS).lower()), - ), - "efigrub": ( - ("grub", netboot.pxe_basename(TEST_ADDRESS)), - ), + "armlinux": (("arm", "pxelinux.cfg", netboot.pxe_basename(TEST_ADDRESS)),), + "pxelinux": (("pxelinux.cfg", netboot.pxe_basename(TEST_ADDRESS)),), + "ipxe": (("ipxe", netboot.pxe_basename(TEST_ADDRESS).lower()),), + "efigrub": (("grub", netboot.pxe_basename(TEST_ADDRESS)),), "zpxe": ( ("s390x", "s_%s_conf" % TEST_FQDN), ("s390x", "s_%s_parm" % TEST_FQDN), ), - "elilo": ( - (netboot.pxe_basename(TEST_ADDRESS) + ".conf",), - ), + "elilo": ((netboot.pxe_basename(TEST_ADDRESS) + ".conf",),), "yaboot": ( ("etc", netboot.pxe_basename(TEST_ADDRESS).lower()), ("ppc", netboot.pxe_basename(TEST_ADDRESS).lower()), @@ -50,30 +44,19 @@ PERSISTENT_PATHS = { # These exist even after calling clear_ - "armlinux": ( - ("arm", "empty"), - ), - "pxelinux": ( - ("pxelinux.cfg", "default"), - ), - "ipxe": ( - ("ipxe", "default"), - ), - "efigrub": ( - ("grub", "images"), - ), - "zpxe": ( - ("s390x", "s_%s" % TEST_FQDN), - ), + "armlinux": (("arm", "empty"),), + "pxelinux": (("pxelinux.cfg", "default"),), + "ipxe": (("ipxe", "default"),), + "efigrub": (("grub", "images"),), + "zpxe": (("s390x", "s_%s" % TEST_FQDN),), } class NetBootTestCase(unittest.TestCase): - def setUp(self): - self.tftp_root = tempfile.mkdtemp(prefix='test_netboot', suffix='tftproot') + self.tftp_root = tempfile.mkdtemp(prefix="test_netboot", suffix="tftproot") self.fake_conf = { - 'TFTP_ROOT': self.tftp_root, + "TFTP_ROOT": self.tftp_root, } self._orig_get_conf = netboot.get_conf netboot.get_conf = lambda: self.fake_conf @@ -90,107 +73,134 @@ def check_netboot_absent(self, category): paths = self.make_filenames(CONFIGURED_PATHS[category]) paths += self.make_filenames(PERSISTENT_PATHS.get(category, ())) for path in paths: - self.assertFalse(os.path.lexists(path), - "Unexpected %r file: %r" % (category, path)) + self.assertFalse( + os.path.lexists(path), "Unexpected %r file: %r" % (category, path) + ) def check_netboot_configured(self, category): """Check state after calling fetch_images or configure_""" paths = self.make_filenames(CONFIGURED_PATHS[category]) paths += self.make_filenames(PERSISTENT_PATHS.get(category, ())) for path in paths: - self.assertTrue(os.path.lexists(path), - "Missing %r file: %r" % (category, path)) + self.assertTrue( + os.path.lexists(path), "Missing %r file: %r" % (category, path) + ) def check_netboot_cleared(self, category): """Check state after calling clear_""" paths = self.make_filenames(CONFIGURED_PATHS[category]) for path in paths: - self.assertFalse(os.path.lexists(path), - "Unexpected %r file: %r" % (category, path)) + self.assertFalse( + os.path.lexists(path), "Unexpected %r file: 
%r" % (category, path) + ) persistent = self.make_filenames(PERSISTENT_PATHS.get(category, ())) for path in persistent: - self.assertTrue(os.path.lexists(path), - "Missing persistent %r file: %r" % - (category, path)) + self.assertTrue( + os.path.lexists(path), + "Missing persistent %r file: %r" % (category, path), + ) def check_netbootloader_leak(self, config): - self.assertNotIn('netbootloader=', open(config).read()) + self.assertNotIn("netbootloader=", open(config).read()) def make_filenames(self, paths): return [os.path.join(self.tftp_root, *parts) for parts in paths] class LoaderImagesTest(NetBootTestCase): - # https://bugzilla.redhat.com/show_bug.cgi?id=866765 def test_pxelinux_is_populated(self): - if not os.path.exists('/usr/share/syslinux'): - raise unittest.SkipTest('syslinux is not installed') + if not os.path.exists("/usr/share/syslinux"): + raise unittest.SkipTest("syslinux is not installed") netboot.copy_default_loader_images() - pxelinux_path = os.path.join(self.tftp_root, 'pxelinux.0') + pxelinux_path = os.path.join(self.tftp_root, "pxelinux.0") self.assertTrue(os.path.exists(pxelinux_path)) - menuc32_path = os.path.join(self.tftp_root, 'menu.c32') + menuc32_path = os.path.join(self.tftp_root, "menu.c32") self.assertTrue(os.path.exists(menuc32_path)) class ImagesBaseTestCase(NetBootTestCase): - def setUp(self): super(ImagesBaseTestCase, self).setUp() # make some dummy images - self.kernel = tempfile.NamedTemporaryFile(prefix='test_netboot', suffix='kernel') - for _ in xrange(4 * 1024): - self.kernel.write(chr(random.randrange(0, 256)) * 1024) + self.kernel = tempfile.NamedTemporaryFile( + prefix="test_netboot", suffix="kernel" + ) + for _ in range(4 * 1024): + self.kernel.write(bytes(str(chr(random.randrange(0, 128))).encode()) * 1024) self.kernel.flush() - self.initrd = tempfile.NamedTemporaryFile(prefix='test_netboot', suffix='initrd') - for _ in xrange(8 * 1024): - self.initrd.write(chr(random.randrange(0, 256)) * 1024) + self.initrd = tempfile.NamedTemporaryFile( + prefix="test_netboot", suffix="initrd" + ) + for _ in range(8 * 1024): + self.initrd.write(bytes(str(chr(random.randrange(0, 128))).encode()) * 1024) self.initrd.flush() - self.image = tempfile.NamedTemporaryFile(prefix='test_netboot', suffix='image') - for _ in xrange(4 * 1024): - self.image.write(chr(random.randrange(0, 256)) * 1024) + self.image = tempfile.NamedTemporaryFile(prefix="test_netboot", suffix="image") + for _ in range(4 * 1024): + self.image.write(bytes(str(chr(random.randrange(0, 128))).encode()) * 1024) self.image.flush() class ImagesTest(ImagesBaseTestCase): - def test_fetch_then_clear(self): - netboot.fetch_images(1234, 'file://%s' % self.kernel.name, - 'file://%s' % self.initrd.name, - TEST_FQDN) + netboot.fetch_images( + 1234, + "file://%s" % self.kernel.name, + "file://%s" % self.initrd.name, + TEST_FQDN, + ) self.check_netboot_configured("images") - kernel_path = os.path.join(self.tftp_root, 'images', TEST_FQDN, 'kernel') - initrd_path = os.path.join(self.tftp_root, 'images', TEST_FQDN, 'initrd') - self.assertEquals(os.path.getsize(kernel_path), 4 * 1024 * 1024) - self.assertEquals(os.path.getsize(initrd_path), 8 * 1024 * 1024) + kernel_path = os.path.join(self.tftp_root, "images", TEST_FQDN, "kernel") + initrd_path = os.path.join(self.tftp_root, "images", TEST_FQDN, "initrd") + self.assertEqual(4 * 1024 * 1024, os.path.getsize(kernel_path)) + self.assertEqual(8 * 1024 * 1024, os.path.getsize(initrd_path)) netboot.clear_images(TEST_FQDN) self.check_netboot_cleared("images") # 
https://bugzilla.redhat.com/show_bug.cgi?id=833662 def test_fetch_twice(self): - netboot.fetch_images(1234, 'file://%s' % self.kernel.name, - 'file://%s' % self.initrd.name, - TEST_FQDN) - netboot.fetch_images(1234, 'file://%s' % self.kernel.name, - 'file://%s' % self.initrd.name, - TEST_FQDN) + netboot.fetch_images( + 1234, + "file://%s" % self.kernel.name, + "file://%s" % self.initrd.name, + TEST_FQDN, + ) + netboot.fetch_images( + 1234, + "file://%s" % self.kernel.name, + "file://%s" % self.initrd.name, + TEST_FQDN, + ) self.check_netboot_configured("images") - kernel_path = os.path.join(self.tftp_root, 'images', TEST_FQDN, 'kernel') - initrd_path = os.path.join(self.tftp_root, 'images', TEST_FQDN, 'initrd') - self.assertEquals(os.path.getsize(kernel_path), 4 * 1024 * 1024) - self.assertEquals(os.path.getsize(initrd_path), 8 * 1024 * 1024) + kernel_path = os.path.join(self.tftp_root, "images", TEST_FQDN, "kernel") + initrd_path = os.path.join(self.tftp_root, "images", TEST_FQDN, "initrd") + self.assertEqual(os.path.getsize(kernel_path), 4 * 1024 * 1024) + self.assertEqual(os.path.getsize(initrd_path), 8 * 1024 * 1024) class ArchBasedConfigTest(ImagesBaseTestCase): - common_categories = ("images", "armlinux", "efigrub", - "elilo", "yaboot", "pxelinux", "ipxe") + common_categories = ( + "images", + "armlinux", + "efigrub", + "elilo", + "yaboot", + "pxelinux", + "ipxe", + ) def configure(self, arch): - netboot.configure_all(TEST_FQDN, arch, 1234, - 'file://%s' % self.kernel.name, - 'file://%s' % self.initrd.name, "", - 'file://%s' % self.image.name, self.tftp_root) + netboot.configure_all( + TEST_FQDN, + arch, + 1234, + "file://%s" % self.kernel.name, + "file://%s" % self.initrd.name, + "", + "file://%s" % self.image.name, + self.tftp_root, + ) def get_categories(self, arch): this = self.common_categories @@ -223,264 +233,329 @@ def test_configure_then_clear_common(self): class PxelinuxTest(NetBootTestCase): - def test_configure_symlink_then_clear(self): """ Verify that kernel and initrd path points to images directory located in tftp root dir This is necessary in case of PXELINUX. PXELINUX is using relative paths from location of NBP instead of absolute paths as we can see in GRUB2. 
""" - bootloader_confs = os.path.join(self.tftp_root, 'bootloader', TEST_FQDN) - netboot.configure_pxelinux(TEST_FQDN, - 'console=ttyS0,115200 ks=http://lol/', - bootloader_confs, - symlink=True) + bootloader_confs = os.path.join(self.tftp_root, "bootloader", TEST_FQDN) + netboot.configure_pxelinux( + TEST_FQDN, + "console=ttyS0,115200 ks=http://lol/", + bootloader_confs, + symlink=True, + ) pxelinux_bootloader_path = os.path.join( - self.tftp_root, 'bootloader', TEST_FQDN, 'pxelinux.cfg') - pxelinux_config_path = os.path.join(pxelinux_bootloader_path, '7F0000FF') - pxelinux_default_path = os.path.join(pxelinux_bootloader_path, 'default') + self.tftp_root, "bootloader", TEST_FQDN, "pxelinux.cfg" + ) + pxelinux_config_path = os.path.join(pxelinux_bootloader_path, "7F0000FF") + pxelinux_default_path = os.path.join(pxelinux_bootloader_path, "default") open(pxelinux_config_path).readlines() - self.assertEquals(open(pxelinux_config_path).read(), - '''default linux + self.assertEqual( + open(pxelinux_config_path).read(), + """default linux prompt 0 timeout 100 label linux kernel ../../images/fqdn.example.invalid/kernel ipappend 2 append initrd=../../images/fqdn.example.invalid/initrd console=ttyS0,115200 ks=http://lol/ netboot_method=pxe -''') - self.assertEquals(open(pxelinux_default_path).read(), - '''default local +""", + ) + self.assertEqual( + open(pxelinux_default_path).read(), + """default local prompt 0 timeout 0 label local localboot 0 -''') +""", + ) self.check_netbootloader_leak(pxelinux_config_path) netboot.clear_pxelinux(TEST_FQDN, bootloader_confs) - self.assert_(not os.path.exists(pxelinux_config_path)) + self.assertTrue(not os.path.exists(pxelinux_config_path)) def test_configure_then_clear(self): - netboot.configure_pxelinux(TEST_FQDN, - 'console=ttyS0,115200 ks=http://lol/', self.tftp_root) - pxelinux_config_path = os.path.join(self.tftp_root, 'pxelinux.cfg', '7F0000FF') - pxelinux_default_path = os.path.join(self.tftp_root, 'pxelinux.cfg', 'default') - self.assertEquals(open(pxelinux_config_path).read(), - '''default linux + netboot.configure_pxelinux( + TEST_FQDN, "console=ttyS0,115200 ks=http://lol/", self.tftp_root + ) + pxelinux_config_path = os.path.join(self.tftp_root, "pxelinux.cfg", "7F0000FF") + pxelinux_default_path = os.path.join(self.tftp_root, "pxelinux.cfg", "default") + self.assertEqual( + open(pxelinux_config_path).read(), + """default linux prompt 0 timeout 100 label linux kernel /images/fqdn.example.invalid/kernel ipappend 2 append initrd=/images/fqdn.example.invalid/initrd console=ttyS0,115200 ks=http://lol/ netboot_method=pxe -''') - self.assertEquals(open(pxelinux_default_path).read(), - '''default local +""", + ) + self.assertEqual( + open(pxelinux_default_path).read(), + """default local prompt 0 timeout 0 label local localboot 0 -''') +""", + ) self.check_netbootloader_leak(pxelinux_config_path) netboot.clear_pxelinux(TEST_FQDN, self.tftp_root) - self.assert_(not os.path.exists(pxelinux_config_path)) + self.assertTrue(not os.path.exists(pxelinux_config_path)) def test_multiple_initrds(self): - netboot.configure_pxelinux(TEST_FQDN, - 'initrd=/mydriverdisk.img ks=http://lol/', self.tftp_root) - pxelinux_config_path = os.path.join(self.tftp_root, 'pxelinux.cfg', '7F0000FF') - self.assertEquals(open(pxelinux_config_path).read(), - '''default linux + netboot.configure_pxelinux( + TEST_FQDN, "initrd=/mydriverdisk.img ks=http://lol/", self.tftp_root + ) + pxelinux_config_path = os.path.join(self.tftp_root, "pxelinux.cfg", "7F0000FF") + self.assertEqual( + 
open(pxelinux_config_path).read(), + """default linux prompt 0 timeout 100 label linux kernel /images/fqdn.example.invalid/kernel ipappend 2 append initrd=/images/fqdn.example.invalid/initrd,/mydriverdisk.img ks=http://lol/ netboot_method=pxe -''') +""", + ) # https://bugzilla.redhat.com/show_bug.cgi?id=1067924 def test_kernel_options_are_not_quoted(self): - netboot.configure_pxelinux(TEST_FQDN, - 'initrd=/mydriverdisk.img ks=http://example.com/~user/kickstart', - self.tftp_root) - pxelinux_config_path = os.path.join(self.tftp_root, 'pxelinux.cfg', '7F0000FF') + netboot.configure_pxelinux( + TEST_FQDN, + "initrd=/mydriverdisk.img ks=http://example.com/~user/kickstart", + self.tftp_root, + ) + pxelinux_config_path = os.path.join(self.tftp_root, "pxelinux.cfg", "7F0000FF") config = open(pxelinux_config_path).read() - self.assertIn(' append ' - 'initrd=/images/fqdn.example.invalid/initrd,/mydriverdisk.img ' - 'ks=http://example.com/~user/kickstart netboot_method=pxe', - config) + self.assertIn( + " append " + "initrd=/images/fqdn.example.invalid/initrd,/mydriverdisk.img " + "ks=http://example.com/~user/kickstart netboot_method=pxe", + config, + ) def test_doesnt_overwrite_existing_default_config(self): - pxelinux_dir = os.path.join(self.tftp_root, 'pxelinux.cfg') - makedirs_ignore(pxelinux_dir, mode=0755) - pxelinux_default_path = os.path.join(pxelinux_dir, 'default') + mode = "x" if six.PY3 else "wx" + pxelinux_dir = os.path.join(self.tftp_root, "pxelinux.cfg") + makedirs_ignore(pxelinux_dir, mode=0o755) + pxelinux_default_path = os.path.join(pxelinux_dir, "default") # in reality it will probably be a menu - custom = ''' + custom = """ default local prompt 10 timeout 200 label local localboot 0 label jabberwocky - boot the thing''' - open(pxelinux_default_path, 'wx').write(custom) - netboot.configure_pxelinux(TEST_FQDN, - 'console=ttyS0,115200 ks=http://lol/', self.tftp_root) - self.assertEquals(open(pxelinux_default_path).read(), custom) + boot the thing""" + open(pxelinux_default_path, mode).write(custom) + netboot.configure_pxelinux( + TEST_FQDN, "console=ttyS0,115200 ks=http://lol/", self.tftp_root + ) + self.assertEqual(open(pxelinux_default_path).read(), custom) -class IpxeTest(NetBootTestCase): +class IpxeTest(NetBootTestCase): def test_configure_then_clear(self): - netboot.configure_ipxe(TEST_FQDN, - 'console=ttyS0,115200 ks=http://lol/', self.tftp_root) - ipxe_config_path = os.path.join(self.tftp_root, 'ipxe', '7f0000ff') - ipxe_default_path = os.path.join(self.tftp_root, 'ipxe', 'default') - self.assertEquals(open(ipxe_config_path).read(), - '''#!ipxe + netboot.configure_ipxe( + TEST_FQDN, "console=ttyS0,115200 ks=http://lol/", self.tftp_root + ) + ipxe_config_path = os.path.join(self.tftp_root, "ipxe", "7f0000ff") + ipxe_default_path = os.path.join(self.tftp_root, "ipxe", "default") + self.assertEqual( + open(ipxe_config_path).read(), + """#!ipxe kernel /images/fqdn.example.invalid/kernel initrd /images/fqdn.example.invalid/initrd imgargs kernel initrd=initrd console=ttyS0,115200 ks=http://lol/ netboot_method=ipxe BOOTIF=01-${netX/mac:hexhyp} boot || exit 1 -''') - self.assertEquals(open(ipxe_default_path).read(), - '''#!ipxe +""", + ) + self.assertEqual( + open(ipxe_default_path).read(), + """#!ipxe iseq ${builtin/platform} pcbios && sanboot --no-describe --drive 0x80 || exit 1 -''') +""", + ) self.check_netbootloader_leak(ipxe_config_path) netboot.clear_ipxe(TEST_FQDN, self.tftp_root) - self.assert_(not os.path.exists(ipxe_config_path)) + self.assertTrue(not 
os.path.exists(ipxe_config_path)) def test_multiple_initrds(self): - netboot.configure_ipxe(TEST_FQDN, - 'initrd=/mydriverdisk.img ks=http://lol/', self.tftp_root) - ipxe_config_path = os.path.join(self.tftp_root, 'ipxe', '7f0000ff') - self.assertEquals(open(ipxe_config_path).read(), - '''#!ipxe + netboot.configure_ipxe( + TEST_FQDN, "initrd=/mydriverdisk.img ks=http://lol/", self.tftp_root + ) + ipxe_config_path = os.path.join(self.tftp_root, "ipxe", "7f0000ff") + self.assertEqual( + open(ipxe_config_path).read(), + """#!ipxe kernel /images/fqdn.example.invalid/kernel initrd /images/fqdn.example.invalid/initrd initrd /mydriverdisk.img imgargs kernel initrd=initrd ks=http://lol/ netboot_method=ipxe BOOTIF=01-${netX/mac:hexhyp} boot || exit 1 -''') +""", + ) # https://bugzilla.redhat.com/show_bug.cgi?id=1067924 def test_kernel_options_are_not_quoted(self): - netboot.configure_ipxe(TEST_FQDN, - 'initrd=/mydriverdisk.img ks=http://example.com/~user/kickstart', self.tftp_root) - ipxe_config_path = os.path.join(self.tftp_root, 'ipxe', '7f0000ff') + netboot.configure_ipxe( + TEST_FQDN, + "initrd=/mydriverdisk.img ks=http://example.com/~user/kickstart", + self.tftp_root, + ) + ipxe_config_path = os.path.join(self.tftp_root, "ipxe", "7f0000ff") config = open(ipxe_config_path).read() - self.assertIn('imgargs kernel initrd=initrd ' - 'ks=http://example.com/~user/kickstart netboot_method=ipxe', - config) + self.assertIn( + "imgargs kernel initrd=initrd " + "ks=http://example.com/~user/kickstart netboot_method=ipxe", + config, + ) def test_doesnt_overwrite_existing_default_config(self): - ipxe_dir = os.path.join(self.tftp_root, 'ipxe') - makedirs_ignore(ipxe_dir, mode=0755) - ipxe_default_path = os.path.join(ipxe_dir, 'default') + mode = "x" if six.PY3 else "wx" + ipxe_dir = os.path.join(self.tftp_root, "ipxe") + makedirs_ignore(ipxe_dir, mode=0o755) + ipxe_default_path = os.path.join(ipxe_dir, "default") # in reality it will probably be a menu - custom = '''#!ipxe + custom = """#!ipxe chain /ipxe/beaker_menu exit 1 -''' - open(ipxe_default_path, 'wx').write(custom) - netboot.configure_ipxe(TEST_FQDN, - 'console=ttyS0,115200 ks=http://lol/', self.tftp_root) - self.assertEquals(open(ipxe_default_path).read(), custom) +""" + open(ipxe_default_path, mode).write(custom) + netboot.configure_ipxe( + TEST_FQDN, "console=ttyS0,115200 ks=http://lol/", self.tftp_root + ) + self.assertEqual(open(ipxe_default_path).read(), custom) class EfigrubTest(NetBootTestCase): - def test_configure_then_clear(self): - netboot.configure_efigrub(TEST_FQDN, - 'console=ttyS0,115200 ks=http://lol/', self.tftp_root) - grub_config_path = os.path.join(self.tftp_root, 'grub', '7F0000FF') - self.assertEquals(open(grub_config_path).read(), - '''default 0 + netboot.configure_efigrub( + TEST_FQDN, "console=ttyS0,115200 ks=http://lol/", self.tftp_root + ) + grub_config_path = os.path.join(self.tftp_root, "grub", "7F0000FF") + self.assertEqual( + open(grub_config_path).read(), + """default 0 timeout 10 title Beaker scheduled job for fqdn.example.invalid root (nd) kernel /images/fqdn.example.invalid/kernel console=ttyS0,115200 ks=http://lol/ netboot_method=efigrub initrd /images/fqdn.example.invalid/initrd -''') +""", + ) self.check_netbootloader_leak(grub_config_path) netboot.clear_efigrub(TEST_FQDN, self.tftp_root) - self.assert_(not os.path.exists(grub_config_path)) + self.assertTrue(not os.path.exists(grub_config_path)) def test_multiple_initrds(self): - netboot.configure_efigrub(TEST_FQDN, - 'initrd=/mydriverdisk.img ks=http://lol/', 
self.tftp_root) - grub_config_path = os.path.join(self.tftp_root, 'grub', '7F0000FF') - self.assertEquals(open(grub_config_path).read(), - '''default 0 + netboot.configure_efigrub( + TEST_FQDN, "initrd=/mydriverdisk.img ks=http://lol/", self.tftp_root + ) + grub_config_path = os.path.join(self.tftp_root, "grub", "7F0000FF") + self.assertEqual( + open(grub_config_path).read(), + """default 0 timeout 10 title Beaker scheduled job for fqdn.example.invalid root (nd) kernel /images/fqdn.example.invalid/kernel ks=http://lol/ netboot_method=efigrub initrd /images/fqdn.example.invalid/initrd /mydriverdisk.img -''') +""", + ) # https://bugzilla.redhat.com/show_bug.cgi?id=1067924 def test_kernel_options_are_not_quoted(self): - netboot.configure_efigrub(TEST_FQDN, - 'initrd=/mydriverdisk.img ks=http://example.com/~user/kickstart', - self.tftp_root) - grub_config_path = os.path.join(self.tftp_root, 'grub', '7F0000FF') + netboot.configure_efigrub( + TEST_FQDN, + "initrd=/mydriverdisk.img ks=http://example.com/~user/kickstart", + self.tftp_root, + ) + grub_config_path = os.path.join(self.tftp_root, "grub", "7F0000FF") config = open(grub_config_path).read() - self.assertIn(' kernel /images/fqdn.example.invalid/kernel ' - 'ks=http://example.com/~user/kickstart netboot_method=efigrub', - config) + self.assertIn( + " kernel /images/fqdn.example.invalid/kernel " + "ks=http://example.com/~user/kickstart netboot_method=efigrub", + config, + ) class ZpxeTest(NetBootTestCase): - def test_configure_then_clear(self): - netboot.configure_zpxe(TEST_FQDN, - 'ftp://lab.example.invalid/kernel.img', - 'ftp://lab.example.invalid/initrd.img', - # lots of options to test the 80-char wrapping - 'LAYER2=1 NETTYPE=qeth PORTNO=0 IPADDR=10.16.66.192 ' - 'SUBCHANNELS=0.0.8000,0.0.8001,0.0.8002 MTU=1500 ' - 'BROADCAST=10.16.71.255 SEARCHDNS= NETMASK=255.255.248.0 ' - 'DNS=10.16.255.2 PORTNAME=z10-01 DASD=208C,218C,228C,238C ' - 'GATEWAY=10.16.71.254 NETWORK=10.16.64.0 ' - 'MACADDR=02:DE:AD:BE:EF:01 ks=http://lol/', self.tftp_root) - self.assertEquals(open(os.path.join(self.tftp_root, 's390x', - 's_fqdn.example.invalid')).read(), - '''ftp://lab.example.invalid/kernel.img + netboot.configure_zpxe( + TEST_FQDN, + "ftp://lab.example.invalid/kernel.img", + "ftp://lab.example.invalid/initrd.img", + # lots of options to test the 80-char wrapping + "LAYER2=1 NETTYPE=qeth PORTNO=0 IPADDR=10.16.66.192 " + "SUBCHANNELS=0.0.8000,0.0.8001,0.0.8002 MTU=1500 " + "BROADCAST=10.16.71.255 SEARCHDNS= NETMASK=255.255.248.0 " + "DNS=10.16.255.2 PORTNAME=z10-01 DASD=208C,218C,228C,238C " + "GATEWAY=10.16.71.254 NETWORK=10.16.64.0 " + "MACADDR=02:DE:AD:BE:EF:01 ks=http://lol/", + self.tftp_root, + ) + self.assertEqual( + open( + os.path.join(self.tftp_root, "s390x", "s_fqdn.example.invalid") + ).read(), + """ftp://lab.example.invalid/kernel.img ftp://lab.example.invalid/initrd.img -''') - self.assertEquals(open(os.path.join(self.tftp_root, 's390x', - 's_fqdn.example.invalid_parm')).read(), - '''LAYER2=1 NETTYPE=qeth PORTNO=0 IPADDR=10.16.66.192 SUBCHANNELS=0.0.8000,0.0.8001 +""", + ) + self.assertEqual( + open( + os.path.join(self.tftp_root, "s390x", "s_fqdn.example.invalid_parm") + ).read(), + """LAYER2=1 NETTYPE=qeth PORTNO=0 IPADDR=10.16.66.192 SUBCHANNELS=0.0.8000,0.0.8001 ,0.0.8002 MTU=1500 BROADCAST=10.16.71.255 SEARCHDNS= NETMASK=255.255.248.0 DNS=1 0.16.255.2 PORTNAME=z10-01 DASD=208C,218C,228C,238C GATEWAY=10.16.71.254 NETWORK =10.16.64.0 MACADDR=02:DE:AD:BE:EF:01 ks=http://lol/ netboot_method=zpxe -''') - 
self.assertEquals(open(os.path.join(self.tftp_root, 's390x', - 's_fqdn.example.invalid_conf')).read(), - '') +""", + ) + self.assertEqual( + open( + os.path.join(self.tftp_root, "s390x", "s_fqdn.example.invalid_conf") + ).read(), + "", + ) netboot.clear_zpxe(TEST_FQDN, self.tftp_root) - self.assertEquals(open(os.path.join(self.tftp_root, 's390x', - 's_fqdn.example.invalid')).read(), - 'local\n') - self.assert_(not os.path.exists(os.path.join(self.tftp_root, 's390x', - 's_fqdn.example.invalid_parm'))) - self.assert_(not os.path.exists(os.path.join(self.tftp_root, 's390x', - 's_fqdn.example.invalid_conf'))) + self.assertEqual( + open( + os.path.join(self.tftp_root, "s390x", "s_fqdn.example.invalid") + ).read(), + "local\n", + ) + self.assertTrue( + not os.path.exists( + os.path.join(self.tftp_root, "s390x", "s_fqdn.example.invalid_parm") + ) + ) + self.assertTrue( + not os.path.exists( + os.path.join(self.tftp_root, "s390x", "s_fqdn.example.invalid_conf") + ) + ) class EliloTest(NetBootTestCase): - def test_configure_then_clear(self): - netboot.configure_elilo(TEST_FQDN, - 'console=ttyS0,115200 ks=http://lol/', self.tftp_root) - elilo_config_path = os.path.join(self.tftp_root, '7F0000FF.conf') - self.assertEquals(open(elilo_config_path).read(), - '''relocatable + netboot.configure_elilo( + TEST_FQDN, "console=ttyS0,115200 ks=http://lol/", self.tftp_root + ) + elilo_config_path = os.path.join(self.tftp_root, "7F0000FF.conf") + self.assertEqual( + open(elilo_config_path).read(), + """relocatable image=/images/fqdn.example.invalid/kernel label=netinstall @@ -488,20 +563,22 @@ def test_configure_then_clear(self): initrd=/images/fqdn.example.invalid/initrd read-only root=/dev/ram -''') +""", + ) self.check_netbootloader_leak(elilo_config_path) netboot.clear_elilo(TEST_FQDN, self.tftp_root) - self.assert_(not os.path.exists(elilo_config_path)) + self.assertTrue(not os.path.exists(elilo_config_path)) class YabootTest(NetBootTestCase): - def test_configure_then_clear(self): - netboot.configure_yaboot(TEST_FQDN, - 'console=ttyS0,115200 ks=http://lol/', self.tftp_root) - yaboot_config_path = os.path.join(self.tftp_root, 'etc', '7f0000ff') - self.assertEquals(open(yaboot_config_path).read(), - '''init-message="Beaker scheduled job for fqdn.example.invalid" + netboot.configure_yaboot( + TEST_FQDN, "console=ttyS0,115200 ks=http://lol/", self.tftp_root + ) + yaboot_config_path = os.path.join(self.tftp_root, "etc", "7f0000ff") + self.assertEqual( + open(yaboot_config_path).read(), + """init-message="Beaker scheduled job for fqdn.example.invalid" timeout=80 delay=10 default=linux @@ -510,154 +587,190 @@ def test_configure_then_clear(self): label=linux initrd=/images/fqdn.example.invalid/initrd append="console=ttyS0,115200 ks=http://lol/ netboot_method=yaboot" -''') - yaboot_symlink_path = os.path.join(self.tftp_root, 'ppc', '7f0000ff') - self.assertEquals(os.readlink(yaboot_symlink_path), '../yaboot') +""", + ) + yaboot_symlink_path = os.path.join(self.tftp_root, "ppc", "7f0000ff") + self.assertEqual(os.readlink(yaboot_symlink_path), "../yaboot") self.check_netbootloader_leak(yaboot_config_path) netboot.clear_yaboot(TEST_FQDN, self.tftp_root) - self.assert_(not os.path.exists(yaboot_config_path)) - self.assert_(not os.path.exists(yaboot_symlink_path)) + self.assertTrue(not os.path.exists(yaboot_config_path)) + self.assertTrue(not os.path.exists(yaboot_symlink_path)) # https://bugzilla.redhat.com/show_bug.cgi?id=829984 def test_configure_twice(self): - netboot.configure_yaboot(TEST_FQDN, - 
'console=ttyS0,115200 ks=http://lol/', self.tftp_root) - netboot.configure_yaboot(TEST_FQDN, - 'console=ttyS0,115200 ks=http://lol/', self.tftp_root) - yaboot_symlink_path = os.path.join(self.tftp_root, 'ppc', '7f0000ff') - self.assertEquals(os.readlink(yaboot_symlink_path), '../yaboot') + netboot.configure_yaboot( + TEST_FQDN, "console=ttyS0,115200 ks=http://lol/", self.tftp_root + ) + netboot.configure_yaboot( + TEST_FQDN, "console=ttyS0,115200 ks=http://lol/", self.tftp_root + ) + yaboot_symlink_path = os.path.join(self.tftp_root, "ppc", "7f0000ff") + self.assertEqual(os.readlink(yaboot_symlink_path), "../yaboot") class Grub2PPC64Test(NetBootTestCase): - def test_configure_then_clear(self): - netboot.configure_ppc64(TEST_FQDN, - 'console=ttyS0,115200 ks=http://lol/', self.tftp_root) - grub2_configs_path = [os.path.join(self.tftp_root, 'ppc', 'grub.cfg-7F0000FF'), - os.path.join(self.tftp_root, 'boot', 'grub2', 'grub.cfg-7F0000FF'), - os.path.join(self.tftp_root, 'grub.cfg-7F0000FF')] + netboot.configure_ppc64( + TEST_FQDN, "console=ttyS0,115200 ks=http://lol/", self.tftp_root + ) + grub2_configs_path = [ + os.path.join(self.tftp_root, "ppc", "grub.cfg-7F0000FF"), + os.path.join(self.tftp_root, "boot", "grub2", "grub.cfg-7F0000FF"), + os.path.join(self.tftp_root, "grub.cfg-7F0000FF"), + ] for path in grub2_configs_path: - self.assertEquals(open(path).read(), """\ + self.assertEqual( + open(path).read(), + """\ linux /images/fqdn.example.invalid/kernel console=ttyS0,115200 ks=http://lol/ netboot_method=grub2 initrd /images/fqdn.example.invalid/initrd boot -""") +""", + ) self.check_netbootloader_leak(path) - grub2_symlink_path = os.path.join(self.tftp_root, 'ppc', '7f0000ff-grub2') - self.assertEquals(os.readlink(grub2_symlink_path), - '../boot/grub2/powerpc-ieee1275/core.elf') + grub2_symlink_path = os.path.join(self.tftp_root, "ppc", "7f0000ff-grub2") + self.assertEqual( + os.readlink(grub2_symlink_path), "../boot/grub2/powerpc-ieee1275/core.elf" + ) netboot.clear_ppc64(TEST_FQDN, self.tftp_root) for path in grub2_configs_path: - self.assert_(not os.path.exists(path)) - self.assert_(not os.path.exists(grub2_symlink_path)) + self.assertTrue(not os.path.exists(path)) + self.assertTrue(not os.path.exists(grub2_symlink_path)) class Grub2TestX8664(NetBootTestCase): - def test_configure_then_clear(self): - netboot.configure_x86_64(TEST_FQDN, - 'console=ttyS0,115200 ks=http://lol/', self.tftp_root) - grub2_configs_path = [os.path.join(self.tftp_root, 'x86_64', 'grub.cfg-7F0000FF'), - os.path.join(self.tftp_root, 'boot', 'grub2', 'grub.cfg-7F0000FF')] - grub2_default_path = [os.path.join(self.tftp_root, 'x86_64', 'grub.cfg'), - os.path.join(self.tftp_root, 'x86_64', 'grub.cfg')] + netboot.configure_x86_64( + TEST_FQDN, "console=ttyS0,115200 ks=http://lol/", self.tftp_root + ) + grub2_configs_path = [ + os.path.join(self.tftp_root, "x86_64", "grub.cfg-7F0000FF"), + os.path.join(self.tftp_root, "boot", "grub2", "grub.cfg-7F0000FF"), + ] + grub2_default_path = [ + os.path.join(self.tftp_root, "x86_64", "grub.cfg"), + os.path.join(self.tftp_root, "x86_64", "grub.cfg"), + ] for path in grub2_configs_path: - self.assertEquals(open(path).read(), """\ + self.assertEqual( + open(path).read(), + """\ linux /images/fqdn.example.invalid/kernel console=ttyS0,115200 ks=http://lol/ netboot_method=grub2 initrd /images/fqdn.example.invalid/initrd boot -""") +""", + ) self.check_netbootloader_leak(path) for path in grub2_default_path: - self.assertEquals(open(path).read(), 'exit\n') + 
self.assertEqual(open(path).read(), "exit\n") netboot.clear_x86_64(TEST_FQDN, self.tftp_root) for path in grub2_configs_path: - self.assert_(not os.path.exists(path)) + self.assertTrue(not os.path.exists(path)) # Keep default for path in grub2_default_path: - self.assert_(os.path.exists(path)) + self.assertTrue(os.path.exists(path)) class Aarch64Test(NetBootTestCase): - def test_configure_then_clear(self): - netboot.configure_aarch64(TEST_FQDN, - 'console=ttyS0,115200 ks=http://lol/', self.tftp_root) - grub_config_path = os.path.join(self.tftp_root, 'aarch64', 'grub.cfg-7F0000FF') - grub_default_path = os.path.join(self.tftp_root, 'aarch64', 'grub.cfg') - self.assertEquals(open(grub_config_path).read(), """\ + netboot.configure_aarch64( + TEST_FQDN, "console=ttyS0,115200 ks=http://lol/", self.tftp_root + ) + grub_config_path = os.path.join(self.tftp_root, "aarch64", "grub.cfg-7F0000FF") + grub_default_path = os.path.join(self.tftp_root, "aarch64", "grub.cfg") + self.assertEqual( + open(grub_config_path).read(), + """\ linux /images/fqdn.example.invalid/kernel console=ttyS0,115200 ks=http://lol/ netboot_method=grub2 initrd /images/fqdn.example.invalid/initrd boot -""") - self.assertEquals(open(grub_default_path).read(), 'exit\n') +""", + ) + self.assertEqual(open(grub_default_path).read(), "exit\n") self.check_netbootloader_leak(grub_config_path) netboot.clear_aarch64(TEST_FQDN, self.tftp_root) self.assertFalse(os.path.exists(grub_config_path)) # https://bugzilla.redhat.com/show_bug.cgi?id=1100008 def test_alternate_devicetree(self): - netboot.configure_aarch64(TEST_FQDN, - 'devicetree=custom.dtb ks=http://lol/', self.tftp_root) - grub_config_path = os.path.join(self.tftp_root, 'aarch64', 'grub.cfg-7F0000FF') - self.assertEquals(open(grub_config_path).read(), """\ + netboot.configure_aarch64( + TEST_FQDN, "devicetree=custom.dtb ks=http://lol/", self.tftp_root + ) + grub_config_path = os.path.join(self.tftp_root, "aarch64", "grub.cfg-7F0000FF") + self.assertEqual( + open(grub_config_path).read(), + """\ linux /images/fqdn.example.invalid/kernel ks=http://lol/ netboot_method=grub2 initrd /images/fqdn.example.invalid/initrd devicetree custom.dtb boot -""") +""", + ) class PetitbootTest(NetBootTestCase): - def test_configure_then_clear(self): - netboot.configure_petitboot(TEST_FQDN, - 'ks=http://lol/ ksdevice=bootif', self.tftp_root) - petitboot_config_path = os.path.join(self.tftp_root, 'bootloader', - TEST_FQDN, 'petitboot.cfg') - self.assertEquals(open(petitboot_config_path).read(), """\ + netboot.configure_petitboot( + TEST_FQDN, "ks=http://lol/ ksdevice=bootif", self.tftp_root + ) + petitboot_config_path = os.path.join( + self.tftp_root, "bootloader", TEST_FQDN, "petitboot.cfg" + ) + self.assertEqual( + open(petitboot_config_path).read(), + """\ default Beaker scheduled job for fqdn.example.invalid label Beaker scheduled job for fqdn.example.invalid kernel ::/images/fqdn.example.invalid/kernel initrd ::/images/fqdn.example.invalid/initrd append ks=http://lol/ ksdevice=bootif netboot_method=petitboot -""") +""", + ) self.check_netbootloader_leak(petitboot_config_path) netboot.clear_petitboot(TEST_FQDN, self.tftp_root) self.assertFalse(os.path.exists(petitboot_config_path)) class NetbootloaderTest(ImagesBaseTestCase): - def test_configure_then_clear(self): - netboot.configure_all(TEST_FQDN, 'ppc64', 1234, - 'file://%s' % self.kernel.name, - 'file://%s' % self.initrd.name, - 'netbootloader=myawesome/netbootloader', - None - ) - bootloader_config_symlink = os.path.join(self.tftp_root, 
'bootloader', TEST_FQDN, 'image') + netboot.configure_all( + TEST_FQDN, + "ppc64", + 1234, + "file://%s" % self.kernel.name, + "file://%s" % self.initrd.name, + "netbootloader=myawesome/netbootloader", + None, + ) + bootloader_config_symlink = os.path.join( + self.tftp_root, "bootloader", TEST_FQDN, "image" + ) self.assertTrue(os.path.lexists(bootloader_config_symlink)) - self.assertEquals(os.path.realpath(bootloader_config_symlink), - os.path.join(self.tftp_root, 'myawesome/netbootloader')) + self.assertEqual( + os.path.realpath(bootloader_config_symlink), + os.path.join(self.tftp_root, "myawesome/netbootloader"), + ) # this tests ppc64 netboot creation - grub2_config_file = os.path.join(self.tftp_root, 'bootloader', TEST_FQDN, - 'grub.cfg-7F0000FF') + grub2_config_file = os.path.join( + self.tftp_root, "bootloader", TEST_FQDN, "grub.cfg-7F0000FF" + ) self.assertTrue(os.path.exists(grub2_config_file)) self.check_netbootloader_leak(grub2_config_file) # Clear netboot.clear_netbootloader_directory(TEST_FQDN) # the FQDN directory is not removed - self.assertTrue(os.path.exists(os.path.join(self.tftp_root, 'bootloader', TEST_FQDN))) + self.assertTrue( + os.path.exists(os.path.join(self.tftp_root, "bootloader", TEST_FQDN)) + ) # the image symlink is removed self.assertFalse(os.path.lexists(bootloader_config_symlink)) # The config files for grub2 should be removed (since this is for PPC64) diff --git a/LabController/src/bkr/labcontroller/test_provision.py b/LabController/src/bkr/labcontroller/test_provision.py new file mode 100644 index 000000000..4ef4da6c7 --- /dev/null +++ b/LabController/src/bkr/labcontroller/test_provision.py @@ -0,0 +1,54 @@ +# Copyright Contributors to the Beaker project. +# SPDX-License-Identifier: GPL-2.0-or-later + +import unittest + +import six + +from bkr.common.helpers import SensitiveUnicode +from bkr.labcontroller.provision import build_power_env + + +class TestBuildPowerEnv(unittest.TestCase): + def test_build_power_env(self): + t_command = { + "power": { + "address": u"192.168.1.1", + "id": u"42", + "user": u"root", + "passwd": SensitiveUnicode(u"toor"), + }, + "action": u"reboot", + } + + expected = { + "power_address": "192.168.1.1", + "power_id": "42", + "power_user": "root", + "power_pass": "toor", + "power_mode": "reboot", + } + + actual = build_power_env(t_command) + + for key, value in six.iteritems(expected): + self.assertEqual(expected[key], actual[key]) + + def test_build_power_env_with_missing_fields(self): + t_command = { + "power": {"address": u"192.168.1.1", "passwd": SensitiveUnicode(u"toor")}, + "action": u"reboot", + } + + expected = { + "power_address": "192.168.1.1", + "power_id": "", + "power_user": "", + "power_pass": "toor", + "power_mode": "reboot", + } + + actual = build_power_env(t_command) + + for key, value in six.iteritems(expected): + self.assertEqual(expected[key], actual[key]) diff --git a/LabController/src/bkr/labcontroller/test_proxy.py b/LabController/src/bkr/labcontroller/test_proxy.py index b235cad58..a5e2fb80e 100644 --- a/LabController/src/bkr/labcontroller/test_proxy.py +++ b/LabController/src/bkr/labcontroller/test_proxy.py @@ -10,42 +10,47 @@ class TestPanicDetector(unittest.TestCase): - def setUp(self): self.conf = _conf self.panic_detector = PanicDetector(self.conf["PANIC_REGEX"]) self.should_panic = [ - 'Internal error: Oops - BUG: 0 [#2] PREEMPT ARM', # oops.kernel.org examples - 'Oops: 0000 [#1] SMP\\', - 'Oops[#1]', - 'Oops - bad mode', # jbastian example bz:1538906 - 'kernel BUG at fs/ext4/super.c:1022!' 
# xifeng example bz:1778643 + "Internal error: Oops - BUG: 0 [#2] PREEMPT ARM", # oops.kernel.org examples + "Oops: 0000 [#1] SMP\\", + "Oops[#1]", + "Oops - bad mode", # jbastian example bz:1538906 + "kernel BUG at fs/ext4/super.c:1022!", # xifeng example bz:1778643 ] # From bz:1538906 self.should_not_panic = [ - 'regression-bz123456789-Oops-when-some-thing-happens-', - 'I can\'t believe it\'s not a panic', - 'looking for a kernel BUG at my setup!' + "regression-bz123456789-Oops-when-some-thing-happens-", + "I can't believe it's not a panic", + "looking for a kernel BUG at my setup!", ] - self.acceptable_panic_matches = ['Oops:', 'Oops ', 'Oops[', - 'kernel BUG at fs/ext4/super.c:1022!'] + self.acceptable_panic_matches = [ + "Oops:", + "Oops ", + "Oops[", + "kernel BUG at fs/ext4/super.c:1022!", + ] def test_panic_detector_detects_correctly(self): for line in self.should_panic: self.panic_detector.fired = False match = self.panic_detector.feed(line) - self.assertTrue(self.panic_detector.fired, - "Failed to detect: %r" % (line)) - self.assertTrue(match in self.acceptable_panic_matches, - "%r is not an acceptable match. Line: %r" % (match, line)) + self.assertTrue(self.panic_detector.fired, "Failed to detect: %r" % line) + self.assertTrue( + match in self.acceptable_panic_matches, + "%r is not an acceptable match. Line: %r" % (match, line), + ) def test_panic_detector_ignores_false_panic(self): for line in self.should_not_panic: match = self.panic_detector.feed(line) - self.assertFalse(self.panic_detector.fired, - "Panic detector erroneously detected: %r" % (line)) - self.assertIsNone(match, - "feed result ( %r ) wasn't NoneType" % (match)) + self.assertFalse( + self.panic_detector.fired, + "Panic detector erroneously detected: %r" % line, + ) + self.assertIsNone(match, "feed result ( %r ) wasn't NoneType" % match) diff --git a/LabController/src/bkr/labcontroller/transfer.py b/LabController/src/bkr/labcontroller/transfer.py index df6b3446a..dff36c287 100644 --- a/LabController/src/bkr/labcontroller/transfer.py +++ b/LabController/src/bkr/labcontroller/transfer.py @@ -1,28 +1,33 @@ - # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. -import os -import sys -import signal import logging -import time -import socket +import signal +import sys +from optparse import OptionParser + import daemon from daemon import pidfile -from optparse import OptionParser -from bkr.labcontroller.proxy import LogArchiver + from bkr.labcontroller.config import get_conf, load_conf from bkr.labcontroller.exceptions import ShutdownException +from bkr.labcontroller.proxy import LogArchiver from bkr.log import log_to_stream, log_to_syslog +try: + from ssl import SSLError +except (ImportError, AttributeError): + from socket import sslerror as SSLError + logger = logging.getLogger(__name__) + def daemon_shutdown(*args, **kwargs): raise ShutdownException() + def main_loop(logarchiver, conf=None): """infinite daemon loop""" @@ -31,7 +36,7 @@ def main_loop(logarchiver, conf=None): while True: try: - # Look for logs to transfer if none transfered then sleep + # Look for logs to transfer if none transferred then sleep if not logarchiver.transfer_logs(): logarchiver.sleep() @@ -39,32 +44,34 @@ def main_loop(logarchiver, conf=None): sys.stdout.flush() sys.stderr.flush() - except socket.sslerror: - pass # will try again.. 
+ except SSLError: + pass # will try again.. except (ShutdownException, KeyboardInterrupt): # ignore keyboard interrupts and sigterm signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) - logger.info('Exiting...') + logger.info("Exiting...") break - except: + except: # noqa # this is a little extreme: log the exception and continue - logger.exception('Error in main loop') + logger.exception("Error in main loop") logarchiver.sleep() - def main(): parser = OptionParser() - parser.add_option("-c", "--config", - help="Full path to config file to use") - parser.add_option("-f", "--foreground", default=False, action="store_true", - help="run in foreground (do not spawn a daemon)") - parser.add_option("-p", "--pid-file", - help="specify a pid file") + parser.add_option("-c", "--config", help="Full path to config file to use") + parser.add_option( + "-f", + "--foreground", + default=False, + action="store_true", + help="run in foreground (do not spawn a daemon)", + ) + parser.add_option("-p", "--pid-file", help="specify a pid file") (opts, args) = parser.parse_args() if opts.config: @@ -74,18 +81,20 @@ def main(): pid_file = opts.pid_file if pid_file is None: - pid_file = conf.get("WPID_FILE", "/var/run/beaker-lab-controller/beaker-transfer.pid") + pid_file = conf.get( + "WPID_FILE", "/var/run/beaker-lab-controller/beaker-transfer.pid" + ) - if not conf.get('ARCHIVE_SERVER'): - sys.stderr.write('Archive server settings are missing from config file\n') + if not conf.get("ARCHIVE_SERVER"): + sys.stderr.write("Archive server settings are missing from config file\n") sys.exit(1) - # HubProxy will try to log some stuff, even though we - # haven't configured our logging handlers yet. So we send logs to stderr + # HubProxy will try to log some stuff, even though we + # haven't configured our logging handlers yet. So we send logs to stderr # temporarily here, and configure it again below. log_to_stream(sys.stderr, level=logging.WARNING) try: logarchiver = LogArchiver(conf=conf) - except Exception, ex: + except Exception as ex: sys.stderr.write("Error starting beaker-transfer: %s\n" % ex) sys.exit(1) @@ -95,10 +104,13 @@ def main(): else: # See BZ#977269 logarchiver.close() - with daemon.DaemonContext(pidfile=pidfile.TimeoutPIDLockFile( - pid_file, acquire_timeout=0), detach_process=True): - log_to_syslog('beaker-transfer') + with daemon.DaemonContext( + pidfile=pidfile.TimeoutPIDLockFile(pid_file, acquire_timeout=0), + detach_process=True, + ): + log_to_syslog("beaker-transfer") main_loop(logarchiver=logarchiver, conf=conf) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/LabController/src/bkr/labcontroller/utils.py b/LabController/src/bkr/labcontroller/utils.py index b13fdc94d..528aea741 100644 --- a/LabController/src/bkr/labcontroller/utils.py +++ b/LabController/src/bkr/labcontroller/utils.py @@ -1,61 +1,13 @@ - # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. -import os -import subprocess import logging -from bkr.labcontroller.config import get_conf +import os logger = logging.getLogger(__name__) -class CalledProcessError(Exception): - """This exception is raised when a process run by check_call() or - check_output() returns a non-zero exit status. 
- The exit status will be stored in the returncode attribute; - check_output() will also store the output in the output attribute. - """ - def __init__(self, returncode, cmd, output=None): - self.returncode = returncode - self.cmd = cmd - self.output = output - def __str__(self): - return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode) - -def check_output(*popenargs, **kwargs): - r"""Run command with arguments and return its output as a byte string. - - If the exit code was non-zero it raises a CalledProcessError. The - CalledProcessError object will have the return code in the returncode - attribute and output in the output attribute. - - The arguments are the same as for the Popen constructor. Example: - - >>> check_output(["ls", "-l", "/dev/null"]) - 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' - - The stdout argument is not allowed as it is used internally. - To capture standard error in the result, use stderr=STDOUT. - - >>> check_output(["/bin/sh", "-c", - ... "ls -l non_existent_file ; exit 0"], - ... stderr=STDOUT) - 'ls: non_existent_file: No such file or directory\n' - """ - if 'stdout' in kwargs: - raise ValueError('stdout argument not allowed, it will be overridden.') - process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) - output, unused_err = process.communicate() - retcode = process.poll() - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - raise CalledProcessError(retcode, cmd, output=output) - return output - def get_console_files(console_logs_directory, system_name): """Return a list of the console log files for a system @@ -70,8 +22,9 @@ def get_console_files(console_logs_directory, system_name): :return: List[Tuple[absolute path to log file, name to use for log file]] """ if not os.path.isdir(console_logs_directory): - logger.info("Console files directory does not exist: %s", - console_logs_directory) + logger.info( + "Console files directory does not exist: %s", console_logs_directory + ) return [] if not system_name: @@ -87,7 +40,7 @@ def get_console_files(console_logs_directory, system_name): else: description = filename[len(system_name):] # Remove leading hyphens - description = description.lstrip('-') + description = description.lstrip("-") logfile_name = "console-{}.log".format(description) output.append((full_path, logfile_name)) return output diff --git a/LabController/src/bkr/labcontroller/watchdog.py b/LabController/src/bkr/labcontroller/watchdog.py index f3685cc06..ec9ac8717 100644 --- a/LabController/src/bkr/labcontroller/watchdog.py +++ b/LabController/src/bkr/labcontroller/watchdog.py @@ -1,24 +1,25 @@ - # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
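Not part of the patch: the CalledProcessError class and check_output() helper removed from utils.py above appear to be backports for old Python 2; since Python 2.7 the standard library provides the same behaviour. A minimal sketch of the stdlib equivalent, using the same illustrative command as the removed docstring:

    import subprocess

    try:
        output = subprocess.check_output(
            ["ls", "-l", "/dev/null"], stderr=subprocess.STDOUT
        )
    except subprocess.CalledProcessError as exc:
        # exc.returncode, exc.cmd and exc.output carry the same information
        # as the removed CalledProcessError class did
        print("command failed with status %d" % exc.returncode)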
-import os -import sys -import signal import logging -import time -import socket -import xmlrpclib +import signal import subprocess -import lxml.etree +import sys +from optparse import OptionParser + import daemon +import gevent +import gevent.event +import gevent.hub +import gevent.monkey +import lxml.etree from daemon import pidfile -from optparse import OptionParser -import gevent, gevent.hub, gevent.event, gevent.monkey -from bkr.labcontroller.proxy import ProxyHelper, Monitor -from bkr.labcontroller.config import load_conf, get_conf +from six.moves import xmlrpc_client + +from bkr.labcontroller.config import get_conf, load_conf +from bkr.labcontroller.proxy import Monitor, ProxyHelper from bkr.log import log_to_stream, log_to_syslog # Like beaker-provision and beaker-transfer, this daemon is structured as @@ -38,10 +39,12 @@ # Note that we must construct it *after* we daemonize, inside the main loop below. shutting_down = None + def shutdown_handler(signum, frame): - logger.info('Received signal %s, shutting down', signum) + logger.info("Received signal %s, shutting down", signum) shutting_down.set() + def run_monitor(monitor): while True: updated = monitor.run() @@ -51,97 +54,124 @@ def run_monitor(monitor): if shutting_down.is_set(): break else: - if shutting_down.wait(timeout=monitor.conf.get('SLEEP_TIME', 20)): + if shutting_down.wait(timeout=monitor.conf.get("SLEEP_TIME", 20)): break -class Watchdog(ProxyHelper): +class Watchdog(ProxyHelper): def __init__(self, *args, **kwargs): super(Watchdog, self).__init__(*args, **kwargs) - self.monitor_greenlets = {} #: dict of (recipe id -> greenlet which is monitoring its console log) + self.monitor_greenlets = ( + {} + ) #: dict of (recipe id -> greenlet which is monitoring its console log) def get_active_watchdogs(self): - logger.debug('Polling for active watchdogs') + logger.debug("Polling for active watchdogs") try: - return self.hub.recipes.tasks.watchdogs('active') - except xmlrpclib.Fault as fault: - if 'not currently logged in' in fault.faultString: - logger.debug('Session expired, re-authenticating') + return self.hub.recipes.tasks.watchdogs("active") + except xmlrpc_client.Fault as fault: + if "not currently logged in" in fault.faultString: + logger.debug("Session expired, re-authenticating") self.hub._login() - return self.hub.recipes.tasks.watchdogs('active') + return self.hub.recipes.tasks.watchdogs("active") else: raise def get_expired_watchdogs(self): - logger.debug('Polling for expired watchdogs') + logger.debug("Polling for expired watchdogs") try: - return self.hub.recipes.tasks.watchdogs('expired') - except xmlrpclib.Fault as fault: - if 'not currently logged in' in fault.faultString: - logger.debug('Session expired, re-authenticating') + return self.hub.recipes.tasks.watchdogs("expired") + except xmlrpc_client.Fault as fault: + if "not currently logged in" in fault.faultString: + logger.debug("Session expired, re-authenticating") self.hub._login() - return self.hub.recipes.tasks.watchdogs('expired') + return self.hub.recipes.tasks.watchdogs("expired") else: raise def abort(self, recipe_id, system): # Don't import this at global scope. It triggers gevent to create its default hub, # but we need to ensure the gevent hub is not created until *after* we have daemonized. 
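Not part of the patch: the comment above (keeping the MonitoredSubprocess import out of module scope) is about when the gevent hub gets created. A generic sketch of the same ordering, assuming python-daemon and gevent are installed; the function names are illustrative:

    import daemon

    def do_work():
        # gevent is only touched here, inside the daemonized child, so its
        # default hub is created after the fork performed by DaemonContext,
        # never in the parent process
        import gevent
        gevent.spawn(lambda: "ok").join()

    def main():
        with daemon.DaemonContext(detach_process=True):
            do_work()

    if __name__ == "__main__":
        main()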
- from bkr.labcontroller.async import MonitoredSubprocess - logger.info('External Watchdog Expired for recipe %s on system %s', recipe_id, system) + from bkr.labcontroller.concurrency import MonitoredSubprocess + + logger.info( + "External Watchdog Expired for recipe %s on system %s", recipe_id, system + ) if self.conf.get("WATCHDOG_SCRIPT"): job = lxml.etree.fromstring(self.get_my_recipe(dict(recipe_id=recipe_id))) - recipe = job.find('recipeSet/guestrecipe') + recipe = job.find("recipeSet/guestrecipe") if recipe is None: - recipe = job.find('recipeSet/recipe') - for task in recipe.iterfind('task'): - if task.get('status') == 'Running': + recipe = job.find("recipeSet/recipe") + task = None + for task in recipe.iterfind("task"): + if task.get("status") == "Running": break - task_id = task.get('id') - args = [self.conf.get('WATCHDOG_SCRIPT'), str(system), str(recipe_id), str(task_id)] - logger.debug('Invoking external watchdog script %r', args) - p = MonitoredSubprocess(args, - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - timeout=300) - logger.debug('Waiting on external watchdog script pid %s', p.pid) + + if task is None: + logger.error("Unable to find task for recipe %s\n", recipe_id) + return + + task_id = task.get("id") + args = [ + self.conf.get("WATCHDOG_SCRIPT"), + str(system), + str(recipe_id), + str(task_id), + ] + logger.debug("Invoking external watchdog script %r", args) + p = MonitoredSubprocess( + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, timeout=300 + ) + logger.debug("Waiting on external watchdog script pid %s", p.pid) p.dead.wait() output = p.stdout_reader.get() if p.returncode != 0: - logger.error('External watchdog script exited with status %s:\n%s', - p.returncode, output) + logger.error( + "External watchdog script exited with status %s:\n%s", + p.returncode, + output, + ) else: try: extend_seconds = int(output) except ValueError: - logger.error('Expected external watchdog script to print number of seconds ' - 'to extend watchdog by, got:\n%s', output) + logger.error( + "Expected external watchdog script to print number of seconds " + "to extend watchdog by, got:\n%s", + output, + ) else: - logger.debug('Extending T:%s watchdog %d', task_id, extend_seconds) + logger.debug("Extending T:%s watchdog %d", task_id, extend_seconds) self.extend_watchdog(task_id, extend_seconds) # Don't abort it here, we assume the script took care of things. 
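Not part of the patch: a minimal example of an external WATCHDOG_SCRIPT hook matching the calling convention above. It is invoked with the system FQDN, recipe id and task id as arguments, must print the number of seconds to extend the watchdog by, and a non-zero exit status makes beaker-watchdog log the output as an error instead. The fixed 600 seconds is only illustrative:

    #!/usr/bin/python
    import sys

    def main():
        system, recipe_id, task_id = sys.argv[1:4]
        # A real hook would inspect the system or recipe before deciding;
        # printing a plain integer number of seconds is the whole contract
        print(600)
        return 0

    if __name__ == "__main__":
        sys.exit(main())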
return - self.recipe_stop(recipe_id, 'abort', 'External Watchdog Expired') + self.recipe_stop(recipe_id, "abort", "External Watchdog Expired") def spawn_monitor(self, watchdog): monitor = Monitor(watchdog, self) - greenlet = gevent.spawn(run_monitor, monitor) - self.monitor_greenlets[watchdog['recipe_id']] = greenlet + monitor_greenlet = gevent.spawn(run_monitor, monitor) + self.monitor_greenlets[watchdog["recipe_id"]] = monitor_greenlet + def completion_callback(greenlet): if greenlet.exception: - logger.error('Monitor greenlet %r had unhandled exception: %r', - greenlet, greenlet.exception) - del self.monitor_greenlets[watchdog['recipe_id']] - greenlet.link(completion_callback) + logger.error( + "Monitor greenlet %r had unhandled exception: %r", + greenlet, + greenlet.exception, + ) + del self.monitor_greenlets[watchdog["recipe_id"]] + + monitor_greenlet.link(completion_callback) def poll(self): for expired_watchdog in self.get_expired_watchdogs(): try: - recipe_id = expired_watchdog['recipe_id'] - system = expired_watchdog['system'] + recipe_id = expired_watchdog["recipe_id"] + system = expired_watchdog["system"] self.abort(recipe_id, system) - except Exception: + except Exception: # noqa # catch and ignore here, so that we keep going through the loop - logger.exception('Failed to abort expired watchdog') + logger.exception("Failed to abort expired watchdog") if shutting_down.is_set(): return # Get active watchdogs *after* we finish running @@ -151,15 +181,16 @@ def poll(self): active_watchdogs = self.get_active_watchdogs() # Start a new monitor for any active watchdog we are not already monitoring. for watchdog in active_watchdogs: - if watchdog['recipe_id'] not in self.monitor_greenlets: + if watchdog["recipe_id"] not in self.monitor_greenlets: self.spawn_monitor(watchdog) # Kill any running monitors that are gone from the list. 
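Not part of the patch: the monitor bookkeeping above, reduced to a standalone gevent sketch. One greenlet per recipe, a link() callback to drop finished ones from the dict, and kill() for recipes that have left the active list; the recipe ids and the sleep are illustrative:

    import gevent

    monitors = {}

    def watch(recipe_id):
        gevent.sleep(0.1)  # stands in for Monitor.run() polling the console log

    def start_monitor(recipe_id):
        greenlet = gevent.spawn(watch, recipe_id)
        monitors[recipe_id] = greenlet
        # link() fires once the greenlet finishes, whether it returned or raised
        greenlet.link(lambda g, rid=recipe_id: monitors.pop(rid, None))

    def reconcile(active_ids):
        for recipe_id in active_ids:
            if recipe_id not in monitors:
                start_monitor(recipe_id)
        for recipe_id, greenlet in list(monitors.items()):
            if recipe_id not in active_ids:
                greenlet.kill()

    reconcile({1, 2, 3})
    gevent.joinall(list(monitors.values()))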
- active_recipes = set(w['recipe_id'] for w in active_watchdogs) + active_recipes = set(w["recipe_id"] for w in active_watchdogs) for recipe_id, greenlet in list(self.monitor_greenlets.items()): if recipe_id not in active_recipes: - logger.info('Stopping monitor for recipe %s', recipe_id) + logger.info("Stopping monitor for recipe %s", recipe_id) greenlet.kill() + def main_loop(watchdog, conf): global shutting_down shutting_down = gevent.event.Event() @@ -168,25 +199,29 @@ def main_loop(watchdog, conf): signal.signal(signal.SIGINT, shutdown_handler) signal.signal(signal.SIGTERM, shutdown_handler) - logger.debug('Entering main watchdog loop') + logger.debug("Entering main watchdog loop") while True: try: watchdog.poll() - except: - logger.exception('Failed to poll for watchdogs') - if shutting_down.wait(timeout=conf.get('SLEEP_TIME', 20)): - gevent.hub.get_hub().join() # let running greenlets terminate + except Exception: # noqa + logger.exception("Failed to poll for watchdogs") + if shutting_down.wait(timeout=conf.get("SLEEP_TIME", 20)): + gevent.hub.get_hub().join() # let running greenlets terminate break - logger.debug('Exited main watchdog loop') + logger.debug("Exited main watchdog loop") + def main(): parser = OptionParser() - parser.add_option("-c", "--config", - help="Full path to config file to use") - parser.add_option("-f", "--foreground", default=False, action="store_true", - help="run in foreground (do not spawn a daemon)") - parser.add_option("-p", "--pid-file", - help="specify a pid file") + parser.add_option("-c", "--config", help="Full path to config file to use") + parser.add_option( + "-f", + "--foreground", + default=False, + action="store_true", + help="run in foreground (do not spawn a daemon)", + ) + parser.add_option("-p", "--pid-file", help="specify a pid file") (opts, args) = parser.parse_args() if opts.config: load_conf(opts.config) @@ -195,15 +230,17 @@ def main(): conf = get_conf() pid_file = opts.pid_file if pid_file is None: - pid_file = conf.get("WATCHDOG_PID_FILE", "/var/run/beaker-lab-controller/beaker-watchdog.pid") + pid_file = conf.get( + "WATCHDOG_PID_FILE", "/var/run/beaker-lab-controller/beaker-watchdog.pid" + ) - # HubProxy will try to log some stuff, even though we - # haven't configured our logging handlers yet. So we send logs to stderr + # HubProxy will try to log some stuff, even though we + # haven't configured our logging handlers yet. So we send logs to stderr # temporarily here, and configure it again below. log_to_stream(sys.stderr, level=logging.WARNING) try: watchdog = Watchdog(conf=conf) - except Exception, ex: + except Exception as ex: sys.stderr.write("Error starting beaker-watchdog: %s\n" % ex) sys.exit(1) @@ -213,14 +250,17 @@ def main(): else: # See BZ#977269 watchdog.close() - with daemon.DaemonContext(pidfile=pidfile.TimeoutPIDLockFile( - pid_file, acquire_timeout=0), detach_process=True): - log_to_syslog('beaker-watchdog') + with daemon.DaemonContext( + pidfile=pidfile.TimeoutPIDLockFile(pid_file, acquire_timeout=0), + detach_process=True, + ): + log_to_syslog("beaker-watchdog") try: main_loop(watchdog, conf) except Exception: - logger.exception('Unhandled exception in main_loop') + logger.exception("Unhandled exception in main_loop") raise -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/Makefile b/Makefile index 47c5e9084..4571a4c72 100644 --- a/Makefile +++ b/Makefile @@ -5,12 +5,14 @@ # (at your option) any later version. 
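Not part of the patch: beaker-transfer and beaker-watchdog both wrap their main loop in the python-daemon context shown above. Reduced to a standalone sketch, with an illustrative pid file path:

    import daemon
    from daemon import pidfile

    def main_loop():
        pass  # poll, sleep, repeat until shutdown

    def main():
        # acquire_timeout=0 makes a second copy of the daemon fail immediately
        # instead of blocking on the pid file held by the running instance
        lock = pidfile.TimeoutPIDLockFile(
            "/var/run/example/example-daemon.pid", acquire_timeout=0
        )
        with daemon.DaemonContext(pidfile=lock, detach_process=True):
            main_loop()

    if __name__ == "__main__":
        main()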
# Python 2 default -ifeq ($(BKR_PY3),) - BKR_PY3 :=0 -endif +BKR_PY3 ?= 0 +DEPCMD := yum-builddep +SUBDIRS := Common Client documentation Server LabController IntegrationTests -DEPCMD := $(shell if [ -f /usr/bin/dnf ]; then echo "dnf builddep"; else echo "yum-builddep"; fi) -SUBDIRS := $(shell if [[ $(BKR_PY3) == 0 ]]; then echo "Common Client documentation Server LabController IntegrationTests"; else echo "Common Client documentation"; fi) +ifeq ($(BKR_PY3),1) + DEPCMD := dnf builddep + SUBDIRS := Common Client documentation LabController +endif .PHONY: build diff --git a/beaker.spec b/beaker.spec index f5150ece7..366099a0c 100644 --- a/beaker.spec +++ b/beaker.spec @@ -216,58 +216,76 @@ Requires: python-mock Requires: python-cssselect %endif -%if %{without python3} %package lab-controller -Summary: Daemons for controlling a Beaker lab -Group: Applications/Internet -Requires: %{name}-common = %{version}-%{release} -Requires: python -Requires: crontabs -Requires: httpd -Requires: syslinux -Requires: fence-agents -Requires: ipmitool -Requires: wsmancli -Requires: /usr/bin/virsh -Requires: telnet -Requires: dnf -Requires: sudo -# old style Python package names -# These LC dependencies are needed in build due to tests -BuildRequires: python-lxml -%if 0%{?rhel} == 7 -BuildRequires: python2-gevent112 -%else -BuildRequires: python-gevent >= 1.0 -%endif -Requires: python-cpio -Requires: python-setuptools -Requires: python-lxml -Requires: python-gssapi -%if 0%{?rhel} == 7 -Requires: python2-gevent112 +Summary: Daemons for controlling a Beaker lab +Group: Applications/Internet + +BuildRequires: systemd +BuildRequires: pkgconfig(systemd) + +# The build dependencies listed below are not directly required for building the component. +# However, they are included to ensure that all unit tests can be executed during the check macro. +%if %{with python3} +BuildRequires: python3-gevent +BuildRequires: python3-lxml +BuildRequires: python3-werkzeug +BuildRequires: python3-psutil +BuildRequires: python3-daemon %else -Requires: python-gevent >= 1.0 -%endif -Requires: python-daemon -Requires: python-werkzeug -Requires: python-flask -BuildRequires: systemd -BuildRequires: pkgconfig(systemd) -Requires: systemd-units -Requires(post): systemd -Requires(pre): systemd +# python2-gevent112 is a special build created for labcontroller. It includes backports to ensure compatibility with py2.7.9 SSL backport. +BuildRequires: python2-gevent112 +BuildRequires: python2-psutil +BuildRequires: python-lxml +BuildRequires: python-daemon +%endif + +# Syslinux is only available on x86_64. This package is used to provide pxelinux.0, which is then copied to the TFTP directory. +# Removing this package will result in no default boot loader, but conversely will allow multi-arch support. 
+Requires: syslinux +Requires: %{name}-common = %{version}-%{release} +Requires: crontabs +Requires: httpd +Requires: ipmitool +Requires: wsmancli +Requires: /usr/bin/virsh +Requires: telnet +Requires: dnf +Requires: sudo +Requires: systemd-units +Requires(post): systemd +Requires(pre): systemd Requires(postun): systemd + +%if %{with python3} +Requires: fence-agents-all +Requires: python3 +Requires: python3-cpio +Requires: python3-daemon +Requires: python3-flask +Requires: python3-gssapi +Requires: python3-lxml +Requires: python3-setuptools +Requires: python3-werkzeug +Requires: python3-gevent +%else +Requires: fence-agents +Requires: python +Requires: python-cpio +Requires: python-daemon +Requires: python-flask +Requires: python-gssapi +Requires: python-lxml +Requires: python-setuptools +Requires: python-werkzeug +Requires: python2-gevent112 %endif -%if %{without python3} %package lab-controller-addDistro Summary: Optional hooks for distro import on Beaker lab controllers Group: Applications/Internet Requires: %{name}-common = %{version}-%{release} Requires: %{name}-lab-controller = %{version}-%{release} Requires: %{name}-client = %{version}-%{release} -%endif %description @@ -297,7 +315,6 @@ This package contains integration tests for Beaker, which require a running database and Beaker server. %endif -%if %{without python3} %description lab-controller The lab controller daemons connect to a central Beaker server in order to manage a local lab of test systems. @@ -307,14 +324,11 @@ The daemons and associated lab controller tools: * control power for test systems * collect logs and results from test runs * track distros available from the lab's local mirror -%endif -%if %{without python3} %description lab-controller-addDistro addDistro.sh can be called after distros have been imported into Beaker. You can install this on your lab controller to automatically launch jobs against newly imported distros. -%endif %prep %setup -q -n %{name}-%{name}-%{upstream_version} @@ -351,6 +365,10 @@ DESTDIR=%{buildroot} make install # Newer RPM fails if site.less doesn't exist, even though it's marked %%ghost # and therefore is not included in the RPM. Seems like an RPM bug... ln -s /dev/null %{buildroot}%{_datadir}/bkr/server/assets/site.less +%else +install -m0755 -d %{buildroot}/%{_localstatedir}/log/%{name} +install -m0755 -d %{buildroot}/%{_sysconfdir}/logrotate.d +install -m0644 Server/logrotate.d/beaker %{buildroot}/%{_sysconfdir}/logrotate.d/beaker %endif %check @@ -379,7 +397,6 @@ if [ ! 
-f %{_datadir}/bkr/server/assets/site.less ] ; then fi %endif -%if %{without python3} %post lab-controller %systemd_post %{_lc_services} @@ -389,27 +406,22 @@ chown root:root %{_localstatedir}/log/%{name}/*.log >/dev/null 2>&1 || : chmod go-w %{_localstatedir}/log/%{name}/*.log >/dev/null 2>&1 || : # Restart rsyslog so that it notices the config which we ship /sbin/service rsyslog condrestart >/dev/null 2>&1 || : -%endif %if %{without python3} %postun server %systemd_postun_with_restart beakerd.service %endif -%if %{without python3} %postun lab-controller %systemd_postun_with_restart %{_lc_services} -%endif %if %{without python3} %preun server %systemd_preun beakerd.service %endif -%if %{without python3} %preun lab-controller %systemd_preun %{_lc_services} -%endif %files common %if %{with python3} @@ -504,15 +516,11 @@ chmod go-w %{_localstatedir}/log/%{name}/*.log >/dev/null 2>&1 || : %{_sysconfdir}/bash_completion.d %endif -%if %{without python3} %files lab-controller %dir %{_sysconfdir}/%{name} %config(noreplace) %{_sysconfdir}/%{name}/labcontroller.conf %{_sysconfdir}/%{name}/power-scripts/ %{_sysconfdir}/%{name}/install-failure-patterns/ -%{python2_sitelib}/bkr/labcontroller/ -%{python2_sitelib}/beaker_lab_controller-*-nspkg.pth -%{python2_sitelib}/beaker_lab_controller-*.egg-info/ %{_bindir}/beaker-proxy %{_bindir}/beaker-watchdog %{_bindir}/beaker-transfer @@ -540,12 +548,19 @@ chmod go-w %{_localstatedir}/log/%{name}/*.log >/dev/null 2>&1 || : %attr(0440,root,root) %config(noreplace) %{_sysconfdir}/sudoers.d/beaker_proxy_clear_netboot %config(noreplace) %{_sysconfdir}/rsyslog.d/beaker-lab-controller.conf %config(noreplace) %{_sysconfdir}/logrotate.d/beaker + +%if %{with python3} +%{python3_sitelib}/bkr/labcontroller/ +%{python3_sitelib}/beaker_lab_controller-*-nspkg.pth +%{python3_sitelib}/beaker_lab_controller-*.egg-info/ +%else +%{python2_sitelib}/bkr/labcontroller/ +%{python2_sitelib}/beaker_lab_controller-*-nspkg.pth +%{python2_sitelib}/beaker_lab_controller-*.egg-info/ %endif -%if %{without python3} %files lab-controller-addDistro %{_var}/lib/%{name}/addDistro.sh %{_var}/lib/%{name}/addDistro.d/* -%endif %changelog diff --git a/documentation/Makefile b/documentation/Makefile index 3c4aed409..222a41f1f 100644 --- a/documentation/Makefile +++ b/documentation/Makefile @@ -62,9 +62,7 @@ build: man text install: man install -m0755 -d $(DESTDIR)/usr/share/man/man{1,8} install -m0644 _build/man/*.1 $(DESTDIR)/usr/share/man/man1 - if [[ ${BKR_PY3} == 0 ]]; then \ - install -m0644 _build/man/*.8 $(DESTDIR)/usr/share/man/man8; \ - fi + install -m0644 _build/man/*.8 $(DESTDIR)/usr/share/man/man8 ln -s bkr-distro-trees-verify.1.gz $(DESTDIR)/usr/share/man/man1/bkr-distros-verify.1.gz ln -s bkr-system-list.1.gz $(DESTDIR)/usr/share/man/man1/bkr-list-systems.1.gz ln -s bkr-labcontroller-list $(DESTDIR)/usr/share/man/man1/bkr-list-labcontrollers.1.gz diff --git a/documentation/conf.py b/documentation/conf.py index 5ffff4f1e..65fa37870 100644 --- a/documentation/conf.py +++ b/documentation/conf.py @@ -30,6 +30,8 @@ [u'David Sommerseth '], 1), ('man/beaker-wizard', 'beaker-wizard', 'Tool to ease the creation of a new Beaker task', [u'Petr Splichal '], 1), + ('admin-guide/man/beaker-import', 'beaker-import', 'Import distros', + [u'The Beaker team '], 8), ] man_server_pages = [ ('admin-guide/man/beaker-create-kickstart', @@ -38,8 +40,6 @@ ('admin-guide/man/beaker-create-ipxe-image', 'beaker-create-ipxe-image', 'Generate and upload iPXE boot image to Glance', [u'The Beaker team '], 8), - 
('admin-guide/man/beaker-import', 'beaker-import', 'Import distros',
-     [u'The Beaker team '], 8),
     ('admin-guide/man/beaker-init', 'beaker-init',
      'Initialize and populate the Beaker database',
      [u'The Beaker team '], 8),