From 9357c3899f6f6551c1969573f59b7f99568c34c1 Mon Sep 17 00:00:00 2001 From: Yuanhang Sun Date: Thu, 4 Jul 2024 03:10:22 +0800 Subject: [PATCH 001/131] feat(aosc): Add 'AOSC OS' support (#5310) --- cloudinit/config/cc_ca_certs.py | 10 +- cloudinit/config/cc_ntp.py | 7 ++ cloudinit/distros/__init__.py | 1 + cloudinit/distros/aosc.py | 148 +++++++++++++++++++++++++++ cloudinit/util.py | 1 + config/cloud.cfg.tmpl | 10 +- doc/rtd/reference/distros.rst | 1 + tests/unittests/distros/test_aosc.py | 10 ++ tests/unittests/test_cli.py | 3 +- tools/.github-cla-signers | 1 + tools/render-template | 1 + 11 files changed, 186 insertions(+), 7 deletions(-) create mode 100644 cloudinit/distros/aosc.py create mode 100644 tests/unittests/distros/test_aosc.py diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 61345fcb58d..4e80947fd13 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -23,6 +23,13 @@ "ca_cert_update_cmd": ["update-ca-certificates"], } DISTRO_OVERRIDES = { + "aosc": { + "ca_cert_path": "/etc/ssl/certs/", + "ca_cert_local_path": "/etc/ssl/certs/", + "ca_cert_filename": "cloud-init-ca-cert-{cert_index}.pem", + "ca_cert_config": "/etc/ca-certificates/conf.d/cloud-init.conf", + "ca_cert_update_cmd": ["update-ca-bundle"], + }, "fedora": { "ca_cert_path": "/etc/pki/ca-trust/", "ca_cert_local_path": "/usr/share/pki/ca-trust-source/", @@ -71,6 +78,7 @@ distros = [ "almalinux", + "aosc", "cloudlinux", "alpine", "debian", @@ -149,7 +157,7 @@ def disable_default_ca_certs(distro_name, distro_cfg): """ if distro_name in ["rhel", "photon"]: remove_default_ca_certs(distro_cfg) - elif distro_name in ["alpine", "debian", "ubuntu"]: + elif distro_name in ["alpine", "aosc", "debian", "ubuntu"]: disable_system_ca_certs(distro_cfg) if distro_name in ["debian", "ubuntu"]: diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 3d659525eef..e2b83191a19 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -24,6 +24,7 @@ distros = [ "almalinux", "alpine", + "aosc", "azurelinux", "centos", "cloudlinux", @@ -109,6 +110,12 @@ "service_name": "ntpd", }, }, + "aosc": { + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf", + }, + }, "azurelinux": { "chrony": { "service_name": "chronyd", diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 4557d4320ee..73873cebeca 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -60,6 +60,7 @@ OSFAMILIES = { "alpine": ["alpine"], + "aosc": ["aosc"], "arch": ["arch"], "debian": ["debian", "ubuntu"], "freebsd": ["freebsd", "dragonfly"], diff --git a/cloudinit/distros/aosc.py b/cloudinit/distros/aosc.py new file mode 100644 index 00000000000..0460c740d5c --- /dev/null +++ b/cloudinit/distros/aosc.py @@ -0,0 +1,148 @@ +# Copyright (C) 2024 AOSC Developers +# +# Author: Yuanhang Sun +# +# This file is part of cloud-init. See LICENSE file for license information. 
+import logging + +from cloudinit import distros, helpers, subp, util +from cloudinit.distros import PackageList +from cloudinit.distros.parsers.hostname import HostnameConf +from cloudinit.distros.parsers.sys_conf import SysConf +from cloudinit.settings import PER_INSTANCE + +LOG = logging.getLogger(__name__) + + +class Distro(distros.Distro): + systemd_locale_conf_fn = "/etc/locale.conf" + init_cmd = ["systemctl"] + network_conf_dir = "/etc/sysconfig/network" + resolve_conf_fn = "/etc/systemd/resolved.conf" + tz_local_fn = "/etc/localtime" + + dhclient_lease_directory = "/var/lib/NetworkManager" + dhclient_lease_file_regex = r"dhclient-[\w-]+\.lease" + + renderer_configs = { + "sysconfig": { + "control": "etc/sysconfig/network", + "iface_templates": "%(base)s/network-scripts/ifcfg-%(name)s", + "route_templates": { + "ipv4": "%(base)s/network-scripts/route-%(name)s", + "ipv6": "%(base)s/network-scripts/route6-%(name)s", + }, + } + } + + prefer_fqdn = False + + def __init__(self, name, cfg, paths): + distros.Distro.__init__(self, name, cfg, paths) + self._runner = helpers.Runners(paths) + self.osfamily = "aosc" + self.default_locale = "en_US.UTF-8" + cfg["ssh_svcname"] = "sshd" + + def apply_locale(self, locale, out_fn=None): + if not out_fn: + out_fn = self.systemd_locale_conf_fn + locale_cfg = { + "LANG": locale, + } + update_locale_conf(out_fn, locale_cfg) + + def _write_hostname(self, hostname, filename): + if filename.endswith("/previous-hostname"): + conf = HostnameConf("") + conf.set_hostname(hostname) + util.write_file(filename, str(conf), 0o644) + create_hostname_file = util.get_cfg_option_bool( + self._cfg, "create_hostname_file", True + ) + if create_hostname_file: + subp.subp(["hostnamectl", "set-hostname", str(hostname)]) + else: + subp.subp( + [ + "hostnamectl", + "set-hostname", + "--transient", + str(hostname), + ] + ) + LOG.info("create_hostname_file is False; hostname set transiently") + + def _read_hostname(self, filename, default=None): + if filename.endswith("/previous-hostname"): + return util.load_text_file(filename).strip() + (out, _err) = subp.subp(["hostname"]) + out = out.strip() + if len(out): + return out + else: + return default + + def _read_system_hostname(self): + sys_hostname = self._read_hostname(self.hostname_conf_fn) + return (self.hostname_conf_fn, sys_hostname) + + def set_timezone(self, tz): + tz_file = self._find_tz_file(tz) + util.del_file(self.tz_local_fn) + util.sym_link(tz_file, self.tz_local_fn) + + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + + cmd = ["oma"] + if command: + cmd.append(command) + cmd.append("-y") + cmd.extend(pkgs) + + subp.subp(cmd, capture=False) + + def install_packages(self, pkglist: PackageList): + self.package_command("install", pkgs=pkglist) + + def update_package_sources(self): + self._runner.run( + "update-sources", + self.package_command, + "refresh", + freq=PER_INSTANCE, + ) + + +def read_locale_conf(sys_path): + exists = False + try: + contents = util.load_text_file(sys_path).splitlines() + exists = True + except IOError: + contents = [] + return (exists, SysConf(contents)) + + +def update_locale_conf(sys_path, locale_cfg): + if not locale_cfg: + return + (exists, contents) = read_locale_conf(sys_path) + updated_am = 0 + for (k, v) in locale_cfg.items(): + if v is None: + continue + v = str(v) + if len(v) == 0: + continue + contents[k] = v + updated_am += 1 + if updated_am: + lines = [ + str(contents), + ] + if not exists: + lines.insert(0, util.make_header()) + 
util.write_file(sys_path, "\n".join(lines) + "\n", 0o644) diff --git a/cloudinit/util.py b/cloudinit/util.py index 98dd66d59fc..505ae1b8693 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -656,6 +656,7 @@ def _get_variant(info): if linux_dist in ( "almalinux", "alpine", + "aosc", "arch", "azurelinux", "centos", diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 68175cd0ad9..4b1efdbcbf1 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -11,7 +11,7 @@ "netbsd": "NetBSD", "openbsd": "openBSD", "openmandriva": "OpenMandriva admin", "photon": "PhotonOS", "ubuntu": "Ubuntu", "unknown": "Ubuntu"}) %} -{% set groups = ({"alpine": "adm, wheel", "arch": "wheel, users", +{% set groups = ({"alpine": "adm, wheel", "aosc": "wheel", "arch": "wheel, users", "azurelinux": "wheel", "debian": "adm, audio, cdrom, dialout, dip, floppy, netdev, plugdev, sudo, video", "gentoo": "users, wheel", "mariner": "wheel", @@ -220,7 +220,7 @@ cloud_final_modules: # (not accessible to handlers/transforms) system_info: # This will affect which distro class gets used -{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", +{% if variant in ["alpine", "amazon", "aosc", "arch", "azurelinux", "debian", "fedora", "freebsd", "gentoo", "mariner", "netbsd", "openbsd", "OpenCloudOS", "openeuler", "openmandriva", "photon", "suse", "TencentOS", "ubuntu"] or is_rhel %} @@ -238,7 +238,7 @@ system_info: {% else %} name: {{ variant }} {% endif %} -{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", +{% if variant in ["alpine", "amazon", "aosc", "arch", "azurelinux", "debian", "fedora", "gentoo", "mariner", "OpenCloudOS", "openeuler", "openmandriva", "photon", "suse", "TencentOS", "ubuntu", "unknown"] @@ -320,7 +320,7 @@ system_info: # Automatically discover the best ntp_client ntp_client: auto {% endif %} -{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", +{% if variant in ["alpine", "amazon", "aosc", "arch", "azurelinux", "debian", "fedora", "gentoo", "mariner", "OpenCloudOS", "openeuler", "openmandriva", "photon", "suse", "TencentOS", "ubuntu", "unknown"] @@ -368,7 +368,7 @@ system_info: {% endif %} {% if variant in ["debian", "ubuntu", "unknown"] %} ssh_svcname: ssh -{% elif variant in ["alpine", "amazon", "arch", "azurelinux", "fedora", +{% elif variant in ["alpine", "amazon", "aosc", "arch", "azurelinux", "fedora", "gentoo", "mariner", "OpenCloudOS", "openeuler", "openmandriva", "photon", "suse", "TencentOS"] or is_rhel %} diff --git a/doc/rtd/reference/distros.rst b/doc/rtd/reference/distros.rst index 59309ece211..d54cb889153 100644 --- a/doc/rtd/reference/distros.rst +++ b/doc/rtd/reference/distros.rst @@ -7,6 +7,7 @@ Unix family of operating systems. See the complete list below. * AlmaLinux * Alpine Linux +* AOSC OS * Arch Linux * CentOS * CloudLinux diff --git a/tests/unittests/distros/test_aosc.py b/tests/unittests/distros/test_aosc.py new file mode 100644 index 00000000000..e8a66b7aef2 --- /dev/null +++ b/tests/unittests/distros/test_aosc.py @@ -0,0 +1,10 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+
+from tests.unittests.distros import _get_distro
+from tests.unittests.helpers import CiTestCase
+
+
+class TestAOSC(CiTestCase):
+    def test_get_distro(self):
+        distro = _get_distro("aosc")
+        self.assertEqual(distro.osfamily, "aosc")
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index 3a92d29e261..6ab6d496b16 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -319,7 +319,8 @@ def test_wb_schema_subcommand_parser(self, m_read_cfg, capsys):
             ["all"],
             [
                 "**Supported distros:** all",
-                "**Supported distros:** almalinux, alpine, azurelinux, "
+                "**Supported distros:** "
+                "almalinux, alpine, aosc, azurelinux, "
                 "centos, cloudlinux, cos, debian, eurolinux, fedora, "
                 "freebsd, mariner, miraclelinux, openbsd, openeuler, "
                 "OpenCloudOS, openmandriva, opensuse, opensuse-microos, "
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index d9accd11460..34dd55c3f6d 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -105,6 +105,7 @@ klausenbusk
 KsenijaS
 landon912
 ld9379435
+leavelet
 licebmi
 linitio
 LKHN
diff --git a/tools/render-template b/tools/render-template
index c3af642a08f..78beeecb2cf 100755
--- a/tools/render-template
+++ b/tools/render-template
@@ -14,6 +14,7 @@ def main():
         "almalinux",
         "alpine",
         "amazon",
+        "aosc",
         "arch",
         "azurelinux",
         "benchmark",

From 053331e5c58962672693288f681661d644129b9b Mon Sep 17 00:00:00 2001
From: Tobias Urdin
Date: Wed, 3 Jul 2024 21:32:14 +0200
Subject: [PATCH 002/131] fix(openbsd): fix mtu on newline in hostname files
 (#5412)

The /etc/hostname.* files should have the mtu on a separate line,
otherwise ifconfig fails with the error: ifconfig: mtu: bad value

The lines are executed in order by ifconfig, so mtu must be on its
own line.

Fixes: GH-5413
---
 cloudinit/net/openbsd.py  | 2 +-
 tools/.github-cla-signers | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py
index 3a4cdf2707c..83b33e0380c 100644
--- a/cloudinit/net/openbsd.py
+++ b/cloudinit/net/openbsd.py
@@ -27,7 +27,7 @@ def write_config(self):
             )
             mtu = v.get("mtu")
             if mtu:
-                content += " mtu %d" % mtu
+                content += "\nmtu %d" % mtu
             content += "\n" + self.interface_routes
         util.write_file(fn, content)
 
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index 34dd55c3f6d..30c411e466a 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -179,6 +179,7 @@ TheRealFalcon
 thetoolsmith
 timothegenzmer
 tnt-dev
+tobias-urdin
 tomponline
 tsanghan
 tSU-RooT

From 2b6fe6403db769de14f7c7b7e4aa65f5bea8f3e0 Mon Sep 17 00:00:00 2001
From: PengpengSun <40026211+PengpengSun@users.noreply.github.com>
Date: Thu, 4 Jul 2024 04:06:39 +0800
Subject: [PATCH 003/131] fix(vmware): Set IPv6 to dhcp when there is no IPv6
 addr (#5471)

When no IPv6 address is given in the customization configuration, set
the IPv6 type to dhcp6 so that the customized Linux network is
explicitly configured to use DHCPv6.
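
For illustration only (this snippet is not part of the patch): a NIC
with no static IPv6 address now ends up with both an IPv4 and an IPv6
DHCP subnet in the generated network config, roughly:

    subnets = [
        {"type": "dhcp", "control": "auto"},  # IPv4 path, unchanged
        {"type": "dhcp6"},  # new: IPv6 falls back to DHCPv6
    ]

Previously gen_ipv6() returned an empty subnet list in this case, so
no IPv6 setting was emitted at all.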
--- .../sources/helpers/vmware/imc/config_nic.py | 2 +- .../sources/vmware/test_vmware_config_file.py | 68 ++++++++++++++----- 2 files changed, 52 insertions(+), 18 deletions(-) diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index b07214a228b..254518af9e3 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -207,7 +207,7 @@ def gen_ipv6(self, name, nic): """ if not nic.staticIpv6: - return ([], []) + return ([{"type": "dhcp6"}], []) subnet_list = [] # Static Ipv6 diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py index fd4bb481e46..c1415934141 100644 --- a/tests/unittests/sources/vmware/test_vmware_config_file.py +++ b/tests/unittests/sources/vmware/test_vmware_config_file.py @@ -241,27 +241,45 @@ def test_get_nics_list_dhcp(self): elif cfg.get("name") == nic2.get("name"): nic2.update(cfg) + # Test NIC1 self.assertEqual("physical", nic1.get("type"), "type of NIC1") self.assertEqual("NIC1", nic1.get("name"), "name of NIC1") self.assertEqual( "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1" ) subnets = nic1.get("subnets") - self.assertEqual(1, len(subnets), "number of subnets for NIC1") - subnet = subnets[0] - self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC1") - self.assertEqual("auto", subnet.get("control"), "NIC1 Control type") + self.assertEqual(2, len(subnets), "number of subnets for NIC1") + subnet_ipv4 = subnets[0] + self.assertEqual( + "dhcp", subnet_ipv4.get("type"), "Ipv4 DHCP type for NIC1" + ) + self.assertEqual( + "auto", subnet_ipv4.get("control"), "NIC1 Control type" + ) + subnet_ipv6 = subnets[1] + self.assertEqual( + "dhcp6", subnet_ipv6.get("type"), "Ipv6 DHCP type for NIC1" + ) + # Test NIC2 self.assertEqual("physical", nic2.get("type"), "type of NIC2") self.assertEqual("NIC2", nic2.get("name"), "name of NIC2") self.assertEqual( "00:50:56:a6:5a:de", nic2.get("mac_address"), "mac address of NIC2" ) subnets = nic2.get("subnets") - self.assertEqual(1, len(subnets), "number of subnets for NIC2") - subnet = subnets[0] - self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC2") - self.assertEqual("auto", subnet.get("control"), "NIC2 Control type") + self.assertEqual(2, len(subnets), "number of subnets for NIC2") + subnet_ipv4 = subnets[0] + self.assertEqual( + "dhcp", subnet_ipv4.get("type"), "Ipv4 DHCP type for NIC2" + ) + self.assertEqual( + "auto", subnet_ipv4.get("control"), "NIC2 Control type" + ) + subnet_ipv6 = subnets[1] + self.assertEqual( + "dhcp6", subnet_ipv6.get("type"), "Ipv6 DHCP type for NIC2" + ) def test_get_nics_list_static(self): """Tests if NicConfigurator properly calculates network subnets @@ -286,6 +304,7 @@ def test_get_nics_list_static(self): elif cfg.get("name") == nic2.get("name"): nic2.update(cfg) + # Test NIC1 self.assertEqual("physical", nic1.get("type"), "type of NIC1") self.assertEqual("NIC1", nic1.get("name"), "name of NIC1") self.assertEqual( @@ -345,6 +364,7 @@ def test_get_nics_list_static(self): else: self.assertEqual(True, False, "invalid gateway %s" % (gateway)) + # Test NIC2 self.assertEqual("physical", nic2.get("type"), "type of NIC2") self.assertEqual("NIC2", nic2.get("name"), "name of NIC2") self.assertEqual( @@ -352,16 +372,18 @@ def test_get_nics_list_static(self): ) subnets = nic2.get("subnets") - self.assertEqual(1, len(subnets), "Number of subnets for NIC2") + 
self.assertEqual(2, len(subnets), "Number of subnets for NIC2") - subnet = subnets[0] - self.assertEqual("static", subnet.get("type"), "Subnet type") + subnet_ipv4 = subnets[0] + self.assertEqual("static", subnet_ipv4.get("type"), "Subnet type") self.assertEqual( - "192.168.6.102", subnet.get("address"), "Subnet address" + "192.168.6.102", subnet_ipv4.get("address"), "Subnet address" ) self.assertEqual( - "255.255.0.0", subnet.get("netmask"), "Subnet netmask" + "255.255.0.0", subnet_ipv4.get("netmask"), "Subnet netmask" ) + subnet_ipv6 = subnets[1] + self.assertEqual("dhcp6", subnet_ipv6.get("type"), "Subnet type") def test_custom_script(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") @@ -448,7 +470,10 @@ def test_non_primary_nic_without_gateway(self): "type": "static", "address": "10.20.87.154", "netmask": "255.255.252.0", - } + }, + { + "type": "dhcp6", + }, ], } ], @@ -499,7 +524,10 @@ def test_non_primary_nic_with_gateway(self): "metric": 10000, } ], - } + }, + { + "type": "dhcp6", + }, ], } ], @@ -559,7 +587,10 @@ def test_cust_non_primary_nic_with_gateway_(self): "metric": 10000, } ], - } + }, + { + "type": "dhcp6", + }, ], } ], @@ -604,7 +635,10 @@ def test_a_primary_nic_with_gateway(self): "address": "10.20.87.154", "netmask": "255.255.252.0", "gateway": "10.20.87.253", - } + }, + { + "type": "dhcp6", + }, ], } ], From 0af459eac9c670f2a7215ecfe96f4a4b6444eaba Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 8 Jul 2024 11:03:25 -0500 Subject: [PATCH 004/131] test: pytestify and cleanup test_cc_mounts.py (#5459) * Remove `unittest` constructs and remove base classes. * Replace tests that don't test things with tests that do * Add fstab and mounts combinations test --- tests/unittests/config/test_cc_mounts.py | 389 ++++++++++++++--------- 1 file changed, 234 insertions(+), 155 deletions(-) diff --git a/tests/unittests/config/test_cc_mounts.py b/tests/unittests/config/test_cc_mounts.py index 4795357c039..07ce4b0ba40 100644 --- a/tests/unittests/config/test_cc_mounts.py +++ b/tests/unittests/config/test_cc_mounts.py @@ -1,8 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import math import os.path import re +import textwrap from collections import namedtuple from unittest import mock @@ -27,193 +29,176 @@ M_PATH = "cloudinit.config.cc_mounts." 
-class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): - def setUp(self): - super(TestSanitizeDevname, self).setUp() - self.new_root = self.tmp_dir() - self.patchOS(self.new_root) - - def _touch(self, path): - path = os.path.join(self.new_root, path.lstrip("/")) +class TestSanitizeDevname: + def _touch(self, path, new_root): + path = os.path.join(new_root, path.lstrip("/")) basedir = os.path.dirname(path) if not os.path.exists(basedir): os.makedirs(basedir) open(path, "a").close() - def _makedirs(self, directory): - directory = os.path.join(self.new_root, directory.lstrip("/")) + def _makedirs(self, directory, new_root): + directory = os.path.join(new_root, directory.lstrip("/")) if not os.path.exists(directory): os.makedirs(directory) - def mock_existence_of_disk(self, disk_path): - self._touch(disk_path) - self._makedirs(os.path.join("/sys/block", disk_path.split("/")[-1])) + def mock_existence_of_disk(self, disk_path, new_root): + self._touch(disk_path, new_root) + self._makedirs( + os.path.join("/sys/block", disk_path.split("/")[-1]), new_root + ) - def mock_existence_of_partition(self, disk_path, partition_number): - self.mock_existence_of_disk(disk_path) - self._touch(disk_path + str(partition_number)) + def mock_existence_of_partition( + self, disk_path, partition_number, new_root + ): + self.mock_existence_of_disk(disk_path, new_root) + self._touch(disk_path + str(partition_number), new_root) disk_name = disk_path.split("/")[-1] self._makedirs( os.path.join( "/sys/block", disk_name, disk_name + str(partition_number) - ) + ), + new_root, ) - def test_existent_full_disk_path_is_returned(self): + def test_existent_full_disk_path_is_returned(self, fake_filesystem): disk_path = "/dev/sda" - self.mock_existence_of_disk(disk_path) - self.assertEqual( - disk_path, - cc_mounts.sanitize_devname(disk_path, lambda x: None), + self.mock_existence_of_disk(disk_path, fake_filesystem) + assert disk_path == cc_mounts.sanitize_devname( + disk_path, lambda x: None ) - def test_existent_disk_name_returns_full_path(self): + def test_existent_disk_name_returns_full_path(self, fake_filesystem): disk_name = "sda" disk_path = "/dev/" + disk_name - self.mock_existence_of_disk(disk_path) - self.assertEqual( - disk_path, - cc_mounts.sanitize_devname(disk_name, lambda x: None), + self.mock_existence_of_disk(disk_path, fake_filesystem) + assert disk_path == cc_mounts.sanitize_devname( + disk_name, lambda x: None ) - def test_existent_meta_disk_is_returned(self): + def test_existent_meta_disk_is_returned(self, fake_filesystem): actual_disk_path = "/dev/sda" - self.mock_existence_of_disk(actual_disk_path) - self.assertEqual( - actual_disk_path, - cc_mounts.sanitize_devname( - "ephemeral0", - lambda x: actual_disk_path, - ), + self.mock_existence_of_disk(actual_disk_path, fake_filesystem) + assert actual_disk_path == cc_mounts.sanitize_devname( + "ephemeral0", + lambda x: actual_disk_path, ) - def test_existent_meta_partition_is_returned(self): + def test_existent_meta_partition_is_returned(self, fake_filesystem): disk_name, partition_part = "/dev/sda", "1" actual_partition_path = disk_name + partition_part - self.mock_existence_of_partition(disk_name, partition_part) - self.assertEqual( - actual_partition_path, - cc_mounts.sanitize_devname( - "ephemeral0.1", - lambda x: disk_name, - ), + self.mock_existence_of_partition( + disk_name, partition_part, fake_filesystem + ) + assert actual_partition_path == cc_mounts.sanitize_devname( + "ephemeral0.1", + lambda x: disk_name, ) - def 
test_existent_meta_partition_with_p_is_returned(self): + def test_existent_meta_partition_with_p_is_returned(self, fake_filesystem): disk_name, partition_part = "/dev/sda", "p1" actual_partition_path = disk_name + partition_part - self.mock_existence_of_partition(disk_name, partition_part) - self.assertEqual( - actual_partition_path, - cc_mounts.sanitize_devname( - "ephemeral0.1", - lambda x: disk_name, - ), + self.mock_existence_of_partition( + disk_name, partition_part, fake_filesystem + ) + assert actual_partition_path == cc_mounts.sanitize_devname( + "ephemeral0.1", + lambda x: disk_name, ) - def test_first_partition_returned_if_existent_disk_is_partitioned(self): + def test_first_partition_returned_if_existent_disk_is_partitioned( + self, fake_filesystem + ): disk_name, partition_part = "/dev/sda", "1" actual_partition_path = disk_name + partition_part - self.mock_existence_of_partition(disk_name, partition_part) - self.assertEqual( - actual_partition_path, - cc_mounts.sanitize_devname( - "ephemeral0", - lambda x: disk_name, - ), + self.mock_existence_of_partition( + disk_name, partition_part, fake_filesystem + ) + assert actual_partition_path == cc_mounts.sanitize_devname( + "ephemeral0", + lambda x: disk_name, ) - def test_nth_partition_returned_if_requested(self): + def test_nth_partition_returned_if_requested(self, fake_filesystem): disk_name, partition_part = "/dev/sda", "3" actual_partition_path = disk_name + partition_part - self.mock_existence_of_partition(disk_name, partition_part) - self.assertEqual( - actual_partition_path, - cc_mounts.sanitize_devname( - "ephemeral0.3", - lambda x: disk_name, - ), + self.mock_existence_of_partition( + disk_name, partition_part, fake_filesystem + ) + assert actual_partition_path == cc_mounts.sanitize_devname( + "ephemeral0.3", + lambda x: disk_name, ) - def test_transformer_returning_none_returns_none(self): - self.assertIsNone( + def test_transformer_returning_none_returns_none(self, fake_filesystem): + assert ( cc_mounts.sanitize_devname( "ephemeral0", lambda x: None, ) + is None ) - def test_missing_device_returns_none(self): - self.assertIsNone( + def test_missing_device_returns_none(self, fake_filesystem): + assert ( cc_mounts.sanitize_devname( "/dev/sda", None, ) + is None ) - def test_missing_sys_returns_none(self): + def test_missing_sys_returns_none(self, fake_filesystem): disk_path = "/dev/sda" - self._makedirs(disk_path) - self.assertIsNone( + self._makedirs(disk_path, fake_filesystem) + assert ( cc_mounts.sanitize_devname( disk_path, None, ) + is None ) - def test_existent_disk_but_missing_partition_returns_none(self): + def test_existent_disk_but_missing_partition_returns_none( + self, fake_filesystem + ): disk_path = "/dev/sda" - self.mock_existence_of_disk(disk_path) - self.assertIsNone( + self.mock_existence_of_disk(disk_path, fake_filesystem) + assert ( cc_mounts.sanitize_devname( "ephemeral0.1", lambda x: disk_path, ) + is None ) - def test_network_device_returns_network_device(self): + def test_network_device_returns_network_device(self, fake_filesystem): disk_path = "netdevice:/path" - self.assertEqual( + assert disk_path == cc_mounts.sanitize_devname( disk_path, - cc_mounts.sanitize_devname( - disk_path, - None, - ), + None, ) - def test_device_aliases_remapping(self): + def test_device_aliases_remapping(self, fake_filesystem): disk_path = "/dev/sda" - self.mock_existence_of_disk(disk_path) - self.assertEqual( - disk_path, - cc_mounts.sanitize_devname( - "mydata", lambda x: None, {"mydata": disk_path} - ), + 
self.mock_existence_of_disk(disk_path, fake_filesystem) + assert disk_path == cc_mounts.sanitize_devname( + "mydata", lambda x: None, {"mydata": disk_path} ) -class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase): - def setUp(self): - super(TestSwapFileCreation, self).setUp() - self.new_root = self.tmp_dir() - self.patchOS(self.new_root) - - self.fstab_path = os.path.join(self.new_root, "etc/fstab") - self.swap_path = os.path.join(self.new_root, "swap.img") +class TestSwapFileCreation: + @pytest.fixture(autouse=True) + def setup(self, mocker, fake_filesystem: str): + self.new_root = fake_filesystem + self.swap_path = os.path.join(fake_filesystem, "swap.img") + fstab_path = os.path.join(fake_filesystem, "etc/fstab") self._makedirs("/etc") - self.add_patch( - "cloudinit.config.cc_mounts.FSTAB_PATH", - "mock_fstab_path", - self.fstab_path, - autospec=False, - ) - - self.add_patch("cloudinit.config.cc_mounts.subp.subp", "m_subp_subp") - - self.add_patch( - "cloudinit.config.cc_mounts.util.mounts", - "mock_util_mounts", + self.m_fstab = mocker.patch(f"{M_PATH}FSTAB_PATH", fstab_path) + self.m_subp = mocker.patch(f"{M_PATH}subp.subp") + self.m_mounts = mocker.patch( + f"{M_PATH}util.mounts", return_value={ "/dev/sda1": { "fstype": "ext4", @@ -257,7 +242,7 @@ def test_swap_creation_method_fallocate_on_xfs( m_get_mount_info.return_value = ["", "xfs"] cc_mounts.handle(None, self.cc, self.mock_cloud, []) - self.m_subp_subp.assert_has_calls( + self.m_subp.assert_has_calls( [ mock.call( ["fallocate", "-l", "0M", self.swap_path], capture=True @@ -276,7 +261,7 @@ def test_swap_creation_method_xfs( m_get_mount_info.return_value = ["", "xfs"] cc_mounts.handle(None, self.cc, self.mock_cloud, []) - self.m_subp_subp.assert_has_calls( + self.m_subp.assert_has_calls( [ mock.call( [ @@ -302,7 +287,7 @@ def test_swap_creation_method_btrfs( m_get_mount_info.return_value = ["", "btrfs"] cc_mounts.handle(None, self.cc, self.mock_cloud, []) - self.m_subp_subp.assert_has_calls( + self.m_subp.assert_has_calls( [ mock.call(["truncate", "-s", "0", self.swap_path]), mock.call(["chattr", "+C", self.swap_path]), @@ -324,7 +309,7 @@ def test_swap_creation_method_ext4( m_get_mount_info.return_value = ["", "ext4"] cc_mounts.handle(None, self.cc, self.mock_cloud, []) - self.m_subp_subp.assert_has_calls( + self.m_subp.assert_has_calls( [ mock.call( ["fallocate", "-l", "0M", self.swap_path], capture=True @@ -335,35 +320,20 @@ def test_swap_creation_method_ext4( ) -class TestFstabHandling(test_helpers.FilesystemMockingTestCase): +class TestFstabHandling: swap_path = "/dev/sdb1" - def setUp(self): - super(TestFstabHandling, self).setUp() - self.new_root = self.tmp_dir() - self.patchOS(self.new_root) + @pytest.fixture(autouse=True) + def setup(self, mocker, fake_filesystem: str): + self.new_root = fake_filesystem self.fstab_path = os.path.join(self.new_root, "etc/fstab") self._makedirs("/etc") - self.add_patch( - "cloudinit.config.cc_mounts.FSTAB_PATH", - "mock_fstab_path", - self.fstab_path, - autospec=False, - ) - - self.add_patch( - "cloudinit.config.cc_mounts._is_block_device", - "mock_is_block_device", - return_value=True, - ) - - self.add_patch("cloudinit.config.cc_mounts.subp.subp", "m_subp_subp") - - self.add_patch( - "cloudinit.config.cc_mounts.util.mounts", - "mock_util_mounts", + self.m_fstab = mocker.patch(f"{M_PATH}FSTAB_PATH", self.fstab_path) + self.m_subp = mocker.patch(f"{M_PATH}subp.subp") + self.m_mounts = mocker.patch( + f"{M_PATH}util.mounts", return_value={ "/dev/sda1": { "fstype": "ext4", @@ 
-373,6 +343,10 @@ def setUp(self): }, ) + self.m_is_block_device = mocker.patch( + f"{M_PATH}_is_block_device", return_value=True + ) + self.mock_cloud = mock.Mock() self.mock_log = mock.Mock() self.mock_cloud.device_name_to_device = self.device_name_to_device @@ -392,7 +366,7 @@ def device_name_to_device(self, path): def test_no_fstab(self): """Handle images which do not include an fstab.""" - self.assertFalse(os.path.exists(cc_mounts.FSTAB_PATH)) + assert not os.path.exists(cc_mounts.FSTAB_PATH) fstab_expected_content = ( "%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n" % (self.swap_path,) @@ -400,19 +374,70 @@ def test_no_fstab(self): cc_mounts.handle(None, {}, self.mock_cloud, []) with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) + assert fstab_expected_content == fstab_new_content - def test_swap_integrity(self): - """Ensure that the swap file is correctly created and can - swapon successfully. Fixing the corner case of: - kernel: swapon: swapfile has holes""" + @pytest.mark.parametrize( + "fstype, expected", + [ + ( + "btrfs", + [ + mock.call(["truncate", "-s", "0", "/swap.img"]), + mock.call(["chattr", "+C", "/swap.img"]), + mock.call( + ["fallocate", "-l", "0M", "/swap.img"], capture=True + ), + ], + ), + ( + "xfs", + [ + mock.call( + [ + "dd", + "if=/dev/zero", + "of=/swap.img", + "bs=1M", + "count=0", + ], + capture=True, + ) + ], + ), + ( + "ext4", + [ + mock.call( + ["fallocate", "-l", "0M", "/swap.img"], capture=True + ) + ], + ), + ], + ) + def test_swap_creation_command(self, fstype, expected, mocker): + """Ensure that the swap file is correctly created. + + Different filesystems require different methods. + """ + mocker.patch( + "cloudinit.util.get_mount_info", return_value=["", fstype] + ) + mocker.patch("cloudinit.util.kernel_version", return_value=(4, 17)) fstab = "/swap.img swap swap defaults 0 0\n" with open(cc_mounts.FSTAB_PATH, "w") as fd: fd.write(fstab) - cc = {"swap": ["filename: /swap.img", "size: 512", "maxsize: 512"]} + cc = { + "swap": {"filename": "/swap.img", "size": "512", "maxsize": "512"} + } cc_mounts.handle(None, cc, self.mock_cloud, []) + assert self.m_subp.call_args_list == expected + [ + mock.call(["mkswap", "/swap.img"]), + mock.call(["swapon", "-a"]), + mock.call(["mount", "-a"]), + mock.call(["systemctl", "daemon-reload"]), + ] def test_fstab_no_swap_device(self): """Ensure that cloud-init adds a discovered swap partition @@ -431,7 +456,7 @@ def test_fstab_no_swap_device(self): with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) + assert fstab_expected_content == fstab_new_content def test_fstab_same_swap_device_already_configured(self): """Ensure that cloud-init will not add a swap device if the same @@ -449,7 +474,7 @@ def test_fstab_same_swap_device_already_configured(self): with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) + assert fstab_expected_content == fstab_new_content def test_fstab_alternate_swap_device_already_configured(self): """Ensure that cloud-init will add a discovered swap device to @@ -470,30 +495,84 @@ def test_fstab_alternate_swap_device_already_configured(self): with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) + assert fstab_expected_content == fstab_new_content def 
test_no_change_fstab_sets_needs_mount_all(self): """verify unchanged fstab entries are mounted if not call mount -a""" - fstab_original_content = ( - "LABEL=cloudimg-rootfs / ext4 defaults 0 0\n" - "LABEL=UEFI /boot/efi vfat defaults 0 0\n" - "/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n" + fstab_original_content = textwrap.dedent( + f""" + LABEL=cloudimg-rootfs / ext4 defaults 0 0 + LABEL=UEFI /boot/efi vfat defaults 0 0 + /dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2 + {self.swap_path} none swap sw,comment=cloudconfig 0 0 + """ # noqa: E501 ) - fstab_expected_content = fstab_original_content cc = {"mounts": [["/dev/vdb", "/mnt", "auto", "defaults,noexec"]]} with open(cc_mounts.FSTAB_PATH, "w") as fd: fd.write(fstab_original_content) + cc_mounts.handle(None, cc, self.mock_cloud, []) with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) - cc_mounts.handle(None, cc, self.mock_cloud, []) - self.m_subp_subp.assert_has_calls( + assert fstab_original_content == fstab_new_content + self.m_subp.assert_has_calls( [ mock.call(["mount", "-a"]), mock.call(["systemctl", "daemon-reload"]), ] ) + def test_fstab_mounts_combinations(self): + """Verify various combinations of mount entries in /etc/fstab.""" + # First and third lines show that even with errors we keep fstab lines + # unedited unless they contain the cloudconfig comment. + # 2nd line shows we remove a line with a cloudconfig comment that + # can be added back in with the mounts config. + # 4th line shows we remove a line with a cloudconfig comment + # indiscriminately. + fstab_original_content = ( + "LABEL=keepme none ext4 defaults 0 0\n" + "/dev/sda1 /a auto defaults,comment=cloudconfig 0 2\n" + "LABEL=UEFI\n" + "/dev/sda2 /b auto defaults,comment=cloudconfig 0 2\n" + ) + with open(cc_mounts.FSTAB_PATH, "w") as fd: + fd.write(fstab_original_content) + cfg = { + "mounts": [ + # Line that will be overridden due to later None value + ["/dev/sda3", "dontcare", "auto", "defaults", "0", "0"], + # Add the one missing default field to the end + ["/dev/sda4", "/mnt2", "auto", "nofail", "1"], + # Remove all "/dev/sda3"'s here and earlier + ["/dev/sda3", None], + # As long as we have two fields we get the rest of the defaults + ["/dev/sda5", "/mnt3"], + # Takes the place of the line that was removed from fstab + # with the cloudconfig comment + ["/dev/sda1", "/mnt", "xfs"], + # The line that survies after previous Nones + ["/dev/sda3", "/mnt4", "btrfs"], + ] + } + cc_mounts.handle(None, cfg, self.mock_cloud, []) + with open(cc_mounts.FSTAB_PATH, "r") as fd: + fstab_new_content = fd.read() + + assert ( + fstab_new_content.strip() + == textwrap.dedent( + """ + LABEL=keepme none ext4 defaults 0 0 + LABEL=UEFI + /dev/sda4 /mnt2 auto nofail,comment=cloudconfig 1 2 + /dev/sda5 /mnt3 auto defaults,nofail,x-systemd.after=cloud-init.service,_netdev,comment=cloudconfig 0 2 + /dev/sda1 /mnt xfs defaults,nofail,x-systemd.after=cloud-init.service,_netdev,comment=cloudconfig 0 2 + /dev/sda3 /mnt4 btrfs defaults,nofail,x-systemd.after=cloud-init.service,_netdev,comment=cloudconfig 0 2 + /dev/sdb1 none swap sw,comment=cloudconfig 0 0 + """ # noqa: E501 + ).strip() + ) + class TestCreateSwapfile: @pytest.mark.parametrize("fstype", ("xfs", "btrfs", "ext4", "other")) From 8a582709aa189c31094cd2174325b0f9183615d2 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 8 Jul 2024 19:11:42 -0500 Subject: [PATCH 005/131] test: Ensure mkcert executable in ftp tests 
(#5493) --- tests/integration_tests/datasources/test_nocloud.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration_tests/datasources/test_nocloud.py b/tests/integration_tests/datasources/test_nocloud.py index c6c440840a3..cf11520662c 100644 --- a/tests/integration_tests/datasources/test_nocloud.py +++ b/tests/integration_tests/datasources/test_nocloud.py @@ -326,7 +326,8 @@ def _boot_with_cmdline( 'wget "https://github.com/FiloSottile/mkcert/releases/' "download/${latest_ver}/mkcert-" '${latest_ver}-linux-amd64"' - " -O mkcert" + " -O mkcert && " + "chmod 755 mkcert" ).ok # giddyup From 7130bbbb146a0d6612c11c44b82a65e6c1cd97aa Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 8 Jul 2024 19:26:23 -0500 Subject: [PATCH 006/131] test: Add missing assert to test_status.py (#5494) --- tests/integration_tests/cmd/test_status.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration_tests/cmd/test_status.py b/tests/integration_tests/cmd/test_status.py index 23509c57cef..50396be709c 100644 --- a/tests/integration_tests/cmd/test_status.py +++ b/tests/integration_tests/cmd/test_status.py @@ -117,7 +117,7 @@ def test_status_json_errors(client): fi cloud-init status --wait --long > $1 date +%s.%N > $MARKER_FILE -""" # noqa: E501 +""" BEFORE_CLOUD_INIT_LOCAL = """\ @@ -162,7 +162,7 @@ def test_status_block_through_all_boot_status(client): # Assert that before-cloud-init-local.service started before # cloud-init-local.service could create status.json - client.execute("test -f /before-local.start-hasstatusjson").failed + assert client.execute("test -f /before-local.start-hasstatusjson").failed early_unit_timestamp = retry_read_from_file( client, "/before-local.start-nostatusjson" From db828d054b59b98905edf5b58aa7b923a3a8c0e9 Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Fri, 5 Jul 2024 13:58:23 +0200 Subject: [PATCH 007/131] typing: fix check_untyped_defs in cloudinit.util (#5490) GH-5445 --- cloudinit/util.py | 49 +++++++++++++++++++++++++++++++---------------- pyproject.toml | 2 +- 2 files changed, 33 insertions(+), 18 deletions(-) diff --git a/cloudinit/util.py b/cloudinit/util.py index 505ae1b8693..583a658719a 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -40,7 +40,9 @@ from errno import ENOENT from functools import lru_cache, total_ordering from pathlib import Path +from types import ModuleType from typing import ( + IO, TYPE_CHECKING, Any, Callable, @@ -54,6 +56,7 @@ Sequence, TypeVar, Union, + cast, ) from urllib import parse @@ -190,6 +193,7 @@ class SeLinuxGuard: def __init__(self, path, recursive=False): # Late import since it might not always # be possible to use this + self.selinux: Optional[ModuleType] try: self.selinux = importer.import_module("selinux") except ImportError: @@ -630,7 +634,7 @@ def get_linux_distro(): dist = ("", "", "") try: # Was removed in 3.8 - dist = platform.dist() # pylint: disable=W1505,E1101 + dist = platform.dist() # type: ignore # pylint: disable=W1505,E1101 except Exception: pass finally: @@ -835,7 +839,9 @@ def set_subprocess_umask_and_gid(): stdin=subprocess.PIPE, preexec_fn=set_subprocess_umask_and_gid, ) - new_fp = proc.stdin + # As stdin is PIPE, then proc.stdin is IO[bytes] + # https://docs.python.org/3/library/subprocess.html#subprocess.Popen.stdin + new_fp = cast(IO[Any], proc.stdin) else: raise TypeError("Invalid type for output format: %s" % outfmt) @@ -862,7 +868,9 @@ def set_subprocess_umask_and_gid(): stdin=subprocess.PIPE, 
preexec_fn=set_subprocess_umask_and_gid, ) - new_fp = proc.stdin + # As stdin is PIPE, then proc.stdin is IO[bytes] + # https://docs.python.org/3/library/subprocess.html#subprocess.Popen.stdin + new_fp = cast(IO[Any], proc.stdin) else: raise TypeError("Invalid type for error format: %s" % errfmt) @@ -1696,8 +1704,8 @@ def chownbyname(fname, user=None, group=None): # output: "| logger -p" # error: "> /dev/null" # this returns the specific 'mode' entry, cleanly formatted, with value -def get_output_cfg(cfg, mode): - ret = [None, None] +def get_output_cfg(cfg, mode) -> List[Optional[str]]: + ret: List[Optional[str]] = [None, None] if not cfg or "output" not in cfg: return ret @@ -1736,10 +1744,10 @@ def get_output_cfg(cfg, mode): ret[1] = ret[0] swlist = [">>", ">", "|"] - for i in range(len(ret)): - if not ret[i]: + for i, r in enumerate(ret): + if not r: continue - val = ret[i].lstrip() + val = r.lstrip() found = False for s in swlist: if val.startswith(s): @@ -1759,7 +1767,7 @@ def get_config_logfiles(cfg): @param cfg: The cloud-init merged configuration dictionary. """ - logs = [] + logs: List = [] rotated_logs = [] if not cfg or not isinstance(cfg, dict): return logs @@ -1928,10 +1936,11 @@ def mounts(): (dev, mp, fstype, opts, _freq, _passno) = mpline.split() else: m = re.search(mountre, mpline) - dev = m.group(1) - mp = m.group(2) - fstype = m.group(3) - opts = m.group(4) + # safe to type-ignore because of the try-except wrapping + dev = m.group(1) # type: ignore + mp = m.group(2) # type: ignore + fstype = m.group(3) # type: ignore + opts = m.group(4) # type: ignore except Exception: continue # If the name of the mount point contains spaces these @@ -2455,21 +2464,27 @@ def get_proc_env(pid, encoding="utf-8", errors="replace"): @param errors: only used if encoding is true.""" fn = os.path.join("/proc", str(pid), "environ") + contents: Union[str, bytes] try: contents = load_binary_file(fn) except (IOError, OSError): return {} env = {} + null: Union[str, bytes] + equal: Union[str, bytes] null, equal = (b"\x00", b"=") if encoding: null, equal = ("\x00", "=") contents = contents.decode(encoding, errors) - for tok in contents.split(null): + # mypy doesn't know that the types of null, equal and contents are the same + # depending on the previous if branch, see: + # https://github.com/python/mypy/issues/6233 + for tok in contents.split(null): # type: ignore if not tok: continue - (name, val) = tok.split(equal, 1) + (name, val) = tok.split(equal, 1) # type: ignore if name: env[name] = val return env @@ -2529,7 +2544,7 @@ def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False): devpth = None fs_type = None match_mount_point = None - match_mount_point_elements = None + match_mount_point_elements: Optional[List[str]] = None for i, line in enumerate(mountinfo_lines): parts = line.split() @@ -2668,7 +2683,7 @@ def parse_mount(path, get_mnt_opts=False): devpth = None mount_point = None match_mount_point = None - match_mount_point_elements = None + match_mount_point_elements: Optional[List[str]] = None for line in mountoutput.splitlines(): m = re.search(regex, line) if not m: diff --git a/pyproject.toml b/pyproject.toml index 7408488f975..d5578c1379b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,6 +39,7 @@ module = [ ignore_missing_imports = true no_implicit_optional = true +# See GH-5445 [[tool.mypy.overrides]] module = [ "cloudinit.analyze", @@ -118,7 +119,6 @@ module = [ "cloudinit.temp_utils", "cloudinit.templater", "cloudinit.user_data", - "cloudinit.util", 
"tests.integration_tests.instances", "tests.unittests.analyze.test_show", "tests.unittests.cmd.devel.test_hotplug_hook", From 188656b21e53ca2f151b439135b322040560c811 Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Tue, 9 Jul 2024 09:21:59 +0200 Subject: [PATCH 008/131] refactor: util.get_proc_env to work with strs (#5490) There are no call sites requesting not decoding the environment vars. This change decodes then always, simplifying typing and logic. --- cloudinit/util.py | 29 ++++++++++++----------------- tests/unittests/test_util.py | 13 ------------- 2 files changed, 12 insertions(+), 30 deletions(-) diff --git a/cloudinit/util.py b/cloudinit/util.py index 583a658719a..09d28386646 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2454,14 +2454,16 @@ def is_lxd(): return os.path.exists("/dev/lxd/sock") -def get_proc_env(pid, encoding="utf-8", errors="replace"): +def get_proc_env( + pid, encoding: str = "utf-8", errors: str = "replace" +) -> Dict[str, str]: """ Return the environment in a dict that a given process id was started with. - @param encoding: if true, then decoding will be done with - .decode(encoding, errors) and text will be returned. - if false then binary will be returned. - @param errors: only used if encoding is true.""" + @param encoding: decoding will be done with .decode(encoding, errors) and + text will be returned. + @param errors: passed through .decode(encoding, errors). + """ fn = os.path.join("/proc", str(pid), "environ") contents: Union[str, bytes] @@ -2471,20 +2473,13 @@ def get_proc_env(pid, encoding="utf-8", errors="replace"): return {} env = {} - null: Union[str, bytes] - equal: Union[str, bytes] - null, equal = (b"\x00", b"=") - if encoding: - null, equal = ("\x00", "=") - contents = contents.decode(encoding, errors) - - # mypy doesn't know that the types of null, equal and contents are the same - # depending on the previous if branch, see: - # https://github.com/python/mypy/issues/6233 - for tok in contents.split(null): # type: ignore + null, equal = ("\x00", "=") + contents = contents.decode(encoding, errors) + + for tok in contents.split(null): if not tok: continue - (name, val) = tok.split(equal, 1) # type: ignore + (name, val) = tok.split(equal, 1) if name: env[name] = val return env diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index d790bf4f1ca..8970fb4c863 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -2802,19 +2802,6 @@ def test_non_utf8_in_environment(self, m_load_file): ) self.assertEqual(1, m_load_file.call_count) - @mock.patch(M_PATH + "load_binary_file") - def test_encoding_none_returns_bytes(self, m_load_file): - """encoding none returns bytes.""" - lines = (self.bootflag, self.simple1, self.simple2, self.mixed) - content = self.null.join(lines) - m_load_file.return_value = content - - self.assertEqual( - dict([t.split(b"=") for t in lines]), - util.get_proc_env(1, encoding=None), - ) - self.assertEqual(1, m_load_file.call_count) - @mock.patch(M_PATH + "load_binary_file") def test_all_utf8_encoded(self, m_load_file): """common path where only utf-8 decodable content.""" From 0128716c28eec2c92bd6e76d423c160c91cb3e5f Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Tue, 9 Jul 2024 10:11:09 +0200 Subject: [PATCH 009/131] refactor: util.mounts to handle errors (#5490) Instead of a broad try/except, do properly check for conditions that invalidate a mount location. 
--- cloudinit/util.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/cloudinit/util.py b/cloudinit/util.py index 09d28386646..8d7422aee09 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1927,22 +1927,23 @@ def mounts(): out = subp.subp("mount") mount_locs = out.stdout.splitlines() method = "mount" - mountre = r"^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$" + mountre = re.compile(r"^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$") for mpline in mount_locs: # Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered) # FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates) - try: - if method == "proc": - (dev, mp, fstype, opts, _freq, _passno) = mpline.split() - else: - m = re.search(mountre, mpline) - # safe to type-ignore because of the try-except wrapping - dev = m.group(1) # type: ignore - mp = m.group(2) # type: ignore - fstype = m.group(3) # type: ignore - opts = m.group(4) # type: ignore - except Exception: - continue + if method == "proc": + words = mpline.split() + if len(words) != 6: + continue + (dev, mp, fstype, opts, _freq, _passno) = words + else: + m = mountre.search(mpline) + if m is None or len(m.groups()) < 4: + continue + dev = m.group(1) + mp = m.group(2) + fstype = m.group(3) + opts = m.group(4) # If the name of the mount point contains spaces these # can be escaped as '\040', so undo that.. mp = mp.replace("\\040", " ") From 4c0468c5703ada736d4eac96ed20cad12603043b Mon Sep 17 00:00:00 2001 From: Curt Moore Date: Wed, 10 Jul 2024 14:10:46 -0500 Subject: [PATCH 010/131] Set MTU for bond parent interface (#5495) Support for jumbo frames requires that the underlying physical interfaces and the parent bond interface all have the larger MTU configured, not just the physical interfaces. --- cloudinit/net/network_manager.py | 4 ++++ tests/unittests/net/network_configs.py | 3 +++ 2 files changed, 7 insertions(+) diff --git a/cloudinit/net/network_manager.py b/cloudinit/net/network_manager.py index a13d4c14f69..41dcd24b27f 100644 --- a/cloudinit/net/network_manager.py +++ b/cloudinit/net/network_manager.py @@ -431,6 +431,10 @@ def render_interface(self, iface, network_state, renderer): self.config["vlan"]["parent"] = renderer.con_ref( iface["vlan-raw-device"] ) + if if_type == "bond" and ipv4_mtu is not None: + if "ethernet" not in self.config: + self.config["ethernet"] = {} + self.config["ethernet"]["mtu"] = str(ipv4_mtu) if if_type == "bridge": # Bridge is ass-backwards compared to bond for port in iface["bridge_ports"]: diff --git a/tests/unittests/net/network_configs.py b/tests/unittests/net/network_configs.py index b68319cc806..2b55bbf421a 100644 --- a/tests/unittests/net/network_configs.py +++ b/tests/unittests/net/network_configs.py @@ -3385,6 +3385,9 @@ route1=2001:67c::/32,2001:67c:1562::1 route2=3001:67c::/32,3001:67c:15::1 + [ethernet] + mtu=9000 + """ ), }, From 7d35664ef8b85840f92f18cc48187f7284d227bc Mon Sep 17 00:00:00 2001 From: Ani Sinha Date: Thu, 11 Jul 2024 00:49:58 +0530 Subject: [PATCH 011/131] fix: add schema rules for 'baseurl' and 'metalink' in yum repo config (#5501) At least one of (or both) 'baseurl' or 'metalink' should be provided for yum repository specification. Add schema changes to enforce it. 
Without this, with just 'metalink' property set, one would get the schema validator error \--- Error: Cloud config schema errors: yum_repos.epel-release: 'baseurl' is a required property \--- Signed-off-by: Ani Sinha --- .../config/schemas/schema-cloud-config-v1.json | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json index f5609c539fc..e45504d4113 100644 --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -3452,6 +3452,11 @@ "format": "uri", "description": "URL to the directory where the yum repository's 'repodata' directory lives" }, + "metalink": { + "type": "string", + "format": "uri", + "description": "Specifies a URL to a metalink file for the repomd.xml" + }, "name": { "type": "string", "description": "Optional human-readable name of the yum repo." @@ -3479,8 +3484,17 @@ "description": "Any supported yum repository configuration options will be written to the yum repo config file. See: man yum.conf" } }, - "required": [ - "baseurl" + "anyOf": [ + { + "required": [ + "baseurl" + ] + }, + { + "required": [ + "metalink" + ] + } ] } } From 4abdd5a7066c322163579d8d91a44426ac705172 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Wed, 24 Apr 2024 10:51:16 -0600 Subject: [PATCH 012/131] feat(systemd): Warn user of unexpected run mode (#5209) On systemd, services are started by PID 1. When this doesn't happen, cloud-init is in an unknown run state and should warn the user. Reorder pid log to be able to reuse Distro information. Add docstring deprecating util.is_Linux(). --- cloudinit/cmd/main.py | 31 ++++++++++++++++++++++--------- cloudinit/distros/__init__.py | 3 +++ cloudinit/distros/bsd.py | 7 +++++++ cloudinit/util.py | 6 ++++++ 4 files changed, 38 insertions(+), 9 deletions(-) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 4a1c8b2e28c..317f3d0ff59 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -79,17 +79,29 @@ def print_exc(msg=""): sys.stderr.write("\n") -def log_ppid(): - if util.is_Linux(): +def log_ppid(distro, bootstage_name): + if distro.is_linux: ppid = os.getppid() - LOG.info("PID [%s] started cloud-init.", ppid) + log = LOG.info + extra_message = "" + if 1 != ppid and distro.uses_systemd(): + log = LOG.warning + extra_message = ( + " Unsupported configuration: boot stage called " + "outside of systemd" + ) + log( + "PID [%s] started cloud-init '%s'.%s", + ppid, + bootstage_name, + extra_message, + ) def welcome(action, msg=None): if not msg: msg = welcome_format(action) util.multi_log("%s\n" % (msg), console=False, stderr=True, log=LOG) - log_ppid() return msg @@ -333,10 +345,8 @@ def main_init(name, args): # objects config as it may be different from init object # 10. Run the modules for the 'init' stage # 11. Done! - if not args.local: - w_msg = welcome_format(name) - else: - w_msg = welcome_format("%s-local" % (name)) + bootstage_name = "init-local" if args.local else "init" + w_msg = welcome_format(bootstage_name) init = stages.Init(ds_deps=deps, reporter=args.reporter) # Stage 1 init.read_cfg(extract_fns(args)) @@ -364,6 +374,7 @@ def main_init(name, args): # config applied. We send the welcome message now, as stderr/out have # been redirected and log now configured. 
welcome(name, msg=w_msg) + log_ppid(init.distro, bootstage_name) # re-play early log messages before logging was setup for lvl, msg in early_logs: @@ -591,7 +602,8 @@ def main_modules(action_name, args): # the modules objects configuration # 5. Run the modules for the given stage name # 6. Done! - w_msg = welcome_format("%s:%s" % (action_name, name)) + bootstage_name = "%s:%s" % (action_name, name) + w_msg = welcome_format(bootstage_name) init = stages.Init(ds_deps=[], reporter=args.reporter) # Stage 1 init.read_cfg(extract_fns(args)) @@ -628,6 +640,7 @@ def main_modules(action_name, args): # now that logging is setup and stdout redirected, send welcome welcome(name, msg=w_msg) + log_ppid(init.distro, bootstage_name) if name == "init": util.deprecate( diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 73873cebeca..e6bfb1d3b48 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -171,6 +171,7 @@ def __init__(self, name, cfg, paths): self.package_managers: List[PackageManager] = [] self._dhcp_client = None self._fallback_interface = None + self.is_linux = True def _unpickle(self, ci_pkl_version: int) -> None: """Perform deserialization fixes for Distro.""" @@ -187,6 +188,8 @@ def _unpickle(self, ci_pkl_version: int) -> None: self._dhcp_client = None if not hasattr(self, "_fallback_interface"): self._fallback_interface = None + if not hasattr(self, "is_linux"): + self.is_linux = True def _validate_entry(self, entry): if isinstance(entry, str): diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py index 25b374ba3bc..15be9c36714 100644 --- a/cloudinit/distros/bsd.py +++ b/cloudinit/distros/bsd.py @@ -40,6 +40,13 @@ def __init__(self, name, cfg, paths): cfg["rsyslog_svcname"] = "rsyslogd" self.osfamily = platform.system().lower() self.net_ops = bsd_netops.BsdNetOps + self.is_linux = False + + def _unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + + # this needs to be after the super class _unpickle to override it + self.is_linux = False def _read_system_hostname(self): sys_hostname = self._read_hostname(self.hostname_conf_fn) diff --git a/cloudinit/util.py b/cloudinit/util.py index 8d7422aee09..19f1800928d 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -499,6 +499,12 @@ def multi_log( @lru_cache() def is_Linux(): + """deprecated: prefer Distro object's `is_linux` property + + Multiple sources of truth is bad, and already know whether we are + working with Linux from the Distro class. Using Distro offers greater code + reusablity, cleaner code, and easier maintenance. + """ return "Linux" in platform.system() From 604d80eb6fc9c78ed5669a58204fa366b4b71bdf Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 3 Jun 2024 18:53:37 -0600 Subject: [PATCH 013/131] test: Don't fail tests which call cloud-init as a command (#5209) Implement verify_clean_boot() to ignore certain expected logs in a platform-specific way. 
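
A hypothetical call from an integration test, to show the intended
API (the argument values here are illustrative, not from a real
test):

    verify_clean_boot(
        instance,
        ignore_warnings=["No lease found; using default endpoint"],
        require_errors=["error text this test expects to see"],
    )

Platform-specific expected noise, such as the Azure lease warning
above, is appended to the caller's ignore lists inside
verify_clean_boot() itself, so individual tests only need to declare
what is unique to them.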
--- .../datasources/test_ec2_ipv6.py | 19 +++-- tests/integration_tests/test_upgrade.py | 12 +-- tests/integration_tests/util.py | 75 ++++++++++++++++++- 3 files changed, 91 insertions(+), 15 deletions(-) diff --git a/tests/integration_tests/datasources/test_ec2_ipv6.py b/tests/integration_tests/datasources/test_ec2_ipv6.py index 41bb852e627..0f5a2dbf6bb 100644 --- a/tests/integration_tests/datasources/test_ec2_ipv6.py +++ b/tests/integration_tests/datasources/test_ec2_ipv6.py @@ -2,13 +2,22 @@ import pytest +from cloudinit.util import should_log_deprecation from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM - - -def _test_crawl(client, ip): - assert client.execute("cloud-init clean --logs").ok - assert client.execute("cloud-init init --local").ok +from tests.integration_tests.util import get_feature_flag_value + + +def _test_crawl(client: IntegrationInstance, ip: str): + return_code = ( + 2 + if should_log_deprecation( + "24.3", get_feature_flag_value(client, "DEPRECATION_INFO_BOUNDARY") + ) + else 0 + ) + assert client.execute("cloud-init clean --logs") + assert return_code == client.execute("cloud-init init --local").return_code log = client.read_from_file("/var/log/cloud-init.log") assert f"Using metadata source: '{ip}'" in log result = re.findall(r"Crawl of metadata service.* (\d+.\d+) seconds", log) diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index 970a2406d8a..0a53eabb50e 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -14,7 +14,7 @@ IS_UBUNTU, MANTIC, ) -from tests.integration_tests.util import verify_clean_log +from tests.integration_tests.util import verify_clean_boot, verify_clean_log LOG = logging.getLogger("integration_testing.test_upgrade") @@ -81,11 +81,8 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): pre_cloud_blame = instance.execute("cloud-init analyze blame") # Ensure no issues pre-upgrade - log = instance.read_from_file("/var/log/cloud-init.log") - assert not json.loads(pre_result)["v1"]["errors"] - try: - verify_clean_log(log) + verify_clean_boot(instance) except AssertionError: LOG.warning( "There were errors/warnings/tracebacks pre-upgrade. " @@ -122,10 +119,7 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): post_cloud_blame = instance.execute("cloud-init analyze blame") # Ensure no issues post-upgrade - assert not json.loads(pre_result)["v1"]["errors"] - - log = instance.read_from_file("/var/log/cloud-init.log") - verify_clean_log(log) + verify_clean_boot(instance) # Ensure important things stayed the same assert pre_hostname == post_hostname diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index d218861f549..1343ac0df73 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -14,6 +14,7 @@ import pytest from cloudinit.subp import subp +from tests.integration_tests.integration_settings import PLATFORM LOG = logging.getLogger("integration_testing.util") @@ -70,7 +71,8 @@ def verify_clean_boot( ): """raise assertions if the client experienced unexpected warnings or errors - fail when an required error isn't found + Fail when a required error isn't found. + Expected warnings and errors are defined in this function. This function is similar to verify_clean_log, hence the similar name. 
@@ -89,6 +91,77 @@ def verify_clean_boot( require_errors: Optional[list] = None, fail_when_expected_not_found: optional list of expected errors """ + + def append_or_create_list( + maybe_list: Optional[Union[List[str], bool]], value: str + ) -> List[str]: + """handle multiple types""" + if isinstance(maybe_list, list): + maybe_list.append(value) + elif maybe_list is None or isinstance(maybe_list, bool): + maybe_list = [value] + return maybe_list + + # Define exceptions by matrix of platform and Ubuntu release + if "azure" == PLATFORM: + # Consistently on all Azure launches: + ignore_warnings = append_or_create_list( + ignore_warnings, "No lease found; using default endpoint" + ) + elif "lxd_vm" == PLATFORM: + # Ubuntu lxd storage + ignore_warnings = append_or_create_list( + ignore_warnings, "thinpool by default on Ubuntu due to LP #1982780" + ) + ignore_warnings = append_or_create_list( + ignore_warnings, + "Could not match supplied host pattern, ignoring:", + ) + elif "oracle" == PLATFORM: + # LP: #1842752 + ignore_errors = append_or_create_list( + ignore_warnings, "Stderr: RTNETLINK answers: File exists" + ) + # LP: #1833446 + ignore_warnings = append_or_create_list( + ignore_warnings, + "UrlError: 404 Client Error: Not Found for url: " + "http://169.254.169.254/latest/meta-data/", + ) + # Oracle has a file in /etc/cloud/cloud.cfg.d that contains + # users: + # - default + # - name: opc + # ssh_redirect_user: true + # This can trigger a warning about opc having no public key + ignore_warnings = append_or_create_list( + ignore_warnings, + "Unable to disable SSH logins for opc given ssh_redirect_user", + ) + + _verify_clean_boot( + instance, + ignore_warnings=ignore_warnings, + ignore_errors=ignore_errors, + require_warnings=require_warnings, + require_errors=require_errors, + ) + # assert no Tracebacks + assert ( + "0" + == instance.execute( + "grep --count Traceback /var/log/cloud-init.log" + ).stdout.strip() + ), "Unexpected traceback found in /var/log/cloud-init.log" + + +def _verify_clean_boot( + instance: "IntegrationInstance", + ignore_warnings: Optional[Union[List[str], bool]] = None, + ignore_errors: Optional[Union[List[str], bool]] = None, + require_warnings: Optional[list] = None, + require_errors: Optional[list] = None, +): ignore_errors = ignore_errors or [] ignore_warnings = ignore_warnings or [] require_errors = require_errors or [] From 8aa1c30dda7a1bac08219994c446ef1ee23eea36 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 19 Jun 2024 10:37:22 -0600 Subject: [PATCH 014/131] test: allow verify_clean_boot to ignore all or specific tracebacks (#5209) Ensure ignore_warnings=True or ignore_errors=True is honored and not overridden by supplemental warning texts appended. --- .../datasources/test_nocloud.py | 4 ++ tests/integration_tests/util.py | 48 ++++++++++++++----- 2 files changed, 41 insertions(+), 11 deletions(-) diff --git a/tests/integration_tests/datasources/test_nocloud.py b/tests/integration_tests/datasources/test_nocloud.py index cf11520662c..6cfe037a448 100644 --- a/tests/integration_tests/datasources/test_nocloud.py +++ b/tests/integration_tests/datasources/test_nocloud.py @@ -428,6 +428,10 @@ def test_nocloud_ftps_unencrypted_server_fails( " a scheme of ftps://, which is not allowed. 
Use ftp:// " "to allow connecting to insecure ftp servers.", ], + ignore_tracebacks=[ + 'ftplib.error_perm: 500 Command "AUTH" not understood.', + "UrlError: Attempted to connect to an insecure ftp server", + ], ) def test_nocloud_ftps_encrypted_server_succeeds( diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index 1343ac0df73..4830cf958de 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -66,6 +66,7 @@ def verify_clean_boot( instance: "IntegrationInstance", ignore_warnings: Optional[Union[List[str], bool]] = None, ignore_errors: Optional[Union[List[str], bool]] = None, + ignore_tracebacks: Optional[Union[List[str], bool]] = None, require_warnings: Optional[list] = None, require_errors: Optional[list] = None, ): @@ -94,14 +95,17 @@ def verify_clean_boot( def append_or_create_list( maybe_list: Optional[Union[List[str], bool]], value: str - ) -> List[str]: + ) -> Optional[Union[List[str], bool]]: """handle multiple types""" if isinstance(maybe_list, list): maybe_list.append(value) - elif maybe_list is None or isinstance(maybe_list, bool): + elif maybe_list is True: + return True # Ignoring all texts, so no need to append. + elif maybe_list in (None, False): maybe_list = [value] return maybe_list + traceback_texts = [] # Define exceptions by matrix of platform and Ubuntu release if "azure" == PLATFORM: # Consistently on all Azure launches: @@ -122,12 +126,17 @@ def append_or_create_list( ignore_errors = append_or_create_list( ignore_warnings, "Stderr: RTNETLINK answers: File exists" ) + traceback_texts.append("Stderr: RTNETLINK answers: File exists") # LP: #1833446 ignore_warnings = append_or_create_list( ignore_warnings, "UrlError: 404 Client Error: Not Found for url: " "http://169.254.169.254/latest/meta-data/", ) + traceback_texts.append( + "UrlError: 404 Client Error: Not Found for url: " + "http://169.254.169.254/latest/meta-data/" + ) # Oracle has a file in /etc/cloud/cloud.cfg.d that contains # users: # - default @@ -143,22 +152,17 @@ def append_or_create_list( instance, ignore_warnings=ignore_warnings, ignore_errors=ignore_errors, + ignore_tracebacks=ignore_tracebacks, require_warnings=require_warnings, require_errors=require_errors, ) - # assert no Tracebacks - assert ( - "0" - == instance.execute( - "grep --count Traceback /var/log/cloud-init.log" - ).stdout.strip() - ), "Unexpected traceback found in /var/log/cloud-init.log" def _verify_clean_boot( instance: "IntegrationInstance", ignore_warnings: Optional[Union[List[str], bool]] = None, ignore_errors: Optional[Union[List[str], bool]] = None, + ignore_tracebacks: Optional[Union[List[str], bool]] = None, require_warnings: Optional[list] = None, require_errors: Optional[list] = None, ): @@ -181,9 +185,9 @@ def _verify_clean_boot( if expected in current_error: required_errors_found.add(expected) - # check for unexpected errors if ignore_errors is True: continue + # check for unexpected errors for expected in [*ignore_errors, *require_errors]: if expected in current_error: break @@ -198,9 +202,9 @@ def _verify_clean_boot( if expected in current_warning: required_warnings_found.add(expected) - # check for unexpected warnings if ignore_warnings is True: continue + # check for unexpected warnings for expected in [*ignore_warnings, *require_warnings]: if expected in current_warning: break @@ -241,6 +245,28 @@ def _verify_clean_boot( ) assert not errors, message + if ignore_tracebacks is True: + return + # assert no unexpected Tracebacks + expected_traceback_count = 0 + 
traceback_count = int(
+        instance.execute(
+            "grep --count Traceback /var/log/cloud-init.log"
+        ).stdout.strip()
+    )
+    if ignore_tracebacks:
+        for expected_traceback in ignore_tracebacks:
+            expected_traceback_count += int(
+                instance.execute(
+                    f"grep --count '{expected_traceback}'"
+                    " /var/log/cloud-init.log"
+                ).stdout.strip()
+            )
+    assert expected_traceback_count == traceback_count, (
+        f"{traceback_count - expected_traceback_count} unexpected traceback(s)"
+        " found in /var/log/cloud-init.log"
+    )
+

 def verify_clean_log(log: str, ignore_deprecations: bool = True):
     """Assert no unexpected tracebacks or warnings in logs"""
From 75add5c72aa575d373825deddcb685f725e290d8 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Wed, 19 Jun 2024 11:04:35 -0600
Subject: [PATCH 015/131] feat(systemd): convert warning level message to
 deprecation (#5209)

Avoid using warning level messages, as there may be some use cases in the
wild that need to invoke cloud-init boot stages after boot for some reason
unknown to upstream. Provide a detailed warning message informing admins to
file issues against cloud-init to better represent those feature needs
before dropping this feature altogether.
---
 cloudinit/cmd/main.py     | 31 +++++++++++++++++++------------
 doc/rtd/reference/cli.rst | 18 +++++++++++-------
 2 files changed, 30 insertions(+), 19 deletions(-)

diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 317f3d0ff59..590173ae4fc 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -79,23 +79,30 @@ def print_exc(msg=""):
     sys.stderr.write("\n")


+DEPRECATE_BOOT_STAGE_MESSAGE = (
+    "Triggering cloud-init boot stages outside of initial system boot is not"
+    " a fully supported operation which can lead to incomplete or incorrect"
+    " configuration. As such, cloud-init is deprecating this feature in the"
+    " future. If you currently use cloud-init in this way,"
+    " please file an issue describing in detail your use case so that"
+    " cloud-init can better support your needs:"
+    " https://github.com/canonical/cloud-init/issues/new"
+)
+
+
 def log_ppid(distro, bootstage_name):
     if distro.is_linux:
         ppid = os.getppid()
-        log = LOG.info
-        extra_message = ""
         if 1 != ppid and distro.uses_systemd():
-            log = LOG.warning
-            extra_message = (
-                " Unsupported configuration: boot stage called "
-                "outside of systemd"
+            util.deprecate(
+                deprecated=(
+                    "Unsupported configuration: boot stage called "
+                    f"by PID [{ppid}] outside of systemd"
+                ),
+                deprecated_version="24.3",
+                extra_message=DEPRECATE_BOOT_STAGE_MESSAGE,
             )
-        log(
-            "PID [%s] started cloud-init '%s'.%s",
-            ppid,
-            bootstage_name,
-            extra_message,
-        )
+        LOG.info("PID [%s] started cloud-init '%s'.", ppid, bootstage_name)


 def welcome(action, msg=None):
diff --git a/doc/rtd/reference/cli.rst b/doc/rtd/reference/cli.rst
index 0a6bc55ff1f..9c0bbe9c3ee 100644
--- a/doc/rtd/reference/cli.rst
+++ b/doc/rtd/reference/cli.rst
@@ -212,9 +212,10 @@ Example output:

 Generally run by OS init systems to execute ``cloud-init``'s stages:
 *init* and *init-local*. See :ref:`boot_stages` for more info.
-Can be run on the command line, but is generally gated to run only once
-due to semaphores in :file:`/var/lib/cloud/instance/sem/` and
-:file:`/var/lib/cloud/sem`.
+Can be run on the command line, but this is deprecated because incomplete
+configuration can be applied when run later in boot. The boot stages are
+generally gated to run only once due to semaphores in
+:file:`/var/lib/cloud/instance/sem/` and :file:`/var/lib/cloud/sem`.

 * :command:`--local`: Run *init-local* stage instead of *init*.
 * :command:`--file` : Use additional yaml configuration files.
@@ -226,16 +227,19 @@ due to semaphores in :file:`/var/lib/cloud/instance/sem/` and

 Generally run by OS init systems to execute ``modules:config`` and
 ``modules:final`` boot stages. This executes cloud config :ref:`modules`
-configured to run in the Init, Config and Final stages. The modules are
-declared to run in various boot stages in the file
+configured to run in the Init, Config and Final stages. Can be run on the
+command line, but this is not recommended and will generate a warning because
+incomplete configuration can be applied when run later in boot.
+The modules are declared to run in various boot stages in the file
 :file:`/etc/cloud/cloud.cfg` under keys:

 * ``cloud_init_modules``
 * ``cloud_config_modules``
 * ``cloud_final_modules``

-Can be run on the command line, but each module is gated to run only once due
-to semaphores in :file:`/var/lib/cloud/`.
+Can be run on the command line, but this is deprecated because incomplete
+configuration can be applied when run later in boot. Each module is gated to
+run only once due to semaphores in :file:`/var/lib/cloud/`.

 * :command:`--mode [init|config|final]`: Run ``modules:init``,
   ``modules:config`` or ``modules:final`` ``cloud-init`` stages.
From a911d07955300b6b07898627c077b13e0ade4e62 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Wed, 10 Jul 2024 16:04:25 -0600
Subject: [PATCH 016/131] fix(test): Fix ip printer for non-lxd (#5488)

---
 tests/integration_tests/instances.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index 32281756cd1..cfae37c272c 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -294,6 +294,8 @@ def ip(self) -> str:
                 and self.instance.execute_via_ssh
             ):
                 self._ip = self.instance.ip
+            elif not isinstance(self.instance, LXDInstance):
+                self._ip = self.instance.ip
         except NotImplementedError:
             self._ip = "Unknown"
         return self._ip
From 18d76ac60d96186a6f89fd8b0b3ace4c70bbd174 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Wed, 10 Jul 2024 16:38:58 -0600
Subject: [PATCH 017/131] tests: revert expectation of exit 2 from cloud-init
 init --local (#5504)

Commit 604d80eb introduced assertions expecting exit 2 from the CLI when
calling cloud-init init --local.

Revert this test assertion, as only the cloud-init status command exits 2
on deprecations/warnings. Invoking cloud-init's boot stages on the command
line will only exit 1 if critical errors are encountered, to avoid
degrading overall systemd health as seen from cloud-init systemd units.

When cloud-init boot stages encounter recoverable_errors of any type, there
is no need to exit non-zero, as those deprecation logs are not critical to
the health of the system as a whole.
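To illustrate the reverted expectation (sketch only; `client` stands in for
an IntegrationInstance fixture):

    # Boot stages exit 0 on success and 1 only on critical errors, so the
    # test asserts success directly instead of expecting exit code 2.
    assert client.execute("cloud-init clean --logs").ok
    assert client.execute("cloud-init init --local").ok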
--- .../datasources/test_ec2_ipv6.py | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/tests/integration_tests/datasources/test_ec2_ipv6.py b/tests/integration_tests/datasources/test_ec2_ipv6.py index 0f5a2dbf6bb..41bb852e627 100644 --- a/tests/integration_tests/datasources/test_ec2_ipv6.py +++ b/tests/integration_tests/datasources/test_ec2_ipv6.py @@ -2,22 +2,13 @@ import pytest -from cloudinit.util import should_log_deprecation from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM -from tests.integration_tests.util import get_feature_flag_value - - -def _test_crawl(client: IntegrationInstance, ip: str): - return_code = ( - 2 - if should_log_deprecation( - "24.3", get_feature_flag_value(client, "DEPRECATION_INFO_BOUNDARY") - ) - else 0 - ) - assert client.execute("cloud-init clean --logs") - assert return_code == client.execute("cloud-init init --local").return_code + + +def _test_crawl(client, ip): + assert client.execute("cloud-init clean --logs").ok + assert client.execute("cloud-init init --local").ok log = client.read_from_file("/var/log/cloud-init.log") assert f"Using metadata source: '{ip}'" in log result = re.findall(r"Crawl of metadata service.* (\d+.\d+) seconds", log) From 8dbc5c23b68bf73551fd39c8a05801b86e38519d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 10 Jul 2024 17:52:25 -0500 Subject: [PATCH 018/131] test: Unconditionally skip test_multi_nic_hotplug_vpc (#5503) It is pretty consistently failing due to #5373 with no fix in sight. --- tests/integration_tests/modules/test_hotplug.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py index 8c7bc7839d0..c088240de1a 100644 --- a/tests/integration_tests/modules/test_hotplug.py +++ b/tests/integration_tests/modules/test_hotplug.py @@ -301,6 +301,7 @@ def test_multi_nic_hotplug(setup_image, session_cloud: IntegrationCloud): @pytest.mark.skipif(CURRENT_RELEASE <= FOCAL, reason="See LP: #2055397") @pytest.mark.skipif(PLATFORM != "ec2", reason="test is ec2 specific") +@pytest.mark.skip(reason="IMDS race, see GH-5373. 
Unskip when fixed.") def test_multi_nic_hotplug_vpc(setup_image, session_cloud: IntegrationCloud): """Tests that additional secondary NICs are routable from local networks after the hotplug hook is executed when network updates From e0e6a427fdc6826bf7b11d52157a1c5f9b3dde4d Mon Sep 17 00:00:00 2001 From: Curt Moore Date: Thu, 11 Jul 2024 05:17:25 -0500 Subject: [PATCH 019/131] Fix configuration of DNS servers via OpenStack (#5384) Ensure DNS server addresses are parsed from the proper location of network_data.json Fixes #5386 Co-authored-by: Alberto Contreras --- cloudinit/sources/helpers/openstack.py | 29 +++- .../sources/helpers/test_openstack.py | 126 ++++++++++++++++++ 2 files changed, 153 insertions(+), 2 deletions(-) diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 70998dda2ee..9b46a22c37d 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -578,8 +578,8 @@ def convert_net_json(network_json=None, known_macs=None): "scope", "dns_nameservers", "dns_search", - "routes", ], + "routes": ["network", "destination", "netmask", "gateway", "metric"], } links = network_json.get("links", []) @@ -620,6 +620,20 @@ def convert_net_json(network_json=None, known_macs=None): (k, v) for k, v in network.items() if k in valid_keys["subnet"] ) + # Filter the route entries as they may contain extra elements such + # as DNS which are required elsewhere by the cloudinit schema + routes = [ + dict( + (k, v) + for k, v in route.items() + if k in valid_keys["routes"] + ) + for route in network.get("routes", []) + ] + + if routes: + subnet.update({"routes": routes}) + if network["type"] == "ipv4_dhcp": subnet.update({"type": "dhcp4"}) elif network["type"] == "ipv6_dhcp": @@ -646,11 +660,22 @@ def convert_net_json(network_json=None, known_macs=None): } ) + # Look for either subnet or network specific DNS servers + # and add them as subnet level DNS entries. 
+ # Subnet specific nameservers dns_nameservers = [ service["address"] - for service in network.get("services", []) + for route in network.get("routes", []) + for service in route.get("services", []) if service.get("type") == "dns" ] + # Network specific nameservers + for service in network.get("services", []): + if service.get("type") != "dns": + continue + if service["address"] in dns_nameservers: + continue + dns_nameservers.append(service["address"]) if dns_nameservers: subnet["dns_nameservers"] = dns_nameservers diff --git a/tests/unittests/sources/helpers/test_openstack.py b/tests/unittests/sources/helpers/test_openstack.py index 7ae164140a0..6ec0bd75b0d 100644 --- a/tests/unittests/sources/helpers/test_openstack.py +++ b/tests/unittests/sources/helpers/test_openstack.py @@ -231,3 +231,129 @@ def test_bond_mac(self): assert expected == openstack.convert_net_json( network_json=network_json, known_macs=macs ) + + def test_dns_servers(self): + """ + Verify additional properties under subnet.routes are not rendered + """ + network_json = { + "links": [ + { + "id": "ens1f0np0", + "name": "ens1f0np0", + "type": "phy", + "ethernet_mac_address": "xx:xx:xx:xx:xx:00", + "mtu": 9000, + }, + { + "id": "ens1f1np1", + "name": "ens1f1np1", + "type": "phy", + "ethernet_mac_address": "xx:xx:xx:xx:xx:01", + "mtu": 9000, + }, + { + "id": "bond0", + "name": "bond0", + "type": "bond", + "bond_links": ["ens1f0np0", "ens1f1np1"], + "mtu": 9000, + "ethernet_mac_address": "xx:xx:xx:xx:xx:00", + "bond_mode": "802.3ad", + "bond_xmit_hash_policy": "layer3+4", + "bond_miimon": 100, + }, + { + "id": "bond0.123", + "name": "bond0.123", + "type": "vlan", + "vlan_link": "bond0", + "vlan_id": 123, + "vlan_mac_address": "xx:xx:xx:xx:xx:00", + }, + ], + "networks": [ + { + "id": "publicnet-ipv4", + "type": "ipv4", + "link": "bond0.123", + "ip_address": "x.x.x.x", + "netmask": "255.255.255.0", + "routes": [ + { + "network": "0.0.0.0", + "netmask": "0.0.0.0", + "gateway": "x.x.x.1", + "services": [ + {"type": "dns", "address": "1.1.1.1"}, + {"type": "dns", "address": "8.8.8.8"}, + ], + } + ], + "network_id": "00000000-0000-0000-0000-000000000000", + } + ], + "services": [], + } + expected = { + "version": 1, + "config": [ + { + "name": "ens1f0np0", + "type": "physical", + "mtu": 9000, + "subnets": [], + "mac_address": "xx:xx:xx:xx:xx:00", + }, + { + "name": "ens1f1np1", + "type": "physical", + "mtu": 9000, + "subnets": [], + "mac_address": "xx:xx:xx:xx:xx:01", + }, + { + "name": "bond0", + "type": "bond", + "mtu": 9000, + "subnets": [], + "mac_address": "xx:xx:xx:xx:xx:00", + "params": { + "bond-mode": "802.3ad", + "bond-xmit_hash_policy": "layer3+4", + "bond-miimon": 100, + }, + "bond_interfaces": ["ens1f0np0", "ens1f1np1"], + }, + { + "name": "bond0.123", + "type": "vlan", + "subnets": [ + { + "type": "static", + "netmask": "255.255.255.0", + "routes": [ + { + "network": "0.0.0.0", + "netmask": "0.0.0.0", + "gateway": "x.x.x.1", + } + ], + "address": "x.x.x.x", + "dns_nameservers": ["1.1.1.1", "8.8.8.8"], + "ipv4": True, + } + ], + "vlan_id": 123, + "vlan_link": "bond0", + }, + ], + } + macs = { + "xx:xx:xx:xx:xx:00": "ens1f0np0", + "xx:xx:xx:xx:xx:01": "ens1f1np1", + } + netcfg = openstack.convert_net_json( + network_json=network_json, known_macs=macs + ) + assert expected == netcfg From 311f7234765dec7c0f1ede7ecc88b303160bc892 Mon Sep 17 00:00:00 2001 From: Curt Moore Date: Thu, 18 Jul 2024 01:04:53 -0500 Subject: [PATCH 020/131] fix: Update DNS behavior for NetworkManager interfaces (#5496) If DNS information is 
added to a NetworkManager managed interface where the given protocol family is disabled, NetworkManager will be unable to activate the interface. #5387 --- cloudinit/net/network_manager.py | 10 +- tests/unittests/net/test_network_manager.py | 323 ++++++++++++++++++++ 2 files changed, 331 insertions(+), 2 deletions(-) create mode 100644 tests/unittests/net/test_network_manager.py diff --git a/cloudinit/net/network_manager.py b/cloudinit/net/network_manager.py index 41dcd24b27f..06305668fe4 100644 --- a/cloudinit/net/network_manager.py +++ b/cloudinit/net/network_manager.py @@ -239,7 +239,10 @@ def _add_nameserver(self, dns: str) -> None: Extends the ipv[46].dns property with a name server. """ family = "ipv6" if is_ipv6_address(dns) else "ipv4" - if self.config.has_section(family): + if ( + self.config.has_section(family) + and self._get_config_option(family, "method") != "disabled" + ): self._set_default(family, "dns", "") self.config[family]["dns"] = self.config[family]["dns"] + dns + ";" @@ -248,7 +251,10 @@ def _add_dns_search(self, dns_search: List[str]) -> None: Extends the ipv[46].dns-search property with a name server. """ for family in ["ipv4", "ipv6"]: - if self.config.has_section(family): + if ( + self.config.has_section(family) + and self._get_config_option(family, "method") != "disabled" + ): self._set_default(family, "dns-search", "") self.config[family]["dns-search"] = ( self.config[family]["dns-search"] diff --git a/tests/unittests/net/test_network_manager.py b/tests/unittests/net/test_network_manager.py new file mode 100644 index 00000000000..2aa476d7d15 --- /dev/null +++ b/tests/unittests/net/test_network_manager.py @@ -0,0 +1,323 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import textwrap +from unittest import mock + +import yaml + +from cloudinit.net import network_manager, network_state +from tests.unittests.helpers import dir2dict + + +def assert_equal_dict(expected_d, found_d): + for p, c in expected_d.items(): + if p not in found_d: + continue + assert c == found_d[p] + + +class TestNetworkManagerRenderNetworkState: + def _parse_network_state_from_config(self, config): + with mock.patch("cloudinit.net.network_state.get_interfaces_by_mac"): + config = yaml.safe_load(config) + return network_state.parse_net_config_data(config) + + def test_bond_dns_baseline(self, tmpdir): + + config = textwrap.dedent( + """\ + version: 1 + config: + - mac_address: 'xx:xx:xx:xx:xx:00' + mtu: 9000 + name: ens1f0np0 + subnets: [] + type: physical + - mac_address: 'xx:xx:xx:xx:xx:01' + mtu: 9000 + name: ens1f1np1 + subnets: [] + type: physical + - bond_interfaces: + - ens1f0np0 + - ens1f1np1 + mac_address: 'xx:xx:xx:xx:xx:00' + mtu: 9000 + name: bond0 + params: + bond-miimon: 100 + bond-mode: 802.3ad + bond-xmit_hash_policy: layer3+4 + subnets: [] + type: bond + - name: bond0.123 + subnets: + - address: 0.0.0.0 + ipv4: true + netmask: 255.255.255.0 + prefix: 24 + routes: + - gateway: 0.0.0.1 + netmask: 0.0.0.0 + network: 0.0.0.0 + type: static + type: vlan + vlan_id: 123 + vlan_link: bond0 + - address: 1.1.1.1 + search: hostname1 + type: nameserver + """ + ) + + expected_config = { + "/etc/NetworkManager/system-connections/cloud-init-ens1f0np0.nmconnection": textwrap.dedent( # noqa: E501 + """\ + # Generated by cloud-init. Changes will be lost. 
+ + [connection] + id=cloud-init ens1f0np0 + uuid=99c4bf6c-1691-53c4-bfe8-abdcb90b278a + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=9000 + mac-address=XX:XX:XX:XX:XX:00 + + """ + ), + "/etc/NetworkManager/system-connections/cloud-init-ens1f1np1.nmconnection": textwrap.dedent( # noqa: E501 + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init ens1f1np1 + uuid=2685ec2b-1c26-583d-a660-0ab24201fef3 + autoconnect-priority=120 + type=ethernet + slave-type=bond + master=54317911-f840-516b-a10d-82cb4c1f075c + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [ethernet] + mtu=9000 + mac-address=XX:XX:XX:XX:XX:01 + + """ + ), + "/etc/NetworkManager/system-connections/cloud-init-bond0.nmconnection": textwrap.dedent( # noqa: E501 + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0 + uuid=54317911-f840-516b-a10d-82cb4c1f075c + autoconnect-priority=120 + type=bond + interface-name=bond0 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [bond] + mode=802.3ad + + [ethernet] + mtu=9000 + + """ + ), + "/etc/NetworkManager/system-connections/cloud-init-bond0.123.nmconnection": textwrap.dedent( # noqa: E501 + """\ + # Generated by cloud-init. Changes will be lost. + + [connection] + id=cloud-init bond0.123 + uuid=7541e7a5-450b-570b-b3e8-a7f9eced114a + autoconnect-priority=120 + type=vlan + interface-name=bond0.123 + + [user] + org.freedesktop.NetworkManager.origin=cloud-init + + [vlan] + id=123 + parent=54317911-f840-516b-a10d-82cb4c1f075c + + [ipv4] + method=manual + may-fail=false + address1=0.0.0.0/24 + route1=0.0.0.0/0,0.0.0.1 + dns=1.1.1.1; + dns-search=hostname1; + + """ + ), + } + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + ns = self._parse_network_state_from_config(config) + target = str(tmpdir) + network_manager.Renderer().render_network_state(ns, target=target) + rendered_content = dir2dict(target) + assert_equal_dict(expected_config, rendered_content) + + def test_bond_dns_redacted_with_method_disabled(self, tmpdir): + + config = textwrap.dedent( + """\ + version: 1 + config: + - mac_address: 'xx:xx:xx:xx:xx:00' + mtu: 9000 + name: ens1f0np0 + subnets: [] + type: physical + - mac_address: 'xx:xx:xx:xx:xx:01' + mtu: 9000 + name: ens1f1np1 + subnets: [] + type: physical + - bond_interfaces: + - ens1f0np0 + - ens1f1np1 + mac_address: 'xx:xx:xx:xx:xx:00' + mtu: 9000 + name: bond0 + params: + bond-miimon: 100 + bond-mode: 802.3ad + bond-xmit_hash_policy: layer3+4 + subnets: [] + type: bond + - name: bond0.123 + subnets: + - address: 0.0.0.0 + ipv4: true + netmask: 255.255.255.0 + prefix: 24 + routes: + - gateway: 0.0.0.1 + netmask: 0.0.0.0 + network: 0.0.0.0 + type: ipv6_slaac # !! to force ipvx.method to be disabled + type: vlan + vlan_id: 123 + vlan_link: bond0 + - address: 1.1.1.1 + search: hostname1 + type: nameserver + """ + ) + + expected_config = { + "/etc/NetworkManager/system-connections/cloud-init-ens1f0np0.nmconnection": textwrap.dedent( # noqa: E501 + """\ + # Generated by cloud-init. Changes will be lost. 
+
+            [connection]
+            id=cloud-init ens1f0np0
+            uuid=99c4bf6c-1691-53c4-bfe8-abdcb90b278a
+            autoconnect-priority=120
+            type=ethernet
+            slave-type=bond
+            master=54317911-f840-516b-a10d-82cb4c1f075c
+
+            [user]
+            org.freedesktop.NetworkManager.origin=cloud-init
+
+            [ethernet]
+            mtu=9000
+            mac-address=XX:XX:XX:XX:XX:00
+
+            """
+        ),
+        "/etc/NetworkManager/system-connections/cloud-init-ens1f1np1.nmconnection": textwrap.dedent(  # noqa: E501
+            """\
+            # Generated by cloud-init. Changes will be lost.
+
+            [connection]
+            id=cloud-init ens1f1np1
+            uuid=2685ec2b-1c26-583d-a660-0ab24201fef3
+            autoconnect-priority=120
+            type=ethernet
+            slave-type=bond
+            master=54317911-f840-516b-a10d-82cb4c1f075c
+
+            [user]
+            org.freedesktop.NetworkManager.origin=cloud-init
+
+            [ethernet]
+            mtu=9000
+            mac-address=XX:XX:XX:XX:XX:01
+
+            """
+        ),
+        "/etc/NetworkManager/system-connections/cloud-init-bond0.nmconnection": textwrap.dedent(  # noqa: E501
+            """\
+            # Generated by cloud-init. Changes will be lost.
+
+            [connection]
+            id=cloud-init bond0
+            uuid=54317911-f840-516b-a10d-82cb4c1f075c
+            autoconnect-priority=120
+            type=bond
+            interface-name=bond0
+
+            [user]
+            org.freedesktop.NetworkManager.origin=cloud-init
+
+            [bond]
+            mode=802.3ad
+
+            [ethernet]
+            mtu=9000
+
+            """
+        ),
+        "/etc/NetworkManager/system-connections/cloud-init-bond0.123.nmconnection": textwrap.dedent(  # noqa: E501
+            """\
+            # Generated by cloud-init. Changes will be lost.
+
+            [connection]
+            id=cloud-init bond0.123
+            uuid=7541e7a5-450b-570b-b3e8-a7f9eced114a
+            autoconnect-priority=120
+            type=vlan
+            interface-name=bond0.123
+
+            [user]
+            org.freedesktop.NetworkManager.origin=cloud-init
+
+            [vlan]
+            id=123
+            parent=54317911-f840-516b-a10d-82cb4c1f075c
+
+            [ipv6]
+            method=auto
+            may-fail=false
+            address1=0.0.0.0/24
+            dns-search=hostname1;
+
+            [ipv4]
+            method=disabled
+            route1=0.0.0.0/0,0.0.0.1
+
+            """
+        ),
+    }
+    with mock.patch("cloudinit.net.get_interfaces_by_mac"):
+        ns = self._parse_network_state_from_config(config)
+    target = str(tmpdir)
+    network_manager.Renderer().render_network_state(ns, target=target)
+    rendered_content = dir2dict(target)
+    assert_equal_dict(expected_config, rendered_content)
From 658d1841f7ed6b2397d2d9328c2d143150c8b9f8 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Thu, 18 Jul 2024 00:38:45 -0600
Subject: [PATCH 021/131] doc(OVF): Document how to configure cloud-init
 (#5519)

---
 doc/rtd/reference/datasources/ovf.rst | 28 ++++++++++++++++++++++++++-
 1 file changed, 27 insertions(+), 1 deletion(-)

diff --git a/doc/rtd/reference/datasources/ovf.rst b/doc/rtd/reference/datasources/ovf.rst
index a233df13a78..0ee33d0b821 100644
--- a/doc/rtd/reference/datasources/ovf.rst
+++ b/doc/rtd/reference/datasources/ovf.rst
@@ -3,9 +3,35 @@
 OVF
 ***

-The OVF datasource provides a datasource for reading data from an
+The OVF datasource provides a generic datasource for reading data from an
 `Open Virtualization Format`_ ISO transport.

+What platforms support OVF
+--------------------------
+
+OVF is an open standard which is supported by various virtualization
+platforms, including (but not limited to):
+
+GCP
+OpenShift
+Proxmox
+vSphere
+VirtualBox
+Xen
+
+While these (and many more) platforms support OVF, in some cases cloud-init
+has alternative datasources which provide better platform integration.
+Make sure to check whether another datasource exists which is specific to
+your platform of choice before trying to use OVF.
+
+Configuration
+-------------
+
+Cloud-init gets its configuration from an OVF XML file. User-data and network
+configuration are provided by properties in the XML which contain key/value
+pairs. The user-data is provided by a key named ``user-data``, and network
+configuration is provided by a key named ``network-config``.
+
 Graceful rpctool fallback
 -------------------------

From 0b4084374440d2a5a9968129e0460a1a009d9830 Mon Sep 17 00:00:00 2001
From: Ani Sinha
Date: Thu, 18 Jul 2024 13:36:39 +0530
Subject: [PATCH 022/131] Support setting mirrorlist in yum repository config
 (#5522)

'mirrorlist' config can be specified instead of, or along with, 'baseurl'
in the yum repository config. Add support for specifying mirrorlist instead
of 'baseurl'.

Fixes GH-5520

Signed-off-by: Ani Sinha
---
 cloudinit/config/cc_yum_add_repo.py            |  2 +-
 .../schemas/schema-cloud-config-v1.json        | 10 +++++
 doc/examples/cloud-config-yum-repo.txt         |  3 +-
 .../unittests/config/test_cc_yum_add_repo.py   | 40 ++++++++++++++++++-
 4 files changed, 52 insertions(+), 3 deletions(-)

diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 9a717af3d1a..548c83bab6d 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -141,7 +141,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:
             n_repo_config[k] = v
     repo_config = n_repo_config
     missing_required = 0
-    req_fields = ["baseurl", "metalink"]
+    req_fields = ["baseurl", "metalink", "mirrorlist"]
     for req_field in req_fields:
         if req_field not in repo_config:
             missing_required += 1
diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json
index e45504d4113..253e90b8c55 100644
--- a/cloudinit/config/schemas/schema-cloud-config-v1.json
+++ b/cloudinit/config/schemas/schema-cloud-config-v1.json
@@ -3457,6 +3457,11 @@
           "format": "uri",
           "description": "Specifies a URL to a metalink file for the repomd.xml"
         },
+        "mirrorlist": {
+          "type": "string",
+          "format": "uri",
+          "description": "Specifies a URL to a file containing a list of baseurls"
+        },
         "name": {
           "type": "string",
           "description": "Optional human-readable name of the yum repo."
@@ -3494,6 +3499,11 @@
           "required": [
             "metalink"
           ]
+        },
+        {
+          "required": [
+            "mirrorlist"
+          ]
         }
       ]
     }
diff --git a/doc/examples/cloud-config-yum-repo.txt b/doc/examples/cloud-config-yum-repo.txt
index 6a4037e2462..cee26677b49 100644
--- a/doc/examples/cloud-config-yum-repo.txt
+++ b/doc/examples/cloud-config-yum-repo.txt
@@ -11,9 +11,10 @@ yum_repos:
     # Any repository configuration options
     # See: man yum.conf
     #
-    # At least one of 'baseurl' or 'metalink' is required!
+    # At least one of 'baseurl', 'metalink' or 'mirrorlist' is required!
    baseurl: http://download.fedoraproject.org/pub/epel/testing/5/$basearch
    metalink: https://mirrors.fedoraproject.org/metalink?repo=epel-$releasever&arch=$basearch&infra=$infra&content=$contentdir
+    mirrorlist: https://mirrors.fedoraproject.org/metalink?repo=fedora-$releasever&
    enabled: false
    failovermethod: priority
    gpgcheck: true
diff --git a/tests/unittests/config/test_cc_yum_add_repo.py b/tests/unittests/config/test_cc_yum_add_repo.py
index e6a9109ee19..c77262f508f 100644
--- a/tests/unittests/config/test_cc_yum_add_repo.py
+++ b/tests/unittests/config/test_cc_yum_add_repo.py
@@ -31,7 +31,8 @@ def test_bad_config(self):
             "yum_repos": {
                 "epel-testing": {
                     "name": "Extra Packages for Enterprise Linux 5 - Testing",
-                    # At least one of baseurl or metalink must be present.
+                    # At least one of baseurl or metalink or mirrorlist
+                    # must be present.
                     # Missing this should cause the repo not to be written
                     # 'baseurl': 'http://blah.org/pub/epel/testing/5/$barch',
                     "enabled": False,
@@ -84,6 +85,43 @@ def test_metalink_config(self):
             for k, v in expected[section].items():
                 self.assertEqual(parser.get(section, k), v)

+    def test_mirrorlist_config(self):
+        cfg = {
+            "yum_repos": {
+                "epel-testing": {
+                    "name": "Extra Packages for Enterprise Linux 5 - Testing",
+                    "mirrorlist": "http://mirrors.blah.org/metalink?repo=rhel-$releasever",
+                    "enabled": False,
+                    "gpgcheck": True,
+                    "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL",
+                    "failovermethod": "priority",
+                },
+            },
+        }
+        self.patchUtils(self.tmp)
+        self.patchOS(self.tmp)
+        cc_yum_add_repo.handle("yum_add_repo", cfg, None, [])
+        contents = util.load_text_file("/etc/yum.repos.d/epel-testing.repo")
+        parser = configparser.ConfigParser()
+        parser.read_string(contents)
+        expected = {
+            "epel-testing": {
+                "name": "Extra Packages for Enterprise Linux 5 - Testing",
+                "failovermethod": "priority",
+                "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL",
+                "enabled": "0",
+                "mirrorlist": "http://mirrors.blah.org/metalink?repo=rhel-$releasever",
+                "gpgcheck": "1",
+            }
+        }
+        for section in expected:
+            self.assertTrue(
+                parser.has_section(section),
+                "Contains section {0}".format(section),
+            )
+            for k, v in expected[section].items():
+                self.assertEqual(parser.get(section, k), v)
+
     def test_write_config(self):
         cfg = {
             "yum_repos": {
From 550c685c98551f65c30832b186fe091721b48477 Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Thu, 18 Jul 2024 09:04:54 -0400
Subject: [PATCH 023/131] fix: Clean cache if no datasource fallback (#5499)

9929a00 added the ability to use a cached datasource when none is found.
This was supposed to be per-datasource, but the lack of cache cleaning got
applied universally. This commit makes it so the cache will be cleaned, as
it was before, if fallback isn't implemented in the datasource.
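A datasource opts in to reusing the pickled cache by overriding
`check_if_fallback_is_allowed` (illustrative sketch; `DataSourceExample`
is a hypothetical name, mirroring the test datasource added below):

    from cloudinit import sources

    class DataSourceExample(sources.DataSource):
        def check_if_fallback_is_allowed(self):
            # Returning True lets cloud-init fall back to the pickled
            # datasource instead of deleting the instance cache.
            return True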
Fixes GH-5486
---
 cloudinit/stages.py                              |   1 +
 .../assets/DataSourceNoCacheNetworkOnly.py       |  23 ++++
 .../assets/DataSourceNoCacheWithFallback.py      |  29 +++++
 .../datasources/test_caching.py                  | 115 ++++++++++++++++++
 tests/integration_tests/instances.py             |   4 +-
 5 files changed, 171 insertions(+), 1 deletion(-)
 create mode 100644 tests/integration_tests/assets/DataSourceNoCacheNetworkOnly.py
 create mode 100644 tests/integration_tests/assets/DataSourceNoCacheWithFallback.py
 create mode 100644 tests/integration_tests/datasources/test_caching.py

diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 52876e72434..872905e39d1 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -406,6 +406,7 @@ def _get_data_source(self, existing) -> sources.DataSource:
                     ds,
                 )
             else:
+                util.del_file(self.paths.instance_link)
                 raise e
         self.datasource = ds
         # Ensure we adjust our path members datasource
diff --git a/tests/integration_tests/assets/DataSourceNoCacheNetworkOnly.py b/tests/integration_tests/assets/DataSourceNoCacheNetworkOnly.py
new file mode 100644
index 00000000000..54a7bab3437
--- /dev/null
+++ b/tests/integration_tests/assets/DataSourceNoCacheNetworkOnly.py
@@ -0,0 +1,23 @@
+import logging
+
+from cloudinit import sources
+
+LOG = logging.getLogger(__name__)
+
+
+class DataSourceNoCacheNetworkOnly(sources.DataSource):
+    def _get_data(self):
+        LOG.debug("TEST _get_data called")
+        return True
+
+
+datasources = [
+    (
+        DataSourceNoCacheNetworkOnly,
+        (sources.DEP_FILESYSTEM, sources.DEP_NETWORK),
+    ),
+]
+
+
+def get_datasource_list(depends):
+    return sources.list_from_depends(depends, datasources)
diff --git a/tests/integration_tests/assets/DataSourceNoCacheWithFallback.py b/tests/integration_tests/assets/DataSourceNoCacheWithFallback.py
new file mode 100644
index 00000000000..fdfc473f8a5
--- /dev/null
+++ b/tests/integration_tests/assets/DataSourceNoCacheWithFallback.py
@@ -0,0 +1,29 @@
+import logging
+import os
+
+from cloudinit import sources
+
+LOG = logging.getLogger(__name__)
+
+
+class DataSourceNoCacheWithFallback(sources.DataSource):
+    def _get_data(self):
+        if os.path.exists("/ci-test-firstboot"):
+            LOG.debug("TEST _get_data called")
+            return True
+        return False
+
+    def check_if_fallback_is_allowed(self):
+        return True
+
+
+datasources = [
+    (
+        DataSourceNoCacheWithFallback,
+        (sources.DEP_FILESYSTEM,),
+    ),
+]
+
+
+def get_datasource_list(depends):
+    return sources.list_from_depends(depends, datasources)
diff --git a/tests/integration_tests/datasources/test_caching.py b/tests/integration_tests/datasources/test_caching.py
new file mode 100644
index 00000000000..33e4b671c28
--- /dev/null
+++ b/tests/integration_tests/datasources/test_caching.py
@@ -0,0 +1,115 @@
+import pytest
+
+from tests.integration_tests import releases, util
+from tests.integration_tests.instances import IntegrationInstance
+
+
+def setup_custom_datasource(client: IntegrationInstance, datasource_name: str):
+    client.write_to_file(
+        "/etc/cloud/cloud.cfg.d/99-imds.cfg",
+        f"datasource_list: [ {datasource_name}, None ]\n"
+        "datasource_pkg_list: [ cisources ]",
+    )
+    assert client.execute(
+        "mkdir -p /usr/lib/python3/dist-packages/cisources"
+    )
+    client.push_file(
+        util.ASSETS_DIR / f"DataSource{datasource_name}.py",
+        "/usr/lib/python3/dist-packages/cisources/"
+        f"DataSource{datasource_name}.py",
+    )
+
+
+def verify_no_cache_boot(client: IntegrationInstance):
+    log = client.read_from_file("/var/log/cloud-init.log")
+    util.verify_ordered_items_in_text(
+        [
+            "No local datasource found",
+            "running 'init'",
+            "no cache found",
+            "Detected platform",
+            "TEST _get_data called",
+        ],
+        text=log,
+    )
+    util.verify_clean_boot(client)
+
+
+@pytest.mark.skipif(
+    not releases.IS_UBUNTU,
+    reason="hardcoded dist-packages directory",
+)
+def test_no_cache_network_only(client: IntegrationInstance):
+    """Test cache removal per boot. GH-5486
+
+    This tests the CloudStack password reset use case. The expectation is:
+    - Metadata is fetched in the network timeframe only
+    - Because `check_instance_id` is not defined, no cached datasource
+      is found in the init-local phase, but the cache is used in the
+      remaining phases due to the existence of /run/cloud-init/.instance-id
+    - Because `check_if_fallback_is_allowed` is not defined, cloud-init
+      does NOT fall back to the pickled datasource, and will
+      instead delete the cache during the init-local phase
+    - Metadata is therefore fetched every boot in the network phase
+    """
+    setup_custom_datasource(client, "NoCacheNetworkOnly")
+
+    # Run cloud-init as if first boot
+    assert client.execute("cloud-init clean --logs")
+    client.restart()
+
+    verify_no_cache_boot(client)
+
+    # Clear the log without clean and run cloud-init for subsequent boot
+    assert client.execute("echo '' > /var/log/cloud-init.log")
+    client.restart()
+
+    verify_no_cache_boot(client)
+
+
+@pytest.mark.skipif(
+    not releases.IS_UBUNTU,
+    reason="hardcoded dist-packages directory",
+)
+def test_no_cache_with_fallback(client: IntegrationInstance):
+    """Test we use fallback when defined and no cache available."""
+    setup_custom_datasource(client, "NoCacheWithFallback")
+
+    # Run cloud-init as if first boot
+    assert client.execute("cloud-init clean --logs")
+    # Used by custom datasource
+    client.execute("touch /ci-test-firstboot")
+    client.restart()
+
+    log = client.read_from_file("/var/log/cloud-init.log")
+    util.verify_ordered_items_in_text(
+        [
+            "no cache found",
+            "Detected platform",
+            "TEST _get_data called",
+            "running 'init'",
+            "restored from cache with run check",
+            "running 'modules:config'",
+        ],
+        text=log,
+    )
+    util.verify_clean_boot(client)
+
+    # Clear the log without clean and run cloud-init for subsequent boot
+    assert client.execute("echo '' > /var/log/cloud-init.log")
+    client.execute("rm /ci-test-firstboot")
+    client.restart()
+
+    log = client.read_from_file("/var/log/cloud-init.log")
+    util.verify_ordered_items_in_text(
+        [
+            "cache invalid in datasource",
+            "Detected platform",
+            "Restored fallback datasource from checked cache",
+            "running 'init'",
+            "restored from cache with run check",
+            "running 'modules:config'",
+        ],
+        text=log,
+    )
+    util.verify_clean_boot(client)
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index cfae37c272c..1c8344ab916 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -106,7 +106,9 @@ def push_file(
         # First push to a temporary directory because of permissions issues
         tmp_path = _get_tmp_path()
         self.instance.push_file(str(local_path), tmp_path)
-        assert self.execute("mv {} {}".format(tmp_path, str(remote_path))).ok
+        assert self.execute(
+            "mv {} {}".format(tmp_path, str(remote_path))
+        ), f"Failed to push {tmp_path} to {remote_path}"

     def read_from_file(self, remote_path) -> str:
         result = self.execute("cat {}".format(remote_path))
From 57d130eeb75f4652cd92d5605949cdc334113239 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Thu, 18 Jul 2024 14:01:25 -0600
Subject: [PATCH 024/131] chore(formatting): fix squashed commit test
 formatting (#5524)

---
tests/integration_tests/datasources/test_caching.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/integration_tests/datasources/test_caching.py b/tests/integration_tests/datasources/test_caching.py index 33e4b671c28..f043ddbe779 100644 --- a/tests/integration_tests/datasources/test_caching.py +++ b/tests/integration_tests/datasources/test_caching.py @@ -10,9 +10,7 @@ def setup_custom_datasource(client: IntegrationInstance, datasource_name: str): f"datasource_list: [ {datasource_name}, None ]\n" "datasource_pkg_list: [ cisources ]", ) - assert client.execute( - "mkdir -p /usr/lib/python3/dist-packages/cisources" - ) + assert client.execute("mkdir -p /usr/lib/python3/dist-packages/cisources") client.push_file( util.ASSETS_DIR / f"DataSource{datasource_name}.py", "/usr/lib/python3/dist-packages/cisources/" From b0a673a53dd7ea554a9fe197dc4066fec21c53c4 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 19 Jun 2024 14:41:28 -0500 Subject: [PATCH 025/131] feat: Add trace-level logger (#5414) This is useful for logs we want hidden by default but can be turned on via configuration. --- cloudinit/log.py | 37 ++++++++++++++++++++++++++++++------- tests/unittests/test_log.py | 12 +++++++++++- 2 files changed, 41 insertions(+), 8 deletions(-) diff --git a/cloudinit/log.py b/cloudinit/log.py index 08d0efa3001..61b96262aa1 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -23,6 +23,23 @@ DEFAULT_LOG_FORMAT = "%(asctime)s - %(filename)s[%(levelname)s]: %(message)s" DEPRECATED = 35 +TRACE = logging.DEBUG - 5 + + +class CustomLoggerType(logging.Logger): + """A hack to get mypy to stop complaining about custom logging methods. + + When using deprecated or trace logging, rather than: + LOG = logging.getLogger(__name__) + Instead do: + LOG = cast(CustomLoggerType, logging.getLogger(__name__)) + """ + + def trace(self, *args, **kwargs): + pass + + def deprecated(self, *args, **kwargs): + pass def setup_basic_logging(level=logging.DEBUG, formatter=None): @@ -45,14 +62,20 @@ def flush_loggers(root): flush_loggers(root.parent) -def define_deprecation_logger(lvl=DEPRECATED): - logging.addLevelName(lvl, "DEPRECATED") +def define_extra_loggers() -> None: + """Add DEPRECATED and TRACE log levels to the logging module.""" + + def new_logger(level): + def log_at_level(self, message, *args, **kwargs): + if self.isEnabledFor(level): + self._log(level, message, args, **kwargs) - def deprecated(self, message, *args, **kwargs): - if self.isEnabledFor(lvl): - self._log(lvl, message, args, **kwargs) + return log_at_level - logging.Logger.deprecated = deprecated + logging.addLevelName(DEPRECATED, "DEPRECATED") + logging.addLevelName(TRACE, "TRACE") + setattr(logging.Logger, "deprecated", new_logger(DEPRECATED)) + setattr(logging.Logger, "trace", new_logger(TRACE)) def setup_logging(cfg=None): @@ -183,7 +206,7 @@ def configure_root_logger(): # Always format logging timestamps as UTC time logging.Formatter.converter = time.gmtime - define_deprecation_logger() + define_extra_loggers() setup_backup_logging() reset_logging() diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py index 87996310349..175afc0eb94 100644 --- a/tests/unittests/test_log.py +++ b/tests/unittests/test_log.py @@ -6,6 +6,7 @@ import io import logging import time +from typing import cast import pytest @@ -63,10 +64,18 @@ def test_logger_uses_gmtime(self): class TestDeprecatedLogs: def test_deprecated_log_level(self, caplog): - logging.getLogger().deprecated("deprecated message") + logger = 
cast(log.CustomLoggerType, logging.getLogger()) + logger.deprecated("deprecated message") assert "DEPRECATED" == caplog.records[0].levelname assert "deprecated message" in caplog.text + def test_trace_log_level(self, caplog): + logger = cast(log.CustomLoggerType, logging.getLogger()) + logger.setLevel(logging.NOTSET) + logger.trace("trace message") + assert "TRACE" == caplog.records[0].levelname + assert "trace message" in caplog.text + @pytest.mark.parametrize( "expected_log_level, deprecation_info_boundary", ( @@ -115,6 +124,7 @@ def test_deprecate_log_level_based_on_features( ) def test_log_deduplication(self, caplog): + log.define_extra_loggers() util.deprecate( deprecated="stuff", deprecated_version="19.1", From 8ec2f64ad69439580ed732e6436b6a9e420e00fe Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 19 Jun 2024 15:04:34 -0500 Subject: [PATCH 026/131] refactor: replace verbosity with log levels in logs.py (#5414) --- cloudinit/cmd/devel/logs.py | 116 +++++++++++++------------ tests/unittests/cmd/devel/test_logs.py | 27 ++---- 2 files changed, 66 insertions(+), 77 deletions(-) diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py index a1e4eb8dfad..d3600d2ef05 100755 --- a/cloudinit/cmd/devel/logs.py +++ b/cloudinit/cmd/devel/logs.py @@ -7,14 +7,17 @@ """Define 'collect-logs' utility and handler to include in cloud-init cmd.""" import argparse +import logging import os +import pathlib import shutil import subprocess import sys from datetime import datetime, timezone from pathlib import Path -from typing import NamedTuple, Optional +from typing import NamedTuple, Optional, cast +from cloudinit import log from cloudinit.cmd.devel import read_cfg_paths from cloudinit.stages import Init from cloudinit.subp import ProcessExecutionError, subp @@ -27,6 +30,8 @@ write_file, ) +LOG = cast(log.CustomLoggerType, logging.getLogger(__name__)) + class LogPaths(NamedTuple): userdata_raw: str @@ -152,111 +157,103 @@ def _get_copytree_ignore_files(paths: LogPaths): return ignored_files -def _write_command_output_to_file(cmd, filename, msg, verbosity): +def _write_command_output_to_file(cmd, filename, msg): """Helper which runs a command and writes output or error to filename.""" ensure_dir(os.path.dirname(filename)) try: output = subp(cmd).stdout except ProcessExecutionError as e: write_file(filename, str(e)) - _debug("collecting %s failed.\n" % msg, 1, verbosity) + LOG.debug("collecting %s failed.", msg) else: write_file(filename, output) - _debug("collected %s\n" % msg, 1, verbosity) + LOG.debug("collected %s", msg) return output -def _stream_command_output_to_file(cmd, filename, msg, verbosity): - """Helper which runs a command and writes output or error to filename.""" +def _stream_command_output_to_file(cmd, filename, msg): + """Helper which runs a command and writes output or error to filename. + + `subprocess.call` is invoked directly here to stream output to the file. + Otherwise memory usage can be high for large outputs. 
+ """ ensure_dir(os.path.dirname(filename)) try: with open(filename, "w") as f: subprocess.call(cmd, stdout=f, stderr=f) # nosec B603 except OSError as e: write_file(filename, str(e)) - _debug("collecting %s failed.\n" % msg, 1, verbosity) + LOG.debug("collecting %s failed.", msg) else: - _debug("collected %s\n" % msg, 1, verbosity) - - -def _debug(msg, level, verbosity): - if level <= verbosity: - sys.stderr.write(msg) + LOG.debug("collected %s", msg) -def _collect_file(path, out_dir, verbosity): +def _collect_file(path: str, out_dir: str) -> None: if os.path.isfile(path): copy(path, out_dir) - _debug("collected file: %s\n" % path, 1, verbosity) + LOG.debug("collected file: %s", path) else: - _debug("file %s did not exist\n" % path, 2, verbosity) + LOG.trace("file %s did not exist", path) -def _collect_installer_logs( - log_dir: str, include_userdata: bool, verbosity: int -): +def _collect_installer_logs(log_dir: str, include_userdata: bool) -> None: """Obtain subiquity logs and config files.""" for src_file in INSTALLER_APPORT_FILES: destination_dir = Path(log_dir + src_file.path).parent if not destination_dir.exists(): ensure_dir(str(destination_dir)) - _collect_file(src_file.path, str(destination_dir), verbosity) + _collect_file(src_file.path, str(destination_dir)) if include_userdata: for src_file in INSTALLER_APPORT_SENSITIVE_FILES: destination_dir = Path(log_dir + src_file.path).parent if not destination_dir.exists(): ensure_dir(str(destination_dir)) - _collect_file(src_file.path, str(destination_dir), verbosity) + _collect_file(src_file.path, str(destination_dir)) -def _collect_version_info(log_dir: str, verbosity: int): +def _collect_version_info(log_dir: str) -> None: version = _write_command_output_to_file( cmd=["cloud-init", "--version"], filename=os.path.join(log_dir, "version"), msg="cloud-init --version", - verbosity=verbosity, ) dpkg_ver = _write_command_output_to_file( cmd=["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"], filename=os.path.join(log_dir, "dpkg-version"), msg="dpkg version", - verbosity=verbosity, ) if not version: version = dpkg_ver if dpkg_ver else "not-available" - _debug("collected cloud-init version: %s\n" % version, 1, verbosity) + LOG.debug("collected cloud-init version: %s", version) -def _collect_system_logs(log_dir: str, verbosity: int): +def _collect_system_logs(log_dir: str) -> None: _stream_command_output_to_file( cmd=["dmesg"], filename=os.path.join(log_dir, "dmesg.txt"), msg="dmesg output", - verbosity=verbosity, ) _stream_command_output_to_file( cmd=["journalctl", "--boot=0", "-o", "short-precise"], filename=os.path.join(log_dir, "journal.txt"), msg="systemd journal of current boot", - verbosity=verbosity, ) def _collect_cloudinit_logs( log_dir: str, - verbosity: int, init: Init, paths: LogPaths, include_userdata: bool, -): - for log in get_config_logfiles(init.cfg): - _collect_file(log, log_dir, verbosity) +) -> None: + for logfile in get_config_logfiles(init.cfg): + _collect_file(logfile, log_dir) if include_userdata: user_data_file = paths.userdata_raw - _collect_file(user_data_file, log_dir, verbosity) + _collect_file(user_data_file, log_dir) -def _collect_run_dir(log_dir: str, verbosity: int, paths: LogPaths): +def _collect_run_dir(log_dir: str, paths: LogPaths) -> None: run_dir = os.path.join(log_dir, "run") ensure_dir(run_dir) if os.path.exists(paths.run_dir): @@ -267,15 +264,10 @@ def _collect_run_dir(log_dir: str, verbosity: int, paths: LogPaths): ignore=lambda _, __: _get_copytree_ignore_files(paths), ) except shutil.Error 
as e: - sys.stderr.write("Failed collecting file(s) due to error:\n") - sys.stderr.write(str(e) + "\n") - _debug("collected dir %s\n" % paths.run_dir, 1, verbosity) + LOG.warning("Failed collecting file(s) due to error: %s", e) + LOG.debug("collected directory: %s", paths.run_dir) else: - _debug( - "directory '%s' did not exist\n" % paths.run_dir, - 1, - verbosity, - ) + LOG.debug("directory '%s' did not exist", paths.run_dir) if os.path.exists(os.path.join(paths.run_dir, "disabled")): # Fallback to grab previous cloud/data cloud_data_dir = Path(paths.cloud_data) @@ -286,9 +278,7 @@ def _collect_run_dir(log_dir: str, verbosity: int, paths: LogPaths): ) -def collect_logs( - tarfile: str, include_userdata: bool, verbosity: int = 0 -) -> int: +def collect_logs(tarfile: str, include_userdata: bool) -> int: """Collect all cloud-init logs and tar them up into the provided tarfile. @param tarfile: The path of the tar-gzipped file to create. @@ -296,9 +286,9 @@ def collect_logs( @return: 0 on success, 1 on failure. """ if include_userdata and os.getuid() != 0: - sys.stderr.write( - "To include userdata, root user is required." - " Try sudo cloud-init collect-logs\n" + LOG.error( + "To include userdata, root user is required. " + "Try sudo cloud-init collect-logs" ) return 1 @@ -312,25 +302,37 @@ def collect_logs( init.read_cfg() paths = get_log_paths(init) - _collect_version_info(log_dir, verbosity) - _collect_system_logs(log_dir, verbosity) - _collect_cloudinit_logs( - log_dir, verbosity, init, paths, include_userdata - ) - _collect_installer_logs(log_dir, include_userdata, verbosity) - _collect_run_dir(log_dir, verbosity, paths) + _collect_version_info(log_dir) + _collect_system_logs(log_dir) + _collect_cloudinit_logs(log_dir, init, paths, include_userdata) + _collect_installer_logs(log_dir, include_userdata) + _collect_run_dir(log_dir, paths) with chdir(tmp_dir): subp(["tar", "czvf", tarfile, log_dir.replace(f"{tmp_dir}/", "")]) - sys.stderr.write("Wrote %s\n" % tarfile) + LOG.info("Wrote %s", tarfile) return 0 +def _setup_logger(verbosity: int) -> None: + log.reset_logging() + if verbosity == 0: + level = logging.INFO + elif verbosity == 1: + level = logging.DEBUG + else: + level = log.TRACE + LOG.setLevel(level) + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter("%(message)s")) + LOG.addHandler(handler) + + def handle_collect_logs_args(name, args): """Handle calls to 'cloud-init collect-logs' as a subcommand.""" + _setup_logger(args.verbosity) return collect_logs( tarfile=args.tarfile, include_userdata=args.userdata, - verbosity=args.verbosity, ) diff --git a/tests/unittests/cmd/devel/test_logs.py b/tests/unittests/cmd/devel/test_logs.py index 7dfdfac6edc..f8385a4c3ef 100644 --- a/tests/unittests/cmd/devel/test_logs.py +++ b/tests/unittests/cmd/devel/test_logs.py @@ -4,7 +4,6 @@ import os import re from datetime import datetime -from io import StringIO import pytest @@ -21,22 +20,19 @@ @mock.patch("cloudinit.cmd.devel.logs.os.getuid") class TestCollectLogs: def test_collect_logs_with_userdata_requires_root_user( - self, m_getuid, tmpdir + self, m_getuid, tmpdir, caplog ): """collect-logs errors when non-root user collects userdata .""" m_getuid.return_value = 100 # non-root output_tarfile = tmpdir.join("logs.tgz") - with mock.patch("sys.stderr", new_callable=StringIO) as m_stderr: - assert 1 == logs.collect_logs( - output_tarfile, include_userdata=True - ) + assert 1 == logs.collect_logs(output_tarfile, include_userdata=True) assert ( "To include userdata, root user 
is required." - " Try sudo cloud-init collect-logs\n" == m_stderr.getvalue() + " Try sudo cloud-init collect-logs" in caplog.text ) def test_collect_logs_creates_tarfile( - self, m_getuid, m_log_paths, mocker, tmpdir + self, m_getuid, m_log_paths, mocker, tmpdir, caplog ): """collect-logs creates a tarfile with all related cloud-init info.""" m_getuid.return_value = 100 @@ -101,13 +97,10 @@ def fake_subprocess_call(cmd, stdout=None, stderr=None): ) stdout.write(expected_subp[cmd_tuple]) - fake_stderr = mock.MagicMock() - mocker.patch(M_PATH + "subp", side_effect=fake_subp) mocker.patch( M_PATH + "subprocess.call", side_effect=fake_subprocess_call ) - mocker.patch(M_PATH + "sys.stderr", fake_stderr) mocker.patch(M_PATH + "INSTALLER_APPORT_FILES", []) mocker.patch(M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", []) logs.collect_logs(output_tarfile, include_userdata=False) @@ -151,10 +144,10 @@ def fake_subprocess_call(cmd, stdout=None, stderr=None): assert "results" == load_text_file( os.path.join(out_logdir, "run", "cloud-init", "results.json") ) - fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile) + assert f"Wrote {output_tarfile}" in caplog.text def test_collect_logs_includes_optional_userdata( - self, m_getuid, mocker, tmpdir, m_log_paths + self, m_getuid, mocker, tmpdir, m_log_paths, caplog ): """collect-logs include userdata when --include-userdata is set.""" m_getuid.return_value = 0 @@ -215,13 +208,10 @@ def fake_subprocess_call(cmd, stdout=None, stderr=None): ) stdout.write(expected_subp[cmd_tuple]) - fake_stderr = mock.MagicMock() - mocker.patch(M_PATH + "subp", side_effect=fake_subp) mocker.patch( M_PATH + "subprocess.call", side_effect=fake_subprocess_call ) - mocker.patch(M_PATH + "sys.stderr", fake_stderr) mocker.patch(M_PATH + "INSTALLER_APPORT_FILES", []) mocker.patch(M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", []) logs.collect_logs(output_tarfile, include_userdata=True) @@ -239,7 +229,7 @@ def fake_subprocess_call(cmd, stdout=None, stderr=None): m_log_paths.instance_data_sensitive.name, ) ) - fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile) + assert f"Wrote {output_tarfile}" in caplog.text @pytest.mark.parametrize( "cmd, expected_file_contents, expected_return_value", @@ -278,7 +268,6 @@ def test_write_command_output_to_file( filename=output_file, cmd=cmd, msg="", - verbosity=1, ) assert expected_return_value == return_output @@ -301,7 +290,6 @@ def test_stream_command_output_to_file( filename=output_file, cmd=cmd, msg="", - verbosity=1, ) assert expected_file_contents == load_text_file(output_file) @@ -382,7 +370,6 @@ def test_include_installer_logs_when_present( logs._collect_installer_logs( log_dir=tmpdir.strpath, include_userdata=include_userdata, - verbosity=0, ) expect_userdata = bool(include_userdata and apport_sensitive_files) # when subiquity artifacts exist, and userdata set true, expect logs From 19c86ffb45ee3b1aa0034a447fa42028f0945da9 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 19 Jun 2024 15:17:39 -0500 Subject: [PATCH 027/131] refactor: logs.py pathlib changes (#5414) Switch to pathlib where appropriate and call consistently --- cloudinit/cmd/devel/logs.py | 48 +++++++++++++++----------- tests/unittests/cmd/devel/test_logs.py | 12 +++---- 2 files changed, 33 insertions(+), 27 deletions(-) diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py index d3600d2ef05..a6b61fcb720 100755 --- a/cloudinit/cmd/devel/logs.py +++ b/cloudinit/cmd/devel/logs.py @@ -14,8 +14,7 @@ import subprocess import sys from datetime 
import datetime, timezone -from pathlib import Path -from typing import NamedTuple, Optional, cast +from typing import List, NamedTuple, Optional, cast from cloudinit import log from cloudinit.cmd.devel import read_cfg_paths @@ -157,35 +156,42 @@ def _get_copytree_ignore_files(paths: LogPaths): return ignored_files -def _write_command_output_to_file(cmd, filename, msg): +def _write_command_output_to_file( + cmd: List[str], + file_path: pathlib.Path, + msg: str, +) -> Optional[str]: """Helper which runs a command and writes output or error to filename.""" - ensure_dir(os.path.dirname(filename)) + file_path.parent.mkdir(parents=True, exist_ok=True) try: output = subp(cmd).stdout except ProcessExecutionError as e: - write_file(filename, str(e)) + write_file(file_path, str(e)) LOG.debug("collecting %s failed.", msg) + output = None else: - write_file(filename, output) - LOG.debug("collected %s", msg) - return output + write_file(file_path, output) + LOG.debug("collected %s to file '%s'", msg, file_path.stem) + return output -def _stream_command_output_to_file(cmd, filename, msg): +def _stream_command_output_to_file( + cmd: List[str], file_path: pathlib.Path, msg: str +) -> None: """Helper which runs a command and writes output or error to filename. `subprocess.call` is invoked directly here to stream output to the file. Otherwise memory usage can be high for large outputs. """ - ensure_dir(os.path.dirname(filename)) + file_path.parent.mkdir(parents=True, exist_ok=True) try: - with open(filename, "w") as f: + with file_path.open("w") as f: subprocess.call(cmd, stdout=f, stderr=f) # nosec B603 except OSError as e: - write_file(filename, str(e)) + write_file(file_path, str(e)) LOG.debug("collecting %s failed.", msg) else: - LOG.debug("collected %s", msg) + LOG.debug("collected %s to file '%s'", msg, file_path.stem) def _collect_file(path: str, out_dir: str) -> None: @@ -199,13 +205,13 @@ def _collect_file(path: str, out_dir: str) -> None: def _collect_installer_logs(log_dir: str, include_userdata: bool) -> None: """Obtain subiquity logs and config files.""" for src_file in INSTALLER_APPORT_FILES: - destination_dir = Path(log_dir + src_file.path).parent + destination_dir = pathlib.Path(log_dir + src_file.path).parent if not destination_dir.exists(): ensure_dir(str(destination_dir)) _collect_file(src_file.path, str(destination_dir)) if include_userdata: for src_file in INSTALLER_APPORT_SENSITIVE_FILES: - destination_dir = Path(log_dir + src_file.path).parent + destination_dir = pathlib.Path(log_dir + src_file.path).parent if not destination_dir.exists(): ensure_dir(str(destination_dir)) _collect_file(src_file.path, str(destination_dir)) @@ -214,12 +220,12 @@ def _collect_installer_logs(log_dir: str, include_userdata: bool) -> None: def _collect_version_info(log_dir: str) -> None: version = _write_command_output_to_file( cmd=["cloud-init", "--version"], - filename=os.path.join(log_dir, "version"), + file_path=pathlib.Path(log_dir, "version"), msg="cloud-init --version", ) dpkg_ver = _write_command_output_to_file( cmd=["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"], - filename=os.path.join(log_dir, "dpkg-version"), + file_path=pathlib.Path(log_dir, "dpkg-version"), msg="dpkg version", ) if not version: @@ -230,12 +236,12 @@ def _collect_version_info(log_dir: str) -> None: def _collect_system_logs(log_dir: str) -> None: _stream_command_output_to_file( cmd=["dmesg"], - filename=os.path.join(log_dir, "dmesg.txt"), + file_path=pathlib.Path(log_dir, "dmesg.txt"), msg="dmesg output", ) 
_stream_command_output_to_file( cmd=["journalctl", "--boot=0", "-o", "short-precise"], - filename=os.path.join(log_dir, "journal.txt"), + file_path=pathlib.Path(log_dir, "journal.txt"), msg="systemd journal of current boot", ) @@ -270,11 +276,11 @@ def _collect_run_dir(log_dir: str, paths: LogPaths) -> None: LOG.debug("directory '%s' did not exist", paths.run_dir) if os.path.exists(os.path.join(paths.run_dir, "disabled")): # Fallback to grab previous cloud/data - cloud_data_dir = Path(paths.cloud_data) + cloud_data_dir = pathlib.Path(paths.cloud_data) if cloud_data_dir.exists(): shutil.copytree( str(cloud_data_dir), - Path(log_dir + str(cloud_data_dir)), + pathlib.Path(log_dir + str(cloud_data_dir)), ) diff --git a/tests/unittests/cmd/devel/test_logs.py b/tests/unittests/cmd/devel/test_logs.py index f8385a4c3ef..b1d9f585d30 100644 --- a/tests/unittests/cmd/devel/test_logs.py +++ b/tests/unittests/cmd/devel/test_logs.py @@ -256,17 +256,17 @@ def fake_subprocess_call(cmd, stdout=None, stderr=None): def test_write_command_output_to_file( self, m_getuid, - tmpdir, + tmp_path, cmd, expected_file_contents, expected_return_value, ): m_getuid.return_value = 100 - output_file = tmpdir.join("test-output-file.txt") + output_file = tmp_path / "test-output-file.txt" return_output = logs._write_command_output_to_file( - filename=output_file, cmd=cmd, + file_path=output_file, msg="", ) @@ -281,14 +281,14 @@ def test_write_command_output_to_file( ], ) def test_stream_command_output_to_file( - self, m_getuid, tmpdir, cmd, expected_file_contents + self, m_getuid, tmp_path, cmd, expected_file_contents ): m_getuid.return_value = 100 - output_file = tmpdir.join("test-output-file.txt") + output_file = tmp_path / "test-output-file.txt" logs._stream_command_output_to_file( - filename=output_file, cmd=cmd, + file_path=output_file, msg="", ) From 6e4153b346bc0d3f3422c01a3f93ecfb28269da2 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 19 Jun 2024 15:18:12 -0500 Subject: [PATCH 028/131] refactor: logs.py add typing and small misc refactors (#5414) --- cloudinit/cmd/devel/logs.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py index a6b61fcb720..f5ae53ce26a 100755 --- a/cloudinit/cmd/devel/logs.py +++ b/cloudinit/cmd/devel/logs.py @@ -100,7 +100,9 @@ class ApportFile(NamedTuple): ] -def get_parser(parser=None): +def get_parser( + parser: Optional[argparse.ArgumentParser] = None, +) -> argparse.ArgumentParser: """Build or extend and arg parser for collect-logs utility. 
@param parser: Optional existing ArgumentParser instance representing the
@@ -145,7 +147,7 @@ def get_parser(parser=None):
     return parser
 
 
-def _get_copytree_ignore_files(paths: LogPaths):
+def _get_copytree_ignore_files(paths: LogPaths) -> List[str]:
     """Return a list of files to ignore for /run/cloud-init directory"""
     ignored_files = [
         "hook-hotplug-cmd",  # named pipe for hotplug
@@ -229,8 +231,7 @@ def _collect_version_info(log_dir: str) -> None:
         msg="dpkg version",
     )
     if not version:
-        version = dpkg_ver if dpkg_ver else "not-available"
-        LOG.debug("collected cloud-init version: %s", version)
+        version = dpkg_ver or "not-available"
 
 
 def _collect_system_logs(log_dir: str) -> None:
@@ -333,7 +334,7 @@ def _setup_logger(verbosity: int) -> None:
     LOG.addHandler(handler)
 
 
-def handle_collect_logs_args(name, args):
+def handle_collect_logs_args(_name: str, args: argparse.Namespace) -> int:
     """Handle calls to 'cloud-init collect-logs' as a subcommand."""
     _setup_logger(args.verbosity)
     return collect_logs(
@@ -342,11 +343,5 @@ def handle_collect_logs_args(name, args):
     )
 
 
-def main():
-    """Tool to collect and tar all cloud-init related logs."""
-    parser = get_parser()
-    return handle_collect_logs_args("collect-logs", parser.parse_args())
-
-
 if __name__ == "__main__":
-    sys.exit(main())
+    sys.exit(handle_collect_logs_args("", get_parser().parse_args()))

From 23be88d005983aa39b248ad55b1a3008973673c6 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Tue, 16 Jul 2024 13:06:43 -0600
Subject: [PATCH 029/131] fix(ds-identify): Detect nocloud when seedfrom url
 exists (#5515)

With this change, the following config in cloud.cfg.d/ will select
NoCloud in network stage.

```
datasource_list: [ GCE, NoCloud, None ]
datasource:
  NoCloud:
    seedfrom: http://0.0.0.0:8000/
```

Previously, two or fewer datasources in the datasource_list were
required to get this behavior, which was undocumented and not
intuitive.

ds-identify already allowed inline user-data and meta-data to trigger
detection.

Add ds-identify unittests for seedfrom and inline user-data.
Add DataSourceNoCloud.ds_detect() unittests for seedfrom and inline
user-data.
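For comparison, the pre-existing inline detection path is triggered by
config along these lines (an illustrative sketch only; the
datasource_list entries and values here are placeholders, not config
shipped by this change):

```
datasource_list: [ Azure, OpenStack, NoCloud, None ]
datasource:
  NoCloud:
    meta-data: ""
    user-data: |
      #cloud-config
```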
---
 cloudinit/sources/DataSourceNoCloud.py  | 11 +++++
 tests/unittests/sources/test_nocloud.py | 23 ++++++++++
 tests/unittests/test_ds_identify.py     | 61 +++++++++++++++++++++++++
 tools/ds-identify                       |  8 +++-
 4 files changed, 101 insertions(+), 2 deletions(-)

diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index a7d3f3adfaa..d5ca84a1a22 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -408,6 +408,17 @@ def ds_detect(self):
         if serial == "nocloud-net":
             log_deprecated()
             return True
+        elif (
+            self.sys_cfg.get("datasource", {})
+            .get("NoCloud", {})
+            .get("seedfrom")
+        ):
+            LOG.debug(
+                "Machine is configured by system configuration to run on "
+                "single datasource %s.",
+                self,
+            )
+            return True
 
         return False
 
diff --git a/tests/unittests/sources/test_nocloud.py b/tests/unittests/sources/test_nocloud.py
index 15b25196db7..b98ff73c9ac 100644
--- a/tests/unittests/sources/test_nocloud.py
+++ b/tests/unittests/sources/test_nocloud.py
@@ -98,6 +98,29 @@ def test_nocloud_seed_dir_non_lxd_platform_is_nocloud(self, m_is_lxd):
         self.assertEqual(dsrc.platform_type, "nocloud")
         self.assertEqual(dsrc.subplatform, "seed-dir (%s)" % seed_dir)
 
+    def test_nocloud_seedfrom(self, m_is_lxd):
+        """Check that a seedfrom triggers detection"""
+        assert dsNoCloud(
+            sys_cfg={"datasource": {"NoCloud": {"seedfrom": "somevalue"}}},
+            distro=None,
+            paths=self.paths,
+        ).ds_detect()
+
+    def test_nocloud_user_data_meta_data(self, m_is_lxd):
+        """Check that meta-data and user-data trigger detection"""
+        assert dsNoCloud(
+            sys_cfg={
+                "datasource": {
+                    "NoCloud": {
+                        "meta-data": "",
+                        "user-data": "#cloud-config\nsome-config",
+                    }
+                }
+            },
+            distro=None,
+            paths=self.paths,
+        ).ds_detect()
+
     def test_fs_label(self, m_is_lxd):
         # find_devs_with should not be called ff fs_label is None
         class PsuedoException(Exception):
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index e71e853f314..d8f10c1ab8f 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -867,6 +867,32 @@ def test_configured_list_with_none(self):
         mydata["files"][cfgpath] = 'datasource_list: ["Ec2", "None"]\n'
         self._check_via_dict(mydata, rc=RC_FOUND, dslist=["Ec2", DS_NONE])
 
+    def test_nocloud_seedfrom(self):
+        """Check seedfrom system config detects nocloud.
+
+        Verify that a cloud.cfg.d/ that contains more than two datasources in
+        its datasource_list will positively identify nocloud when a
+        datasource.NoCloud.seedfrom value exists
+        """
+        self._check_via_dict(
+            copy.deepcopy(VALID_CFG["NoCloud-seedfrom"]),
+            rc=RC_FOUND,
+            dslist=["NoCloud", DS_NONE],
+        )
+
+    def test_nocloud_userdata_and_metadata(self):
+        """Check user-data and meta-data system config detects nocloud.
+ + Verify that a cloud.cfg.d/ that contains more than two datasources in + its datasource_list will positively identify nocloud when both + datasource.NoCloud.{user-data,meta-data} value exists + """ + self._check_via_dict( + copy.deepcopy(VALID_CFG["NoCloud-user-data-meta-data"]), + rc=RC_FOUND, + dslist=["NoCloud", DS_NONE], + ) + def test_aliyun_identified(self): """Test that Aliyun cloud is identified by product id.""" self._test_ds_found("AliYun") @@ -1964,6 +1990,41 @@ def _print_run_output(rc, out, err, cfg, files): os.path.join(P_SEED_DIR, "nocloud", "meta-data"): "md\n", }, }, + "NoCloud-seedfrom": { + "ds": "NoCloud", + "files": { + # Also include a datasource list of more than just + # [NoCloud, None], because that would automatically select + # NoCloud without checking + "etc/cloud/cloud.cfg.d/test.cfg": dedent( + """\ + datasource_list: [ Azure, OpenStack, NoCloud, None ] + datasource: + NoCloud: + seedfrom: http://0.0.0.0/test + """ + ) + }, + }, + "NoCloud-user-data-meta-data": { + "ds": "NoCloud", + "files": { + # Also include a datasource list of more than just + # [NoCloud, None], because that would automatically select + # NoCloud without checking + "etc/cloud/cloud.cfg.d/test.cfg": dedent( + """\ + datasource_list: [ Azure, OpenStack, NoCloud, None ] + datasource: + NoCloud: + meta-data: "" + user-data: | + #cloud-config + + """ + ) + }, + }, "NoCloud-seed-ubuntu-core": { "ds": "NoCloud", "files": { diff --git a/tools/ds-identify b/tools/ds-identify index 31a15fed9e1..f6a97461178 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -989,8 +989,12 @@ dscheck_NoCloud() { fi # This is a bit hacky, but a NoCloud false positive isn't the end of the world - if check_config "NoCloud" && check_config "user-data" && check_config "meta-data"; then - return ${DS_FOUND} + if check_config "NoCloud"; then + if check_config "user-data" && check_config "meta-data"; then + return ${DS_FOUND} + elif check_config "seedfrom"; then + return ${DS_FOUND} + fi fi return ${DS_NOT_FOUND} From 7703634ec048aa00ddf6ef7a5d552004a84c4f04 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Tue, 16 Jul 2024 13:54:27 -0600 Subject: [PATCH 030/131] chore: Improve detection logging for user clarity (#5515) The nocloud datasource logs messages that are sometimes confused by users for errors. Clarify them. 
Also, remove redundant information from the logs: - simplify log wording - only include seed and dsmode information in nocloud string when non-default values are used --- cloudinit/sources/DataSourceNoCloud.py | 34 +++++++++++++++++++++++--- cloudinit/sources/__init__.py | 9 +++---- 2 files changed, 35 insertions(+), 8 deletions(-) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index d5ca84a1a22..a441c1edcf4 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -36,8 +36,17 @@ def __init__(self, sys_cfg, distro, paths): self._network_eni = None def __str__(self): - root = sources.DataSource.__str__(self) - return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) + """append seed and dsmode info when they contain non-default values""" + return ( + super().__str__() + + " " + + (f"[seed={self.seed}]" if self.seed else "") + + ( + f"[dsmode={self.dsmode}]" + if self.dsmode != sources.DSMODE_NETWORK + else "" + ) + ) def _get_devices(self, label): fslist = util.find_devs_with("TYPE=vfat") @@ -167,7 +176,7 @@ def _pp2d_callback(mp, data): seedfound = proto break if not seedfound: - LOG.debug("Seed from %s not supported by %s", seedfrom, self) + self._log_unusable_seedfrom(seedfrom) return False # check and replace instances of known dmi. such as # chassis-serial-number or baseboard-product-name @@ -215,6 +224,16 @@ def platform_type(self): self._platform_type = "lxd" if util.is_lxd() else "nocloud" return self._platform_type + def _log_unusable_seedfrom(self, seedfrom: str): + """Stage-specific level and message.""" + LOG.info( + "%s only uses seeds starting with %s - will try to use %s " + "in the network stage.", + self, + self.supported_seed_starts, + seedfrom, + ) + def _get_cloud_name(self): """Return unknown when 'cloud-name' key is absent from metadata.""" return sources.METADATA_UNKNOWN @@ -374,6 +393,15 @@ def __init__(self, sys_cfg, distro, paths): "ftps://", ) + def _log_unusable_seedfrom(self, seedfrom: str): + """Stage-specific level and message.""" + LOG.warning( + "%s only uses seeds starting with %s - %s is not valid.", + self, + self.supported_seed_starts, + seedfrom, + ) + def ds_detect(self): """Check dmi and kernel command line for dsname diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 27c37ee1e13..eb39ddc7bb3 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -393,14 +393,13 @@ def override_ds_detect(self) -> bool: """ if self.dsname.lower() == parse_cmdline().lower(): LOG.debug( - "Machine is configured by the kernel command line to run on " - "single datasource %s.", + "Kernel command line set to use a single datasource %s.", self, ) return True elif self.sys_cfg.get("datasource_list", []) == [self.dsname]: LOG.debug( - "Machine is configured to run on single datasource %s.", self + "Datasource list set to use a single datasource %s.", self ) return True return False @@ -411,12 +410,12 @@ def _check_and_get_data(self): return self._get_data() elif self.ds_detect(): LOG.debug( - "Detected platform: %s. 
Checking for active instance data",
+                "Detected %s",
                 self,
             )
             return self._get_data()
         else:
-            LOG.debug("Datasource type %s is not detected.", self)
+            LOG.debug("Did not detect %s", self)
             return False
 
     def _get_standardized_metadata(self, instance_data):

From 16a31981259fca3c34c15ad71666f6e9c4077ef9 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Wed, 17 Jul 2024 07:54:10 -0600
Subject: [PATCH 031/131] chore: Deprecate partially supported system config
 (#5515)

ds-identify does not support the fs_label key. This key is only
partially supported. Deprecate it.

Users of custom labels may switch to cidata or CIDATA.

Note: Tools such as cloud-localds hard-code the label.
---
 cloudinit/sources/DataSourceNoCloud.py    | 6 ++++++
 doc/examples/cloud-config-datasources.txt | 3 ---
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index a441c1edcf4..69ebab91479 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -130,6 +130,12 @@ def _pp2d_callback(mp, data):
 
         label = self.ds_cfg.get("fs_label", "cidata")
         if label is not None:
+            if label.lower() != "cidata":
+                util.deprecate(
+                    deprecated="Custom fs_label keys",
+                    deprecated_version="24.3",
+                    extra_message="This key isn't supported by ds-identify.",
+                )
             for dev in self._get_devices(label):
                 try:
                     LOG.debug("Attempting to use data from %s", dev)
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index 43b344184d4..0e1527bf952 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -34,9 +34,6 @@ datasource:
       # seedfrom: http://my.example.com/i-abcde/
       seedfrom: None
 
-      # fs_label: the label on filesystems to be searched for NoCloud source
-      fs_label: cidata
-
       # these are optional, but allow you to basically provide a datasource
       # right here
       user-data: |

From 5532b4a69d9bc59b0d82a7df32228171d9f3d85c Mon Sep 17 00:00:00 2001
From: Carlos Nihelton
Date: Fri, 19 Jul 2024 15:17:47 -0700
Subject: [PATCH 032/131] feat(wsl): Special handling Landscape client config
 tags (#5460)

UP4W business logic is such that its data overrides user data at a key
(module) level. That means the entire Landscape config is overridden if
both agent data and user data contain config for that module.

Yet, for better usability, computer tags must be assignable per
instance. That's not possible with agent.yaml, because it's meant to be
global: its config data affects all Ubuntu WSL instances.

Thus this aims to make a special case for landscape.client.tags: if
present in user-provided data (either Landscape or local user, whatever
is picked up before merging with agent.yaml), its value overwrites any
tags set by agent.yaml.

Only landscape.client.tags are treated specially. The pre-existing
merge rules still apply for any other value present in both agent.yaml
and user provided data.

Fixes UDENG-2464
---
 cloudinit/sources/DataSourceWSL.py    |  11 ++
 doc/rtd/reference/datasources/wsl.rst |   5 +-
 tests/unittests/sources/test_wsl.py   | 273 +++++++++++++++++++++++++-
 3 files changed, 278 insertions(+), 11 deletions(-)

diff --git a/cloudinit/sources/DataSourceWSL.py b/cloudinit/sources/DataSourceWSL.py
index b81298927a0..7a75ff4e691 100644
--- a/cloudinit/sources/DataSourceWSL.py
+++ b/cloudinit/sources/DataSourceWSL.py
@@ -328,9 +328,13 @@ def _get_data(self) -> bool:
         # provides them instead.
         # That's the reason for not using util.mergemanydict().
merged: dict = {} + user_tags: str = "" overridden_keys: typing.List[str] = [] if user_data: merged = user_data + user_tags = ( + merged.get("landscape", {}).get("client", {}).get("tags", "") + ) if agent_data: if user_data: LOG.debug("Merging both user_data and agent.yaml configs.") @@ -345,6 +349,13 @@ def _get_data(self) -> bool: ", ".join(overridden_keys) ) ) + if user_tags and merged.get("landscape", {}).get("client"): + LOG.debug( + "Landscape client conf updated with user-data" + " landscape.client.tags: %s", + user_tags, + ) + merged["landscape"]["client"]["tags"] = user_tags self.userdata_raw = "#cloud-config\n%s" % yaml.dump(merged) return True diff --git a/doc/rtd/reference/datasources/wsl.rst b/doc/rtd/reference/datasources/wsl.rst index ab96f9490c4..c6970448b5c 100644 --- a/doc/rtd/reference/datasources/wsl.rst +++ b/doc/rtd/reference/datasources/wsl.rst @@ -66,7 +66,10 @@ following paths: the Ubuntu Pro for WSL agent. If this file is present, its modules will be merged with (1), overriding any conflicting modules. If (1) is not provided, then this file will be merged with any valid user-provided configuration - instead. + instead. Exception is made for Landscape client config computer tags. If + user provided data contains a value for ``landscape.client.tags`` it will be + used instead of the one provided by the ``agent.yaml``, which is treated as + a default. Then, if a file from (1) is not found, a user-provided configuration will be looked for instead in the following order: diff --git a/tests/unittests/sources/test_wsl.py b/tests/unittests/sources/test_wsl.py index 31c5c897ed5..2f26d7fd565 100644 --- a/tests/unittests/sources/test_wsl.py +++ b/tests/unittests/sources/test_wsl.py @@ -355,6 +355,8 @@ def test_get_data_sh(self, m_lsb_release, tmpdir, paths): @mock.patch("cloudinit.util.get_linux_distro") def test_data_precedence(self, m_get_linux_dist, tmpdir, paths): + """Validates the precedence of user-data files.""" + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO # Set up basic user data: @@ -400,9 +402,17 @@ def test_data_precedence(self, m_get_linux_dist, tmpdir, paths): assert "" == shell_script - # Additionally set up some UP4W agent data: + @mock.patch("cloudinit.util.get_linux_distro") + def test_interaction_with_pro(self, m_get_linux_dist, tmpdir, paths): + """Validates the interaction of user-data and Pro For WSL agent data""" + + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO + + user_file = tmpdir.join(".cloud-init", "ubuntu-24.04.user-data") + user_file.dirpath().mkdir() + user_file.write("#cloud-config\nwrite_files:\n- path: /etc/wsl.conf") - # Now the winner should be the merge of the agent and Landscape data. + # The winner should be the merge of the agent and user provided data. 
ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init")
         os.makedirs(ubuntu_pro_tmp, exist_ok=True)
@@ -410,9 +420,14 @@ def test_data_precedence(self, m_get_linux_dist, tmpdir, paths):
         agent_file.write(
             """#cloud-config
 landscape:
+  host:
+    url: landscape.canonical.com:6554
   client:
-    account_name: agenttest
-ubuntu_advantage:
+    account_name: agenttest
+    url: https://landscape.canonical.com/message-system
+    ping_url: https://landscape.canonical.com/ping
+    tags: wsl
+ubuntu_pro:
   token: testtoken"""
         )
@@ -436,17 +451,93 @@ def test_data_precedence(self, m_get_linux_dist, tmpdir, paths):
         )
         assert "wsl.conf" in userdata
         assert "packages" not in userdata
-        assert "ubuntu_advantage" in userdata
+        assert "ubuntu_pro" in userdata
         assert "landscape" in userdata
         assert "agenttest" in userdata
 
-        # Additionally set up some Landscape provided user data
+    @mock.patch("cloudinit.util.get_linux_distro")
+    def test_landscape_vs_local_user(self, m_get_linux_dist, tmpdir, paths):
+        """Validates the precedence of Landscape-provided over local data"""
+
+        m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO
+
+        user_file = tmpdir.join(".cloud-init", "ubuntu-24.04.user-data")
+        user_file.dirpath().mkdir()
+        user_file.write(
+            """#cloud-config
+ubuntu_pro:
+  token: usertoken
+package_update: true"""
+        )
+
+        ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init")
+        os.makedirs(ubuntu_pro_tmp, exist_ok=True)
         landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME)
         landscape_file.write(
             """#cloud-config
 landscape:
     client:
       account_name: landscapetest
+      tags: tag_aiml,tag_dev
+locale: en_GB.UTF-8"""
+        )
+
+        # Run the datasource
+        ds = wsl.DataSourceWSL(
+            sys_cfg=SAMPLE_CFG,
+            distro=_get_distro("ubuntu"),
+            paths=paths,
+        )
+
+        assert ds.get_data() is True
+        ud = ds.get_userdata()
+        assert ud is not None
+        userdata = cast(
+            str,
+            join_payloads_from_content_type(
+                cast(MIMEMultipart, ud), "text/cloud-config"
+            ),
+        )
+
+        assert (
+            "locale" in userdata
+            and "landscapetest" in userdata
+            and "ubuntu_pro" not in userdata
+            and "package_update" not in userdata
+        ), "Landscape data should have overridden user provided data"
+
+    @mock.patch("cloudinit.util.get_linux_distro")
+    def test_landscape_provided_data(self, m_get_linux_dist, tmpdir, paths):
+        """Validates the interaction of Pro For WSL agent and Landscape data"""
+
+        m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO
+
+        ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init")
+        os.makedirs(ubuntu_pro_tmp, exist_ok=True)
+
+        agent_file = ubuntu_pro_tmp.join("agent.yaml")
+        agent_file.write(
+            """#cloud-config
+landscape:
+  host:
+    url: hosted.com:6554
+  client:
+    account_name: agenttest
+    url: https://hosted.com/message-system
+    ping_url: https://hosted.com/ping
+    ssl_public_key: C:\\Users\\User\\server.pem
+    tags: wsl
+ubuntu_pro:
+  token: testtoken"""
+        )
+
+        landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME)
+        landscape_file.write(
+            """#cloud-config
+landscape:
+  client:
+    account_name: landscapetest
+    tags: tag_aiml,tag_dev
 package_update: true"""
         )
 
@@ -469,14 +560,176 @@ def test_data_precedence(self, m_get_linux_dist, tmpdir, paths):
             ),
         )
 
-        assert "wsl.conf" not in userdata
-        assert "packages" not in userdata
-        assert "ubuntu_advantage" in userdata
+        assert "ubuntu_pro" in userdata, "Agent data should be present"
         assert "package_update" in userdata, (
             "package_update entry should not be overriden by agent data"
             " nor ignored"
         )
-        assert "landscape" in userdata
         assert (
             "landscapetest" not in userdata and "agenttest" in
userdata ), "Landscape account name should have been overriden by agent data" + # Make sure we have tags from Landscape data, not agent's + assert ( + "tag_aiml" in userdata and "tag_dev" in userdata + ), "User-data should override agent data's Landscape computer tags" + assert "wsl" not in userdata + + @mock.patch("cloudinit.util.get_linux_distro") + def test_with_landscape_no_tags(self, m_get_linux_dist, tmpdir, paths): + """Validates the Pro For WSL default Landscape tags are applied""" + + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO + + ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") + os.makedirs(ubuntu_pro_tmp, exist_ok=True) + + agent_file = ubuntu_pro_tmp.join("agent.yaml") + agent_file.write( + """#cloud-config +landscape: + host: + url: landscape.canonical.com:6554 + client: + account_name: agenttest + url: https://landscape.canonical.com/message-system + ping_url: https://landscape.canonical.com/ping + tags: wsl +ubuntu_pro: + token: testtoken""" + ) + # Set up some Landscape provided user data without tags + landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME) + landscape_file.write( + """#cloud-config +landscape: + client: + account_name: landscapetest +package_update: true""" + ) + + # Run the datasource + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=_get_distro("ubuntu"), + paths=paths, + ) + + assert ds.get_data() is True + ud = ds.get_userdata() + + assert ud is not None + userdata = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ), + ) + + assert ( + "tags: wsl" in userdata + ), "Landscape computer tags should match UP4W agent's data defaults" + + @mock.patch("cloudinit.util.get_linux_distro") + def test_with_no_tags_at_all(self, m_get_linux_dist, tmpdir, paths): + """Asserts the DS still works if there are no Landscape tags at all""" + + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO + + user_file = tmpdir.join(".cloud-init", "ubuntu-24.04.user-data") + user_file.dirpath().mkdir() + user_file.write("#cloud-config\nwrite_files:\n- path: /etc/wsl.conf") + + ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") + os.makedirs(ubuntu_pro_tmp, exist_ok=True) + + agent_file = ubuntu_pro_tmp.join("agent.yaml") + # Make sure we don't crash if there are no tags anywhere. + agent_file.write( + """#cloud-config +ubuntu_pro: + token: up4w_token""" + ) + # Set up some Landscape provided user data without tags + landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME) + landscape_file.write( + """#cloud-config +landscape: + client: + account_name: landscapetest +package_update: true""" + ) + + # Run the datasource + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=_get_distro("ubuntu"), + paths=paths, + ) + + assert ds.get_data() is True + ud = ds.get_userdata() + + assert ud is not None + userdata = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ), + ) + assert "landscapetest" in userdata + assert "up4w_token" in userdata + assert "tags" not in userdata + + @mock.patch("cloudinit.util.get_linux_distro") + def test_with_no_client_subkey(self, m_get_linux_dist, tmpdir, paths): + """Validates the DS works without the landscape.client subkey""" + + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO + ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") + os.makedirs(ubuntu_pro_tmp, exist_ok=True) + + agent_file = ubuntu_pro_tmp.join("agent.yaml") + # Make sure we don't crash if there is no client subkey. 
+ # (That would be a bug in the agent as there is no other config + # value for landscape outside of landscape.client, so I'm making up + # some non-sense keys just to make sure we won't crash) + agent_file.write( + """#cloud-config +landscape: + server: + port: 6554 +ubuntu_pro: + token: up4w_token""" + ) + + landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME) + landscape_file.write( + """#cloud-config +landscape: + client: + account_name: landscapetest +package_update: true""" + ) + # Run the datasource + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=_get_distro("ubuntu"), + paths=paths, + ) + + assert ds.get_data() is True + ud = ds.get_userdata() + + assert ud is not None + userdata = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ), + ) + assert "landscapetest" not in userdata + assert ( + "port: 6554" in userdata + ), "agent data should override the entire landscape config." + + assert "up4w_token" in userdata From e1147bd024e267f1dd025d447dafcad13059cb4f Mon Sep 17 00:00:00 2001 From: sxt1001 Date: Mon, 22 Jul 2024 21:23:05 +0800 Subject: [PATCH 033/131] add openeuler to distros in cc_spacewalk.py (#5530) Commit 441d8f81 adds openeuler to the list of supported distros in cc_spacewalk.py, but there is one omission. --- cloudinit/config/cc_spacewalk.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py index 6b364aa938b..7f19e633d21 100644 --- a/cloudinit/config/cc_spacewalk.py +++ b/cloudinit/config/cc_spacewalk.py @@ -18,7 +18,7 @@ LOG = logging.getLogger(__name__) -distros = ["redhat", "fedora"] +distros = ["redhat", "fedora", "openeuler"] required_packages = ["rhn-setup"] def_ca_cert_path = "/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT" From 7c2d4fd63948aad32829fce0f66dcba32d844e03 Mon Sep 17 00:00:00 2001 From: Lucas Ritzdorf <42657792+LRitzdorf@users.noreply.github.com> Date: Mon, 22 Jul 2024 08:05:30 -0600 Subject: [PATCH 034/131] feat: Support URI sources in `write_files` module (#5505) This change adds an optional `source` key to the `write_files` module, allowing users to specify a URI from which to load file contents. This facilitates more flexible multi-part configurations, as file contents can be managed via external sources such as independent Git repositories. 
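As a usage sketch (illustrative only; the URI and header values below
are placeholders), a file entry can point at a URL and keep inline
``content`` as a fallback for when the URI cannot be fetched:

```
#cloud-config
write_files:
- source:
    uri: https://example.com/artifacts/hello
    headers:
      Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
  content: fallback text, written only if the URI retrieval fails
  path: /usr/bin/hello
  permissions: '0755'
```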
Fixes GH-5500 --- cloudinit/config/cc_write_files.py | 64 +++++++++++++-- cloudinit/config/cc_write_files_deferred.py | 3 +- .../schemas/schema-cloud-config-v1.json | 22 +++++ doc/module-docs/cc_runcmd/example1.yaml | 1 - doc/module-docs/cc_write_files/data.yaml | 10 ++- doc/module-docs/cc_write_files/example6.yaml | 9 ++ tests/unittests/config/test_cc_write_files.py | 82 +++++++++++++++++++ tools/.github-cla-signers | 1 + 8 files changed, 183 insertions(+), 9 deletions(-) create mode 100644 doc/module-docs/cc_write_files/example6.yaml diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index c97d1225afc..004ede438d9 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -9,8 +9,9 @@ import base64 import logging import os +from typing import Optional -from cloudinit import util +from cloudinit import url_helper, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -44,7 +45,8 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: name, ) return - write_files(name, filtered_files, cloud.distro.default_owner) + ssl_details = util.fetch_ssl_details(cloud.paths) + write_files(name, filtered_files, cloud.distro.default_owner, ssl_details) def canonicalize_extraction(encoding_type): @@ -72,7 +74,7 @@ def canonicalize_extraction(encoding_type): return [TEXT_PLAIN_ENC] -def write_files(name, files, owner: str): +def write_files(name, files, owner: str, ssl_details: Optional[dict] = None): if not files: return @@ -86,8 +88,23 @@ def write_files(name, files, owner: str): ) continue path = os.path.abspath(path) - extractions = canonicalize_extraction(f_info.get("encoding")) - contents = extract_contents(f_info.get("content", ""), extractions) + # Read content from provided URL, if any, or decode from inline + contents = read_url_or_decode( + f_info.get("source", None), + ssl_details, + f_info.get("content", None), + f_info.get("encoding", None), + ) + if contents is None: + LOG.warning( + "No content could be loaded for entry %s in module %s;" + " skipping", + i + 1, + name, + ) + continue + # Only create the file if content exists. This will not happen, for + # example, if the URL fails and no inline content was provided (u, g) = util.extract_usergroup(f_info.get("owner", owner)) perms = decode_perms(f_info.get("permissions"), DEFAULT_PERMS) omode = "ab" if util.get_cfg_option_bool(f_info, "append") else "wb" @@ -118,6 +135,43 @@ def decode_perms(perm, default): return default +def read_url_or_decode(source, ssl_details, content, encoding): + url = None if source is None else source.get("uri", None) + use_url = bool(url) + # Special case: empty URL and content. Write a blank file + if content is None and not use_url: + return "" + # Fetch file content from source URL, if provided + result = None + if use_url: + try: + # NOTE: These retry parameters are arbitrarily chosen defaults. + # They have no significance, and may be changed if appropriate + result = url_helper.read_file_or_url( + url, + headers=source.get("headers", None), + retries=3, + sec_between=3, + ssl_details=ssl_details, + ).contents + except Exception: + util.logexc( + LOG, + 'Failed to retrieve contents from source "%s"; falling back to' + ' data from "contents" key', + url, + ) + use_url = False + # If inline content is provided, and URL is not provided or is + # inaccessible, parse the former + if content is not None and not use_url: + # NOTE: This is not simply an "else"! 
Notice that `use_url` can change + # in the previous "if" block + extractions = canonicalize_extraction(encoding) + result = extract_contents(content, extractions) + return result + + def extract_contents(contents, extraction_types): result = contents for t in extraction_types: diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py index 0dc0662e1e7..87be2b45cfb 100644 --- a/cloudinit/config/cc_write_files_deferred.py +++ b/cloudinit/config/cc_write_files_deferred.py @@ -39,4 +39,5 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: name, ) return - write_files(name, filtered_files, cloud.distro.default_owner) + ssl_details = util.fetch_ssl_details(cloud.paths) + write_files(name, filtered_files, cloud.distro.default_owner, ssl_details) diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json index 253e90b8c55..75bdaa5865b 100644 --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -3386,6 +3386,28 @@ "default": "''", "description": "Optional content to write to the provided ``path``. When content is present and encoding is not 'text/plain', decode the content prior to writing. Default: ``''``" }, + "source": { + "type": "object", + "description": "Optional specification for content loading from an arbitrary URI", + "additionalProperties": false, + "properties": { + "uri": { + "type": "string", + "format": "uri", + "description": "URI from which to load file content. If loading fails repeatedly, ``content`` is used instead." + }, + "headers": { + "type": "object", + "description": "Optional HTTP headers to accompany load request, if applicable", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "uri" + ] + }, "owner": { "type": "string", "default": "root:root", diff --git a/doc/module-docs/cc_runcmd/example1.yaml b/doc/module-docs/cc_runcmd/example1.yaml index 03812f926e9..8c5efee96ab 100644 --- a/doc/module-docs/cc_runcmd/example1.yaml +++ b/doc/module-docs/cc_runcmd/example1.yaml @@ -4,4 +4,3 @@ runcmd: - [sh, -xc, 'echo $(date) '': hello world!'''] - [sh, -c, echo "=========hello world'========="] - ls -l /root -- [wget, 'http://example.org', -O, /tmp/index.html] diff --git a/doc/module-docs/cc_write_files/data.yaml b/doc/module-docs/cc_write_files/data.yaml index c59b8e2ea75..3d4b04da492 100644 --- a/doc/module-docs/cc_write_files/data.yaml +++ b/doc/module-docs/cc_write_files/data.yaml @@ -3,8 +3,9 @@ cc_write_files: Write out arbitrary content to files, optionally setting permissions. Parent folders in the path are created if absent. Content can be specified in plain text or binary. Data encoded with either base64 or binary gzip - data can be specified and will be decoded before being written. For empty - file creation, content can be omitted. + data can be specified and will be decoded before being written. Data can + also be loaded from an arbitrary URI. For empty file creation, content can + be omitted. .. note:: If multi-line data is provided, care should be taken to ensure it @@ -36,5 +37,10 @@ cc_write_files: Example 5: Defer writing the file until after the package (Nginx) is installed and its user is created. file: cc_write_files/example5.yaml + - comment: > + Example 6: Retrieve file contents from a URI source, rather than inline. + Especially useful with an external config-management repo, or for large + binaries. 
+ file: cc_write_files/example6.yaml name: Write Files title: Write arbitrary files diff --git a/doc/module-docs/cc_write_files/example6.yaml b/doc/module-docs/cc_write_files/example6.yaml new file mode 100644 index 00000000000..40112a58e17 --- /dev/null +++ b/doc/module-docs/cc_write_files/example6.yaml @@ -0,0 +1,9 @@ +#cloud-config +write_files: +- source: + uri: https://gitlab.example.com/some_ci_job/artifacts/hello + headers: + Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== + User-Agent: cloud-init on myserver.example.com + path: /usr/bin/hello + permissions: '0755' diff --git a/tests/unittests/config/test_cc_write_files.py b/tests/unittests/config/test_cc_write_files.py index 742f9e8cf53..ec0024971ad 100644 --- a/tests/unittests/config/test_cc_write_files.py +++ b/tests/unittests/config/test_cc_write_files.py @@ -9,6 +9,7 @@ import tempfile import pytest +import responses from cloudinit import util from cloudinit.config.cc_write_files import decode_perms, handle, write_files @@ -84,6 +85,16 @@ def test_simple(self): ) self.assertEqual(util.load_text_file(filename), expected) + def test_empty(self): + self.patchUtils(self.tmp) + filename = "/tmp/my.file" + write_files( + "test_empty", + [{"path": filename}], + self.owner, + ) + self.assertEqual(util.load_text_file(filename), "") + def test_append(self): self.patchUtils(self.tmp) existing = "hello " @@ -167,6 +178,71 @@ def test_handle_plain_text(self): "Unknown encoding type text/plain", self.logs.getvalue() ) + def test_file_uri(self): + self.patchUtils(self.tmp) + src_path = "/tmp/file-uri" + dst_path = "/tmp/file-uri-target" + content = "asdf" + util.write_file(src_path, content) + cfg = { + "write_files": [ + { + "source": {"uri": "file://" + src_path}, + "path": dst_path, + } + ] + } + cc = self.tmp_cloud("ubuntu") + handle("ignored", cfg, cc, []) + self.assertEqual( + util.load_text_file(src_path), util.load_text_file(dst_path) + ) + + @responses.activate + def test_http_uri(self): + self.patchUtils(self.tmp) + path = "/tmp/http-uri-target" + url = "http://hostname/path" + content = "more asdf" + responses.add(responses.GET, url, content) + cfg = { + "write_files": [ + { + "source": { + "uri": url, + "headers": { + "foo": "bar", + "blah": "blah", + }, + }, + "path": path, + } + ] + } + cc = self.tmp_cloud("ubuntu") + handle("ignored", cfg, cc, []) + self.assertEqual(content, util.load_text_file(path)) + + def test_uri_fallback(self): + self.patchUtils(self.tmp) + src_path = "/tmp/INVALID" + dst_path = "/tmp/uri-fallback-target" + content = "asdf" + util.del_file(src_path) + cfg = { + "write_files": [ + { + "source": {"uri": "file://" + src_path}, + "content": content, + "encoding": "text/plain", + "path": dst_path, + } + ] + } + cc = self.tmp_cloud("ubuntu") + handle("ignored", cfg, cc, []) + self.assertEqual(content, util.load_text_file(dst_path)) + def test_deferred(self): self.patchUtils(self.tmp) file_path = "/tmp/deferred.file" @@ -249,6 +325,12 @@ class TestWriteFilesSchema: "write_files": [ { "append": False, + "source": { + "uri": "http://a.com/a", + "headers": { + "Authorization": "Bearer SOME_TOKEN" + }, + }, "content": "a", "encoding": "text/plain", "owner": "jeff", diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 30c411e466a..d371798668f 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -110,6 +110,7 @@ licebmi linitio LKHN lkundrak +LRitzdorf lucasmoura lucendio lungj From 2534432bf606cdbf02b3c4700ff003b30bfce791 Mon Sep 17 00:00:00 2001 From: Minghe Ren Date: Mon, 
22 Jul 2024 07:10:33 -0700
Subject: [PATCH 035/131] fix(azurelinux): Change default usr_lib_exec path
 (#5526)

Change default usr_lib_exec from /usr/libexec/ to /usr/lib as azurelinux
installs cloud-init tool binaries under /usr/lib/cloud-init instead of
/usr/libexec/cloud-init
---
 cloudinit/distros/azurelinux.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/cloudinit/distros/azurelinux.py b/cloudinit/distros/azurelinux.py
index 5098a45942d..591b870020e 100644
--- a/cloudinit/distros/azurelinux.py
+++ b/cloudinit/distros/azurelinux.py
@@ -22,6 +22,8 @@
 
 
 class Distro(rhel.Distro):
+    usr_lib_exec = "/usr/lib"
+
     def __init__(self, name, cfg, paths):
         super().__init__(name, cfg, paths)
         self.osfamily = "azurelinux"

From 2d238522640fe35bb80f62637abf83727b950daf Mon Sep 17 00:00:00 2001
From: Alberto Contreras
Date: Mon, 22 Jul 2024 11:31:50 +0200
Subject: [PATCH 036/131] test: fix no ds cache tests (#5529)

After 7703634ec048aa00ddf6ef7a5d552004a84c4f04 log messages migrated
from: "Detected platform DataSource..." to "Detected DataSource..."

Adapt integration tests accordingly.
---
 tests/integration_tests/datasources/test_caching.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/integration_tests/datasources/test_caching.py b/tests/integration_tests/datasources/test_caching.py
index f043ddbe779..467585fa20c 100644
--- a/tests/integration_tests/datasources/test_caching.py
+++ b/tests/integration_tests/datasources/test_caching.py
@@ -25,7 +25,7 @@ def verify_no_cache_boot(client: IntegrationInstance):
             "No local datasource found",
             "running 'init'",
             "no cache found",
-            "Detected platform",
+            "Detected DataSource",
             "TEST _get_data called",
         ],
         text=log,
@@ -83,7 +83,7 @@ def test_no_cache_with_fallback(client: IntegrationInstance):
     util.verify_ordered_items_in_text(
         [
             "no cache found",
-            "Detected platform",
+            "Detected DataSource",
             "TEST _get_data called",
             "running 'init'",
             "restored from cache with run check",
@@ -102,7 +102,7 @@ def test_no_cache_with_fallback(client: IntegrationInstance):
     util.verify_ordered_items_in_text(
         [
             "cache invalid in datasource",
-            "Detected platform",
+            "Detected DataSource",
            "Restored fallback datasource from checked cache",
            "running 'init'",
            "restored from cache with run check",

From f90f0b8a6f9dbaffe47cfb33b361181326a0caa4 Mon Sep 17 00:00:00 2001
From: Alberto Contreras
Date: Mon, 22 Jul 2024 14:52:59 +0200
Subject: [PATCH 037/131] test: fix test_kernel_command_line_match (#5529)

Adapt to conform with 7703634ec048aa00ddf6ef7a5d552004a84c4f04
---
 .../test_kernel_command_line_match.py | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/tests/integration_tests/test_kernel_command_line_match.py b/tests/integration_tests/test_kernel_command_line_match.py
index 60bda90726c..2fce349d098 100644
--- a/tests/integration_tests/test_kernel_command_line_match.py
+++ b/tests/integration_tests/test_kernel_command_line_match.py
@@ -22,7 +22,7 @@
     (
         (
             "ds=nocloud;s=http://my-url/;h=hostname",
-            "DataSourceNoCloud [seed=None][dsmode=net]",
+            "DataSourceNoCloud",
             True,
         ),
         ("ci.ds=openstack", "DataSourceOpenStack", True),
@@ -49,17 +49,14 @@ def test_lxd_datasource_kernel_override(
     override_kernel_command_line(ds_str, client)
     if cmdline_configured:
         assert (
-            "Machine is configured by the kernel command line to run on single"
+            "Kernel command line set to use a single"
             f" datasource {configured}"
         ) in client.execute("cat /var/log/cloud-init.log")
     else:
         # verify that no plat
         log = client.execute("cat /var/log/cloud-init.log")
-        assert (f"Detected platform: {configured}") in log
-        assert (
-            "Machine is configured by the kernel "
-            "command line to run on single "
-        ) not in log
+        assert f"Detected {configured}" in log
+        assert "Kernel command line set to use a single" not in log
 
 
 GH_REPO_PATH = "https://raw.githubusercontent.com/canonical/cloud-init/main/"

From 99ac8193f5bd391c369b40dc6cf5bd40f2afe72e Mon Sep 17 00:00:00 2001
From: Carlos Nihelton
Date: Tue, 23 Jul 2024 14:33:22 -0700
Subject: [PATCH 038/131] fix(wsl): Put back the "path" argument to wsl_path
 in ds-identify (#5537)

Got swallowed by
https://github.com/canonical/cloud-init/pull/5116/commits/
da6b5c437a799bb934c89545c1b077a84d34a51d

The former commit resulted in a usage error from the wslpath command,
thus we never found WSL-specific data, disabling cloud-init.
---
 tools/ds-identify | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/ds-identify b/tools/ds-identify
index f6a97461178..606be9c4da5 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -1696,7 +1696,7 @@ dscheck_VMware() {
 WSL_path() {
     local params="$1" path="$2" val=""
-    val="$(wslpath "$params" "$1")"
+    val="$(wslpath "$params" "$path")"
     _RET="$val"
 }
@@ -1776,7 +1776,7 @@ dscheck_WSL() {
 
     # Then we can check for any .cloud-init folders for the user
     if [ ! -d "$profile_dir/.cloud-init/" ] && [ ! -d "$profile_dir/.ubuntupro/.cloud-init/" ]; then
-        debug 1 "No .cloud-init directories found"
+        debug 1 "No .cloud-init directories found in $profile_dir"
         return "${DS_NOT_FOUND}"
     fi

From bb4b7c0c6940ffe4e555cc43016ef8806b74d6f7 Mon Sep 17 00:00:00 2001
From: Alberto Contreras
Date: Wed, 24 Jul 2024 19:21:22 +0200
Subject: [PATCH 039/131] fix: auto label doc PRs (#5542)

Create explicit file to define and pin the actions/labeler's version.
Upgrade breaking changes from action/labeler@v5.
---
 .github/labeler.yml            | 5 ++++-
 .github/workflows/labeler.yaml | 9 +++++++++
 2 files changed, 13 insertions(+), 1 deletion(-)
 create mode 100644 .github/workflows/labeler.yaml

diff --git a/.github/labeler.yml b/.github/labeler.yml
index b341bb5018e..8a1422256eb 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -1,2 +1,5 @@
 documentation:
-- doc/*
+- changed-files:
+  - any-glob-to-any-file:
+    - 'doc/*'
+- base-branch: 'main'
\ No newline at end of file
diff --git a/.github/workflows/labeler.yaml b/.github/workflows/labeler.yaml
new file mode 100644
index 00000000000..71171438900
--- /dev/null
+++ b/.github/workflows/labeler.yaml
@@ -0,0 +1,9 @@
+name: "Pull Request Labeler"
+on:
+- pull_request_target
+
+jobs:
+  labeler:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/labeler@v5

From 02beb9ab4f2f69303979b39d551227d2b3f02890 Mon Sep 17 00:00:00 2001
From: Christian Ehrhardt
Date: Wed, 24 Jul 2024 19:45:03 +0200
Subject: [PATCH 040/131] docs: improve qemu command line (#5540)

The suggested qemu command line in our local execution example is
rather old. Change the discouraged -hd* options to the new -device
syntax instead. Further, add the option to use KVM acceleration to
speed up the example.

Finally, the example previously assumed an x86 host. We dropped
arguments that cannot work cross-arch and furthermore added a hint at
how one could regain native performance on these platforms.
Fixes GH-5050 --- doc/rtd/howto/run_cloud_init_locally.rst | 11 +++++++++-- doc/rtd/tutorial/qemu.rst | 7 ++++++- tools/.github-cla-signers | 1 + 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/doc/rtd/howto/run_cloud_init_locally.rst b/doc/rtd/howto/run_cloud_init_locally.rst index 0111bc1da42..2510eadd067 100644 --- a/doc/rtd/howto/run_cloud_init_locally.rst +++ b/doc/rtd/howto/run_cloud_init_locally.rst @@ -70,6 +70,12 @@ Download an Ubuntu image to run: wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img +.. note:: + This example uses emulated CPU instructions on non-x86 hosts, so it may be + slow. To make it faster on non-x86 architectures, one can change the image + type and :spelling:ignore:`qemu-system-` command name to match the + architecture of your host machine. + Boot the image with the ISO attached ------------------------------------ @@ -78,8 +84,9 @@ Boot the cloud image with our configuration, :file:`seed.img`, to QEMU: .. code-block:: shell-session $ qemu-system-x86_64 -m 1024 -net nic -net user \ - -hda jammy-server-cloudimg-amd64.img \ - -hdb seed.img + -drive file=jammy-server-cloudimg-amd64.img,index=0,format=qcow2,media=disk \ + -drive file=seed.img,index=1,media=cdrom \ + -machine accel=kvm:tcg The now-booted image will allow for login using the password provided above. diff --git a/doc/rtd/tutorial/qemu.rst b/doc/rtd/tutorial/qemu.rst index 4c1afedd8a1..caa79cd39dd 100644 --- a/doc/rtd/tutorial/qemu.rst +++ b/doc/rtd/tutorial/qemu.rst @@ -80,6 +80,12 @@ server image using :command:`wget`: $ wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img +.. note:: + This example uses emulated CPU instructions on non-x86 hosts, so it may be + slow. To make it faster on non-x86 architectures, one can change the image + type and :spelling:ignore:`qemu-system-` command name to match the + architecture of your host machine. + Define our user data ==================== @@ -203,7 +209,6 @@ take a few moments to complete. 
-net nic \
    -net user \
    -machine accel=kvm:tcg \
-   -cpu host \
    -m 512 \
    -nographic \
    -hda jammy-server-cloudimg-amd64.img \
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index d371798668f..80872dcb508 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -42,6 +42,7 @@ citrus-it
 cjp256
 CodeBleu
 Conan-Kudo
+cpaelzer
 cvstealth
 dankenigsberg
 dankm

From 8ceae8b7b504960e348ecda36c26db0b6fa12890 Mon Sep 17 00:00:00 2001
From: Alberto Contreras
Date: Wed, 24 Jul 2024 22:23:30 +0200
Subject: [PATCH 041/131] doc: add diagram with boot stages (#5539)

SC-1835
Fixes GH-5334
---
 doc-requirements.txt         |  1 +
 doc/rtd/conf.py              |  1 +
 doc/rtd/explanation/boot.rst | 46 ++++++++++++++++++++++++++++------
 3 files changed, 41 insertions(+), 7 deletions(-)

diff --git a/doc-requirements.txt b/doc-requirements.txt
index beb14dd9009..2bb66b5d2dc 100644
--- a/doc-requirements.txt
+++ b/doc-requirements.txt
@@ -9,4 +9,5 @@ sphinx-design
 sphinx-copybutton
 sphinx-notfound-page
 sphinxcontrib.datatemplates
+sphinxcontrib-mermaid
 sphinxcontrib-spelling
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 1ca6a85a208..55744abb992 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -44,6 +44,7 @@
     "sphinx.ext.autosectionlabel",
     "sphinx.ext.viewcode",
     "sphinxcontrib.datatemplates",
+    "sphinxcontrib.mermaid",
     "sphinxcontrib.spelling",
 ]
diff --git a/doc/rtd/explanation/boot.rst b/doc/rtd/explanation/boot.rst
index 6aff2856b66..b1421a209a5 100644
--- a/doc/rtd/explanation/boot.rst
+++ b/doc/rtd/explanation/boot.rst
@@ -3,13 +3,42 @@
 Boot stages
 ***********
 
-There are five stages to boot:
+There are five stages to boot which are run sequentially: ``Detect``, ``Local``,
+``Network``, ``Config`` and ``Final``.
 
-1. Detect
-2. Local
-3. Network
-4. Config
-5. Final
+Visual representation of cloud-init boot stages with respect to network config
+and system accessibility:
+
+.. mermaid::
+
+   graph TB
+
+   D["Detect"] ---> L
+
+   L --> NU([Network up])
+   L & NU --> N
+   subgraph L["Local"]
+   FI[Fetch IMDS]
+   end
+
+   N --> NO([Network online])
+   N & NO --> C
+   N --> S([SSH])
+   N --> Login([Login])
+
+   subgraph N["Network"]
+   cloud_init_modules
+   end
+   %% cloud_config_modules
+
+   subgraph C["Config"]
+   cloud_config_modules
+   end
+
+   C --> F
+   subgraph F["Final"]
+   cloud_final_modules
+   end
 
 .. _boot-Detect:
 
@@ -83,7 +112,7 @@ Network
 +---------+--------+----------------------------------------------------------+
 | runs             | after local stage and configured networking is up        |
 +---------+--------+----------------------------------------------------------+
-| blocks           | as much of remaining boot as possible                     |
+| blocks           | majority of remaining boot (e.g. SSH and console login)   |
 +---------+--------+----------------------------------------------------------+
 | modules          | *cloud_init_modules* in ``/etc/cloud/cloud.cfg``          |
 +---------+--------+----------------------------------------------------------+
@@ -111,6 +140,9 @@ necessary for cloud-init to run should not be done until after this
 stage.
 
 A part-handler and :ref:`boothooks` will run at this stage.
 
+After this stage completes, expect to be able to access the system via serial
+console login or SSH.
+
 .. _boot-Config:
_boot-Config: Config From 5f93726619eaf2ebe78bed34196d089c27816aac Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 24 Jul 2024 16:28:46 -0600 Subject: [PATCH 042/131] feat(schema): add chef_license schema enum (#5543) Fixes GH-5513 --- cloudinit/config/schemas/schema-cloud-config-v1.json | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json index 75bdaa5865b..c05bd994212 100644 --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -1352,7 +1352,12 @@ }, "chef_license": { "type": "string", - "description": "string that indicates if user accepts or not license related to some of chef products" + "description": "string that indicates if user accepts or not license related to some of chef products. See https://docs.chef.io/licensing/accept/", + "enum": [ + "accept", + "accept-silent", + "accept-no-persist" + ] } } } From c1760751e2ab8f326ee82538496a99cfc4c8445f Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 24 Jul 2024 16:31:43 -0600 Subject: [PATCH 043/131] fix: doc auto label to consider schema json changes as doc PRs (#5543) --- .github/labeler.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/labeler.yml b/.github/labeler.yml index 8a1422256eb..9eb6dcc33b4 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -2,4 +2,5 @@ documentation: - changed-files: - any-glob-to-any-file: - 'doc/*' -- base-branch: 'main' \ No newline at end of file + - 'cloudinit/config/schema/*' +- base-branch: 'main' From e5e78c2ed3fa00f8a9c760d01c17799a138dc1eb Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Thu, 25 Jul 2024 18:45:05 +0200 Subject: [PATCH 044/131] doc: Update docs on boothooks (#5546) Improve explanation of #cloud-boothook for end-users. SC-1657 Fixes GH-4542 Co-authored-by: Calvin Mwadime --- doc/examples/boothook.txt | 3 ++ doc/rtd/explanation/format.rst | 51 ++++++++++++++++++++++++++++------ doc/rtd/reference/examples.rst | 7 +++++ 3 files changed, 52 insertions(+), 9 deletions(-) create mode 100644 doc/examples/boothook.txt diff --git a/doc/examples/boothook.txt b/doc/examples/boothook.txt new file mode 100644 index 00000000000..1c1d52d4589 --- /dev/null +++ b/doc/examples/boothook.txt @@ -0,0 +1,3 @@ +#cloud-boothook +#!/bin/sh +echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts diff --git a/doc/rtd/explanation/format.rst b/doc/rtd/explanation/format.rst index c1eda9006d9..b154dd66036 100644 --- a/doc/rtd/explanation/format.rst +++ b/doc/rtd/explanation/format.rst @@ -157,17 +157,50 @@ a MIME archive. ``cloud-boothook`` ================== -This content is `boothook` data. It is stored in a file under -:file:`/var/lib/cloud` and executed immediately. This is the earliest `hook` -available. Note, that there is no mechanism provided for running only once. The -`boothook` must take care of this itself. +A one-line ``#cloud-boothook`` header, then the executable payload. -It is provided with the instance id in the environment variable -``INSTANCE_ID``. This could be made use of to provide a 'once-per-instance' -type of functionality. +This is run very early in the boot process, during the +:ref:`Network boot stage`, even before ``cc_bootcmd``. -Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when -using a MIME archive. 
+This can be used when something has to be configured very early in boot, +potentially on every boot, with less convenience than ``cc_bootcmd`` but more +flexibility. + +.. note:: + Boothooks are executed on every boot. + The environment variable ``INSTANCE_ID`` will be set to the current instance + ID. ``INSTANCE_ID`` can be used to implement a `once-per-instance` type of + functionality. + +Begins with: ``#cloud-boothook``. + +Example with simple script +-------------------------- + +.. code-block:: bash + + #cloud-boothook + #!/bin/sh + echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts + +Example of once-per-instance script +----------------------------------- + +.. code-block:: bash + + #cloud-boothook + #!/bin/sh + + PERSIST_ID=/var/lib/cloud/first-instance-id + _id="" + if [ -r $PERSIST_ID ]; then + _id=$(cat /var/lib/cloud/first-instance-id) + fi + + if [ -z "$_id" ] || [ "$INSTANCE_ID" != "$_id" ]; then + echo 192.168.1.130 us.archive.ubuntu.com >> /etc/hosts + fi + echo $INSTANCE_ID > $PERSIST_ID Part-handler ============ diff --git a/doc/rtd/reference/examples.rst b/doc/rtd/reference/examples.rst index c9829e49cd2..fe2703031ac 100644 --- a/doc/rtd/reference/examples.rst +++ b/doc/rtd/reference/examples.rst @@ -77,6 +77,13 @@ Run commands on first boot :language: yaml :linenos: +Run commands very early at every boot +======================================== + +.. literalinclude:: ../../examples/boothook.txt + :language: bash + :linenos: + Install arbitrary packages ========================== From 883d8e2f99990ea38513cfe2affe3c04d50d0b48 Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Thu, 25 Jul 2024 19:15:41 +0200 Subject: [PATCH 045/131] doc(modules): add section to wrap modules' doc (#5550) Fixes GH-5467 --- doc/rtd/reference/modules.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/rtd/reference/modules.rst b/doc/rtd/reference/modules.rst index 2a7d26d3068..f56e3ffa8e1 100644 --- a/doc/rtd/reference/modules.rst +++ b/doc/rtd/reference/modules.rst @@ -17,6 +17,9 @@ version ``22.1`` (the first release in 2022) it is scheduled to be removed in the logs. If a key's expected value changes, the key will be marked ``changed`` with a date. A 5 year timeline also applies to changed keys. +Modules +======= + .. datatemplate:yaml:: ../../module-docs/cc_ansible/data.yaml :template: modules.tmpl .. datatemplate:yaml:: ../../module-docs/cc_apk_configure/data.yaml From 2ffd6528acaf34d324698b9f7a59bb91af498b1b Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Thu, 25 Jul 2024 10:45:49 +0200 Subject: [PATCH 046/131] fix(doc-spelling): config spelling_word_list_filename (#5547) Without this explicit config option, an untracked and unused `doc/rtd/spelling_wordlist.txt` file is created while running `tox -e doc-spelling`. --- doc/rtd/conf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index 55744abb992..cfa1f63df63 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -56,6 +56,7 @@ templates_path = ["templates"] # Uses case-independent spelling matches from doc/rtd/spelling_word_list.txt spelling_filters = ["spelling.WordListFilter"] +spelling_word_list_filename = "spelling_word_list.txt" # The suffix of source filenames. 
source_suffix = ".rst" From 25058e1159117dbfffe4248d22af26bba23309c5 Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Thu, 25 Jul 2024 11:02:40 +0200 Subject: [PATCH 047/131] chore: remove unneeded doc-lint tox env config (#5547) --- tox.ini | 3 --- 1 file changed, 3 deletions(-) diff --git a/tox.ini b/tox.ini index a43ef53f3c2..d6982cbe382 100644 --- a/tox.ini +++ b/tox.ini @@ -226,9 +226,6 @@ commands = {envpython} -m sphinx {posargs:-W doc/rtd doc/rtd_html} doc8 doc/rtd -[doc-lint] -ignore-path-errors=doc/rtd/topics/faq.rst;D001 - [testenv:doc-spelling] deps = -r{toxinidir}/doc-requirements.txt From 779dd6b009f8a568fe74e98df9803ad590b83044 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Thu, 25 Jul 2024 13:18:32 -0600 Subject: [PATCH 048/131] doc(autoinstall): Remove incorrect statements, be more direct (#5545) --- doc/rtd/reference/faq.rst | 28 +++++++++------------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/doc/rtd/reference/faq.rst b/doc/rtd/reference/faq.rst index 45ec431d910..146dc66774d 100644 --- a/doc/rtd/reference/faq.rst +++ b/doc/rtd/reference/faq.rst @@ -15,21 +15,15 @@ Having trouble? We would like to help! - Find a bug? Check out the :ref:`reporting_bugs` topic to find out how to report one -``autoinstall``, ``preruncmd``, ``postruncmd`` -============================================== - -Since ``cloud-init`` ignores top level user data ``cloud-config`` keys, other -projects such as `Juju`_ and `Subiquity autoinstaller`_ use a YAML-formatted -config that combines ``cloud-init``'s user data cloud-config YAML format with -their custom YAML keys. Since ``cloud-init`` ignores unused top level keys, -these combined YAML configurations may be valid ``cloud-config`` files, -however keys such as ``autoinstall``, ``preruncmd``, and ``postruncmd`` are -not used by ``cloud-init`` to configure anything. - -Please direct bugs and questions about other projects that use ``cloud-init`` -to their respective support channels. For Subiquity autoinstaller that is via -IRC (``#ubuntu-server`` on Libera) or Discourse. For Juju support see their -`discourse page`_. +``autoinstall`` +=============== + +Other projects, such as `Subiquity autoinstaller`_, use cloud-init to implement +a subset of their features and have a YAML configuration format which combines +``cloud-init``'s cloud-config with additional keys. + +If you are an autoinstall user, please direct questions to their IRC channel +(``#ubuntu-server`` on Libera). Can I use cloud-init as a library? ================================== @@ -83,8 +77,6 @@ Whitepapers: .. _mailing list: https://launchpad.net/~cloud-init .. _IRC channel on Libera: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init -.. _Juju: https://ubuntu.com/blog/topics/juju -.. _discourse page: https://discourse.charmhub.io .. _do: https://github.com/canonical/ubuntu-pro-client/blob/9b46480b9e4b88e918bac5ced0d4b8edb3cbbeab/lib/auto_attach.py#L35 .. _cloud-init - The Good Parts: https://www.youtube.com/watch?v=2_m6EUo6VOI @@ -106,5 +98,3 @@ Whitepapers: .. _cloud-init Summit 2018: https://powersj.io/post/cloud-init-summit18/ .. _cloud-init Summit 2017: https://powersj.io/post/cloud-init-summit17/ .. _Subiquity autoinstaller: https://ubuntu.com/server/docs/install/autoinstall -.. _juju_project: https://discourse.charmhub.io/t/model-config-cloudinit-userdata/512 -.. 
_discourse page: https://discourse.charmhub.io From 81ef45ef8611ce3ffcce87c57919b44a2a32d284 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Fri, 26 Jul 2024 12:57:56 -0600 Subject: [PATCH 049/131] doc(NoCloud): Categorize the different configuration types (#5521) Formally document providing runtime configuration in system configuration. Introduce names to identify previously unnamed NoCloud concepts. Add more structure - discrete sections for: - runtime configuration types - discovery configuration - configuration sources --- doc/examples/cloud-config-datasources.txt | 2 +- doc/rtd/reference/datasources/nocloud.rst | 318 ++++++++++++++++------ 2 files changed, 232 insertions(+), 88 deletions(-) diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index 0e1527bf952..fd95b3a81fb 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -38,7 +38,7 @@ datasource: # right here user-data: | # This is the user-data verbatim - meta-data: + meta-data: | instance-id: i-87018aed local-hostname: myhost.internal diff --git a/doc/rtd/reference/datasources/nocloud.rst b/doc/rtd/reference/datasources/nocloud.rst index 3033869f682..bf32ad3458b 100644 --- a/doc/rtd/reference/datasources/nocloud.rst +++ b/doc/rtd/reference/datasources/nocloud.rst @@ -4,99 +4,241 @@ NoCloud ******* The data source ``NoCloud`` is a flexible datasource that can be used in -multiple different ways. With NoCloud, one can provide configurations to -the instance without running a network service (or even without having a -network at all). Alternatively, one can use HTTP/HTTPS or FTP/FTPS to provide -a configuration. +multiple different ways. + +With NoCloud, one can provide configuration to the instance locally (without +network access); alternatively, NoCloud can fetch the configuration from a +remote server. + +Much of the following documentation describes how to tell cloud-init where +to get its configuration. + -Configuration Methods: +Runtime configurations ====================== -.. warning:: - User data placed under ``/etc/cloud/`` will **not** be recognized as a - source of configuration data by the NoCloud datasource. While it may - be acted upon by cloud-init, using - :ref:`DataSourceNone` should be preferred. +Cloud-init discovers four types of configuration at runtime. The source of +these configuration types is configurable with a discovery configuration. This +discovery configuration can be delivered to cloud-init in different ways, but +is different from the configurations that cloud-init uses to configure the +instance at runtime. + +user data +--------- + +User data is a :ref:`configuration format` that allows a +user to configure an instance. + +metadata +-------- + +The ``meta-data`` file is a YAML-formatted file. + +vendor data +----------- + +Vendor data may be used to provide default cloud-specific configurations which +may be overridden by user data. This may be useful, for example, to configure an +instance with a cloud provider's repository mirror for faster package +installation. + +network config +-------------- + +Network configuration typically comes from the cloud provider to set +cloud-specific network configurations, or a reasonable default is set by +cloud-init (typically cloud-init brings up an interface using DHCP). -Method 1: Labeled filesystem +Since NoCloud is a generic datasource, network configuration may be set the +same way as user data, metadata, and vendor data. 
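+For example, a minimal ``network-config`` file using the v2 format might look
+like the following sketch (the interface name is illustrative):
+
+.. code-block:: yaml
+
+   version: 2
+   ethernets:
+     interface0:
+       dhcp4: true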
+ +See the :ref:`network configuration` documentation for +information on network configuration formats. + +Discovery configuration +======================= + +The purpose of the discovery configuration is to tell cloud-init where it can +find the runtime configurations described above. + +There are two methods for cloud-init to receive a discovery configuration. + +Method 1: Line configuration ---------------------------- -A labeled `vfat`_ or `iso9660` filesystem may be used. The filesystem volume -must be labelled ``CIDATA``. +The "line configuration" is a single string of text which is passed to an +instance at boot time via either the kernel command line or in the serial +number exposed via DMI (sometimes called SMBIOS). +Example: :: -Method 2: Custom webserver + ds=nocloud;s=https://10.42.42.42/configs/ -Configuration files can be provided to cloud-init over HTTP(s). To tell -cloud-init the URI to use, arguments must be passed to the instance via the -kernel command line or SMBIOS serial number. This argument might look like: :: +In the above line configuration, ``ds=nocloud`` tells cloud-init to use the +NoCloud datasource, and ``s=https://10.42.42.42/configs/`` tells cloud-init to +fetch configurations using ``https`` from the URI +``https://10.42.42.42/configs/``. - ds=nocloud;s=https://10.42.42.42/cloud-init/configs/ +We will describe the possible values in a line configuration in the following +sections. See :ref:`this section` for more details on line +configuration. .. note:: + If using kernel command line arguments with GRUB, note that an unescaped semicolon is interpreted as the end of a statement. - Consider using single-quotes to avoid this pitfall. See: `GRUB quoting`_ - ds=nocloud;s=http://10.42.42.42/cloud-init/configs/ + See: `GRUB quoting`_ + +Method 2: System configuration +------------------------------ + +System configurations are YAML-formatted files and have names that end in +``.cfg``. These are located under :file:`/etc/cloud/cloud.cfg.d/`. + +Example: + +.. code-block:: yaml + + datasource: + NoCloud: + seedfrom: https://10.42.42.42/configs/ + +The above system configuration tells cloud-init that it is using NoCloud and +that it can find configurations at ``https://10.42.42.42/configs/``. + +The scope of this section is limited to its use for selecting the source of +its configuration; however, it is worth mentioning that the system configuration +provides more than just the discovery configuration. + +In addition to defining where cloud-init can find runtime configurations, the +system configuration also controls many of cloud-init's default behaviors. +Most users shouldn't need to modify these defaults; however, it is worth noting +that downstream distributions often use them to set reasonable default +behaviors for cloud-init. This includes things such as which distro to behave +as and which networking backend to use. + +The default values in :file:`/etc/cloud/cloud.cfg` may be overridden by drop-in +files which are stored in :file:`/etc/cloud/cloud.cfg.d`. + +Configuration sources +===================== -User-data, metadata, network config, and vendor data may be sourced from one +of several possible locations, either locally or remotely. + +Source 1: Local filesystem +-------------------------- + +System configuration may provide cloud-init runtime configuration directly: + +.. 
code-block:: yaml + + datasource: + NoCloud: + meta-data: | + instance-id: l-eadfbe + user-data: | + #cloud-config + runcmd: [ echo "it worked!" > /tmp/example.txt ] + +Local filesystem: custom location +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Cloud-init makes it possible to find system configuration in a custom +filesystem path for those that require more flexibility. This may be +done with a line configuration: :: + + + ds=nocloud;s=file://path/to/directory/ + +Or a system configuration: + +.. code-block:: yaml + + datasource: + NoCloud: + seedfrom: file://path/to/directory + +Source 2: Drive with labeled filesystem +--------------------------------------- + +A labeled `vfat`_ or `iso9660` filesystem may be used. The filesystem volume +must be labelled ``CIDATA``. The :ref:`configuration files` must +be in the root directory of the filesystem. + +Source 3: Custom webserver +-------------------------- + +Configuration files can be provided to cloud-init over HTTP(S) using a +line configuration: :: + + ds=nocloud;s=https://10.42.42.42/cloud-init/configs/ + +or using system configuration: + +.. code-block:: yaml datasource: NoCloud: seedfrom: https://10.42.42.42/cloud-init/configs/ -Method 3: FTP Server +Source 4: FTP Server -------------------- Configuration files can be provided to cloud-init over unsecured FTP -or alternatively with FTP over TLS. To tell cloud-init the URL to use, -arguments must be passed to the instance via the kernel command line or SMBIOS -serial number. This argument might look like: :: +or alternatively with FTP over TLS using a line configuration :: ds=nocloud;s=ftps://10.42.42.42/cloud-init/configs/ -Alternatively, this URI may be defined in a configuration in a file -:file:`/etc/cloud/cloud.cfg.d/*.cfg` like this: :: +or using system configuration: + +.. code-block:: yaml datasource: NoCloud: seedfrom: ftps://10.42.42.42/cloud-init/configs/ -Method 4: Local filesystem --------------------------- -Configuration files can be provided on the local filesystem at specific -filesystem paths using kernel command line arguments or SMBIOS serial number to -tell cloud-init where on the filesystem to look. +.. _source_files: -.. note:: - Unless arbitrary filesystem paths are required, one might prefer to use - :ref:`DataSourceNone`, since it does not require - modifying the kernel command line or SMBIOS. +Source files +------------ -This argument might look like: :: +The base path pointed to by the URI in the above sources provides content +using the following final path components: - ds=nocloud;s=file://path/to/directory/ +* ``user-data`` +* ``meta-data`` +* ``vendor-data`` +* ``network-config`` -Alternatively, this URI may be defined in a configuration in a file -:file:`/etc/cloud/cloud.cfg.d/*.cfg` like this: :: +For example, if the value of ``seedfrom`` is +``https://10.42.42.42/``, then the following files will be fetched from the +webserver at first boot: - datasource: - NoCloud: - seedfrom: file://10.42.42.42/cloud-init/configs/ +.. code-block:: sh - https://10.42.42.42/user-data + https://10.42.42.42/vendor-data + https://10.42.42.42/meta-data + https://10.42.42.42/network-config + +If the required files don't exist, this datasource will be skipped. + +.. _line_config_detail: +Line configuration in detail +============================ -Permitted keys -============== +The line configuration has several options. -Currently three keys (and their aliases) are permitted for configuring -cloud-init. 
+Permitted keys (DMI and kernel command line) +-------------------------------------------- -The only required key is: +Currently three keys (and their aliases) are permitted in cloud-init's kernel +command line and DMI (sometimes called SMBIOS) serial number. -* ``seedfrom`` alias: ``s`` +There is only one required key in a line configuration: + +* ``seedfrom`` (alternatively ``s``) A valid ``seedfrom`` value consists of a URI which must contain a trailing ``/``. @@ -104,15 +246,11 @@ A valid ``seedfrom`` value consists of a URI which must contain a trailing Some optional keys may be used, but their use is discouraged and may be removed in the future. -* ``local-hostname`` alias: ``h`` (:ref:`cloud-config` - preferred) -* ``instance-id`` alias: ``i`` (set instance id in :file:`meta-data` instead) -.. note:: +* ``local-hostname`` (alternatively ``h``) +* ``instance-id`` (alternatively ``i``) - The aliases ``s`` , ``h`` and ``i`` are only supported by kernel - command line or SMBIOS. When configured in a ``*.cfg`` file, the long key - name is required. +Both of these can be set in :file:`meta-data` instead. Seedfrom: HTTP and HTTPS ------------------------ @@ -138,26 +276,37 @@ Where ``scheme`` can be ``ftp`` or ``ftps``, ``userinfo`` will be ``host`` can be an IP address or DNS name, and ``port`` is which network port to use (default is ``21``). -Seedfrom: Files ---------------- +Discovery configuration considerations +====================================== + +Above, we describe the two methods of providing discovery configuration (system +configuration and line configuration). Two methods exist because there are +advantages and disadvantages to each option; neither is clearly a better +choice, so it is left to the user to decide. + +Line configuration +------------------ + +**Advantages** -The path pointed to by the URI can contain the following -files: +* it may be possible to set kernel command line and DMI variables at boot time + without modifying the base image -``user-data`` (required) -``meta-data`` (required) -``vendor-data`` (optional) -``network-config`` (optional) +**Disadvantages** -If the seedfrom URI doesn't contain the required files, this datasource -will be skipped. +* requires control and modification of the hypervisor or the bootloader +* DMI / SMBIOS is architecture specific -The ``user-data`` file uses :ref:`user data format`. The -``meta-data`` file is a YAML-formatted file. +System configuration +-------------------- + +**Advantages** + +* simple: requires only modifying a file -The ``vendor-data`` file adheres to -:ref:`user data formats`. The ``network-config`` file -follows cloud-init's :ref:`Network Configuration Formats`. +**Disadvantages** + +* requires modifying the filesystem prior to booting an instance DMI-specific kernel command line ================================ @@ -189,7 +338,7 @@ wanted. - ``dmi.system-uuid`` - ``dmi.system-version`` -For example, you can pass this option to QEMU: :: +For example, you can pass this line configuration to QEMU: :: -smbios type=1,serial=ds=nocloud;s=http://10.10.0.1:8000/__dmi.chassis-serial-number__/ @@ -268,14 +417,10 @@ sufficient disk by following the example below. user data you will also have to change the ``instance-id``, or start the disk fresh. -Also, you can inject an :file:`/etc/network/interfaces` file by providing the -content for that file in the ``network-interfaces`` field of -:file:`meta-data`. Example ``meta-data`` --------------------- -:: +.. 
code-block:: yaml instance-id: iid-abcdefg network-interfaces: | @@ -288,17 +433,14 @@ Example ``meta-data`` hostname: myhost +``network-config`` +------------------ + Network configuration can also be provided to ``cloud-init`` in either :ref:`network_config_v1` or :ref:`network_config_v2` by providing that -YAML formatted data in a file named :file:`network-config`. If found, -this file will override a :file:`network-interfaces` file. +YAML formatted data in a file named :file:`network-config`. -See an example below. Note specifically that this file does not -have a top level ``network`` key as it is already assumed to -be network configuration based on the filename. - -Example config --------------- +Example network v1: .. code-block:: yaml @@ -314,6 +456,8 @@ Example config gateway: 192.168.1.254 +Example network v2: + .. code-block:: yaml version: 2 From 914a3a8a4a3c708548906fbd9674d2500e776e4b Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Fri, 26 Jul 2024 21:14:06 +0200 Subject: [PATCH 050/131] doc: improve drop-in custom modules (#5548) Add group of pages for drop-in custom modules and restructure existing docs under it. Add doc for custom datasources and config modules. SC-1836 Fixes GH-4649 --- doc/rtd/development/datasource_creation.rst | 2 + doc/rtd/development/module_creation.rst | 11 ++++ doc/rtd/explanation/format.rst | 2 + doc/rtd/reference/base_config_reference.rst | 4 ++ doc/rtd/reference/cli.rst | 26 +------- doc/rtd/reference/custom_modules.rst | 24 ++++++++ .../custom_modules/custom_clean_scripts.rst | 25 ++++++++ .../custom_configuration_module.rst | 23 +++++++ .../custom_modules/custom_datasource.rst | 19 ++++++ .../custom_modules/custom_mergers.rst | 60 +++++++++++++++++++ doc/rtd/reference/index.rst | 1 + doc/rtd/reference/merging.rst | 60 +------------------ 12 files changed, 175 insertions(+), 82 deletions(-) create mode 100644 doc/rtd/reference/custom_modules.rst create mode 100644 doc/rtd/reference/custom_modules/custom_clean_scripts.rst create mode 100644 doc/rtd/reference/custom_modules/custom_configuration_module.rst create mode 100644 doc/rtd/reference/custom_modules/custom_datasource.rst create mode 100644 doc/rtd/reference/custom_modules/custom_mergers.rst diff --git a/doc/rtd/development/datasource_creation.rst b/doc/rtd/development/datasource_creation.rst index 98f9f88419a..1b6e525b122 100644 --- a/doc/rtd/development/datasource_creation.rst +++ b/doc/rtd/development/datasource_creation.rst @@ -170,6 +170,8 @@ Datasources included in upstream cloud-init benefit from ongoing maintenance, compatibility with the rest of the codebase, and security fixes by the upstream development team. +If this is not possible, one can add +:ref:`custom out-of-tree datasources` to cloud-init. .. _make-mime: https://cloudinit.readthedocs.io/en/latest/explanation/instancedata.html#storage-locations .. _DMI: https://www.dmtf.org/sites/default/files/standards/documents/DSP0005.pdf diff --git a/doc/rtd/development/module_creation.rst b/doc/rtd/development/module_creation.rst index 32240ab3e91..3e10a1ee00b 100644 --- a/doc/rtd/development/module_creation.rst +++ b/doc/rtd/development/module_creation.rst @@ -163,6 +163,17 @@ in the correct location based on dependencies. If your module has no particular dependencies or is not necessary for a later boot stage, it should be placed in the ``cloud_final_modules`` section before the ``final-message`` module. 
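+For example, a hypothetical custom module ``cc_my_module`` could be enabled by
+listing it (without the ``cc_`` prefix) near the end of ``cloud_final_modules``
+in :file:`/etc/cloud/cloud.cfg`; this snippet is illustrative:
+
+.. code-block:: yaml
+
+   cloud_final_modules:
+     # ...other modules...
+     - my_module
+     - final-message
+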
+Benefits of including your config module in upstream cloud-init +=============================================================== + +Config modules included in upstream cloud-init benefit from ongoing +maintenance, compatibility with the rest of the codebase, and security +fixes by the upstream development team. + +If this is not possible, one can add +:ref:`custom out-of-tree config modules` +to cloud-init. .. _MetaSchema: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/config/schema.py#L58 .. _OSFAMILIES: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/distros/__init__.py#L35 diff --git a/doc/rtd/explanation/format.rst b/doc/rtd/explanation/format.rst index b154dd66036..8f14ccdb6c5 100644 --- a/doc/rtd/explanation/format.rst +++ b/doc/rtd/explanation/format.rst @@ -202,6 +202,8 @@ Example of once-per-instance script fi echo $INSTANCE_ID > $PERSIST_ID +.. _user_data_formats-part_handler: + Part-handler ============ diff --git a/doc/rtd/reference/base_config_reference.rst b/doc/rtd/reference/base_config_reference.rst index 9686d456d11..82484118553 100644 --- a/doc/rtd/reference/base_config_reference.rst +++ b/doc/rtd/reference/base_config_reference.rst @@ -28,6 +28,8 @@ distribution supported by ``cloud-init``. Base configuration keys ======================= +.. _base_config_module_keys: + Module keys ----------- @@ -221,6 +223,8 @@ Other keys The :ref:`network configuration` to be applied to this instance. +.. _base_config_datasource_pkg_list: + ``datasource_pkg_list`` ^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/rtd/reference/cli.rst b/doc/rtd/reference/cli.rst index 9c0bbe9c3ee..eb800b22a75 100644 --- a/doc/rtd/reference/cli.rst +++ b/doc/rtd/reference/cli.rst @@ -83,30 +83,8 @@ re-run all stages as it did on first boot. .. note:: - Cloud-init provides the directory :file:`/etc/cloud/clean.d/` for third party - applications which need additional configuration artifact cleanup from - the filesystem when the `clean` command is invoked. - - The :command:`clean` operation is typically performed by image creators - when preparing a golden image for clone and redeployment. The clean command - removes any cloud-init semaphores, allowing cloud-init to treat the next - boot of this image as the "first boot". When the image is next booted - cloud-init will performing all initial configuration based on any valid - datasource meta-data and user-data. - - Any executable scripts in this subdirectory will be invoked in lexicographical - order with run-parts when running the :command:`clean` command. - - Typical format of such scripts would be a ##- like the following: - :file:`/etc/cloud/clean.d/99-live-installer` - - An example of a script is: - - .. code-block:: bash - - sudo rm -rf /var/lib/installer_imgs/ - sudo rm -rf /var/log/installer/ - + The operations performed by `clean` can be supplemented / customized. See: + :ref:`custom_clean_scripts`. .. _cli_collect_logs: diff --git a/doc/rtd/reference/custom_modules.rst b/doc/rtd/reference/custom_modules.rst new file mode 100644 index 00000000000..3145e723bd7 --- /dev/null +++ b/doc/rtd/reference/custom_modules.rst @@ -0,0 +1,24 @@ +Custom Modules +************** + +This includes reference documentation on how to extend cloud-init with +custom / out-of-tree functionality. + +.. _custom_formats: + +Custom Formats +============== + +One can define custom data formats by presenting a +:ref:`#part-handler` +config via user-data or vendor-data. + +----- + +.. 
toctree:: + :maxdepth: 1 + + custom_modules/custom_clean_scripts.rst + custom_modules/custom_configuration_module.rst + custom_modules/custom_datasource.rst + custom_modules/custom_mergers.rst diff --git a/doc/rtd/reference/custom_modules/custom_clean_scripts.rst b/doc/rtd/reference/custom_modules/custom_clean_scripts.rst new file mode 100644 index 00000000000..955668fb266 --- /dev/null +++ b/doc/rtd/reference/custom_modules/custom_clean_scripts.rst @@ -0,0 +1,25 @@ +.. _custom_clean_scripts: + +Custom Clean Scripts +******************** + +Cloud-init provides the directory :file:`/etc/cloud/clean.d/` for third party +applications which need additional configuration artifact cleanup from +the filesystem when the :ref:`cloud-init clean` command is invoked. + +The :command:`clean` operation is typically performed by image creators +when preparing a golden image for clone and redeployment. The clean command +removes any cloud-init internal state, allowing cloud-init to treat the next +boot of this image as the "first boot". + +Any executable scripts in this directory will be invoked in lexicographical +order when running the :command:`clean` command. + +Example +======= + +.. code-block:: bash + + $ cat /etc/cloud/clean.d/99-live-installer + #!/bin/sh + sudo rm -rf /var/lib/installer_imgs/ + sudo rm -rf /var/log/installer/ diff --git a/doc/rtd/reference/custom_modules/custom_configuration_module.rst b/doc/rtd/reference/custom_modules/custom_configuration_module.rst new file mode 100644 index 00000000000..a26adf26eb4 --- /dev/null +++ b/doc/rtd/reference/custom_modules/custom_configuration_module.rst @@ -0,0 +1,23 @@ +.. _custom_configuration_module: + +Custom Configuration Module +*************************** + +Custom 3rd-party out-of-tree configuration modules can be added to cloud-init +by: + +#. :ref:`Implement a config module` in a Python file with its + name starting with ``cc_``. + +#. Place the file where the rest of the config modules are located. + On Ubuntu this path is typically: + `/usr/lib/python3/dist-packages/cloudinit/config/`. + +#. Extend the :ref:`base-configuration's ` + ``cloud_init_modules``, ``cloud_config_modules`` or ``cloud_final_modules`` + so that the config module runs in one of those stages. + +.. warning:: + The config jsonschema validation functionality will complain about + unknown config keys introduced by custom modules, and there is no easy + way for custom modules to define a schema for their keys. diff --git a/doc/rtd/reference/custom_modules/custom_datasource.rst b/doc/rtd/reference/custom_modules/custom_datasource.rst new file mode 100644 index 00000000000..2d5aa6c8463 --- /dev/null +++ b/doc/rtd/reference/custom_modules/custom_datasource.rst @@ -0,0 +1,19 @@ +.. _custom_datasource: + +Custom DataSource +***************** + +Custom 3rd-party out-of-tree DataSources can be added to cloud-init by: + +#. :ref:`Implement a DataSource` in a Python file. + +#. Place that file as a single Python module or package in a folder included + in ``$PYTHONPATH``. + +#. Extend the base configuration's + :ref:`datasource_pkg_list` to include the + Python package where the DataSource is located. + +#. Extend the :ref:`base-configuration`'s + :ref:`datasource_list` to include the name of + the custom DataSource. 
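+
+Example
+=======
+
+A minimal sketch of such an out-of-tree DataSource (names and returned values
+are illustrative; a real implementation would fetch platform data and handle
+errors):
+
+.. code-block:: python
+
+   from cloudinit import sources
+
+
+   class DataSourceMyCloud(sources.DataSource):
+
+       dsname = "MyCloud"
+
+       def _get_data(self):
+           # Populate metadata and user data from the platform here.
+           self.metadata = {
+               "instance-id": "i-example",
+               "local-hostname": "myhost",
+           }
+           self.userdata_raw = "#cloud-config\n{}"
+           return True
+
+
+   # Used by cloud-init to match this DataSource against its dependencies.
+   datasources = [
+       (DataSourceMyCloud, (sources.DEP_FILESYSTEM,)),
+   ]
+
+
+   def get_datasource_list(depends):
+       return sources.list_from_depends(depends, datasources)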
diff --git a/doc/rtd/reference/custom_modules/custom_mergers.rst b/doc/rtd/reference/custom_modules/custom_mergers.rst new file mode 100644 index 00000000000..b1af2c1d9f6 --- /dev/null +++ b/doc/rtd/reference/custom_modules/custom_mergers.rst @@ -0,0 +1,60 @@ +.. _custom_mergers: + +Custom Mergers +************** + +It is possible for users to inject their own :ref:`merging` +files to handle specific types of merging as they choose (the +basic ones included will handle lists, dicts, and strings). + +A `merge class` is a class definition providing functions that can be used +to merge a given type with another given type. + +An example of one of these `merging classes` is the following: + +.. code-block:: python + + class Merger: + def __init__(self, merger, opts): + self._merger = merger + self._overwrite = 'overwrite' in opts + + # This merging algorithm will attempt to merge with + # another dictionary, on encountering any other type of object + # it will not merge with said object, but will instead return + # the original value + # + # On encountering a dictionary, it will create a new dictionary + # composed of the original and the one to merge with, if 'overwrite' + # is enabled then keys that exist in the original will be overwritten + # by keys in the one to merge with (and associated values). Otherwise + # if not in overwrite mode the 2 conflicting keys themselves will + # be merged. + def _on_dict(self, value, merge_with): + if not isinstance(merge_with, (dict)): + return value + merged = dict(value) + for (k, v) in merge_with.items(): + if k in merged: + if not self._overwrite: + merged[k] = self._merger.merge(merged[k], v) + else: + merged[k] = v + else: + merged[k] = v + return merged + +There is an ``_on_dict`` method here that will be given a +source value, and a value to merge with. The result will be the merged object. + +This code itself is called by another merging class which "directs" the +merging to happen by analysing the object types to merge, and attempting to +find a known object that will merge that type. An example of this can be found +in the :file:`mergers/__init__.py` file (see ``LookupMerger`` and +``UnknownMerger``). + +Note how each +merge can have options associated with it, which affect how the merging is +performed. For example, a dictionary merger can be told to overwrite instead +of attempting to merge, or a string merger can be told to append strings +instead of discarding other strings to merge with. diff --git a/doc/rtd/reference/index.rst b/doc/rtd/reference/index.rst index 14e754b295f..d1791fa9631 100644 --- a/doc/rtd/reference/index.rst +++ b/doc/rtd/reference/index.rst @@ -25,3 +25,4 @@ matrices and so on. ubuntu_stable_release_updates.rst breaking_changes.rst user_files.rst + custom_modules.rst diff --git a/doc/rtd/reference/merging.rst b/doc/rtd/reference/merging.rst index 7f1fc022f17..097892e2536 100644 --- a/doc/rtd/reference/merging.rst +++ b/doc/rtd/reference/merging.rst @@ -94,64 +94,8 @@ merging is done on other types. Customisation ============= -Because the above merging algorithm may not always be desired (just as the -previous merging algorithm was not always the preferred one), the concept of -customised merging was introduced through `merge classes`. - -A `merge class` is a class definition providing functions that can be used -to merge a given type with another given type. - -An example of one of these `merging classes` is the following: - -.. 
code-block:: python - - class Merger: - def __init__(self, merger, opts): - self._merger = merger - self._overwrite = 'overwrite' in opts - - # This merging algorithm will attempt to merge with - # another dictionary, on encountering any other type of object - # it will not merge with said object, but will instead return - # the original value - # - # On encountering a dictionary, it will create a new dictionary - # composed of the original and the one to merge with, if 'overwrite' - # is enabled then keys that exist in the original will be overwritten - # by keys in the one to merge with (and associated values). Otherwise - # if not in overwrite mode the 2 conflicting keys themselves will - # be merged. - def _on_dict(self, value, merge_with): - if not isinstance(merge_with, (dict)): - return value - merged = dict(value) - for (k, v) in merge_with.items(): - if k in merged: - if not self._overwrite: - merged[k] = self._merger.merge(merged[k], v) - else: - merged[k] = v - else: - merged[k] = v - return merged - -As you can see, there is an ``_on_dict`` method here that will be given a -source value, and a value to merge with. The result will be the merged object. - -This code itself is called by another merging class which "directs" the -merging to happen by analysing the object types to merge, and attempting to -find a known object that will merge that type. An example of this can be found -in the :file:`mergers/__init__.py` file (see ``LookupMerger`` and -``UnknownMerger``). - -So, following the typical ``cloud-init`` approach of allowing source code to -be downloaded and used dynamically, it is possible for users to inject their -own merging files to handle specific types of merging as they choose (the -basic ones included will handle lists, dicts, and strings). Note how each -merge can have options associated with it, which affect how the merging is -performed. For example, a dictionary merger can be told to overwrite instead -of attempting to merge, or a string merger can be told to append strings -instead of discarding other strings to merge with. +Custom 3rd party mergers can be defined; for more information, see +:ref:`custom_mergers`. 
How to activate =============== From 15200a002427e28a2c1af088143865392dd1ac71 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Fri, 26 Jul 2024 14:00:50 -0600 Subject: [PATCH 051/131] chore: Deprecate ENI as an input configuration format (#5561) --- cloudinit/sources/DataSourceConfigDrive.py | 8 ++++++++ cloudinit/sources/DataSourceNoCloud.py | 7 +++++++ .../reference/network-config-format-eni.rst | 20 ------------------- doc/rtd/reference/network-config.rst | 8 -------- 4 files changed, 15 insertions(+), 28 deletions(-) delete mode 100644 doc/rtd/reference/network-config-format-eni.rst diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 3f82c89ea42..d5db34cd1d7 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -176,6 +176,14 @@ def network_config(self): elif self.network_eni is not None: self._network_config = eni.convert_eni_data(self.network_eni) LOG.debug("network config provided via converted eni data") + util.deprecate( + deprecated="Eni network configuration in ConfigDrive", + deprecated_version="24.3", + extra_message=( + "You can use openstack's network " + "configuration format instead" + ), + ) else: LOG.debug("no network configuration available") return self._network_config diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 69ebab91479..291da950cac 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -271,6 +271,13 @@ def check_instance_id(self, sys_cfg): def network_config(self): if self._network_config is None: if self._network_eni is not None: + util.deprecate( + deprecated="Eni network configuration in NoCloud", + deprecated_version="24.3", + extra_message=( + "You can use network v1 or network v2 instead" + ), + ) self._network_config = eni.convert_eni_data(self._network_eni) return self._network_config diff --git a/doc/rtd/reference/network-config-format-eni.rst b/doc/rtd/reference/network-config-format-eni.rst deleted file mode 100644 index be7bbeb29ec..00000000000 --- a/doc/rtd/reference/network-config-format-eni.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. _network_config_eni: - -Network configuration ENI (legacy) -********************************** - -``Cloud-init`` supports reading and writing network config in the ``ENI`` -format which is consumed by the ``ifupdown`` tool to parse and apply network -configuration. - -As an input format this is **legacy**. In cases where ENI format is available -and another format is also available, ``cloud-init`` will prefer to use the -other, newer format. - -This can happen in either :ref:`datasource_nocloud` or -:ref:`datasource_openstack` datasources. - -Please reference existing `documentation`_ for the -:file:`/etc/network/interfaces(5)` format. - -.. 
_documentation: http://manpages.ubuntu.com/manpages/trusty/en/man5/interfaces.5.html diff --git a/doc/rtd/reference/network-config.rst b/doc/rtd/reference/network-config.rst index 2e95550e61a..61a12167d74 100644 --- a/doc/rtd/reference/network-config.rst +++ b/doc/rtd/reference/network-config.rst @@ -126,7 +126,6 @@ The following datasources optionally provide network configuration: - :ref:`datasource_config_drive` - `OpenStack Metadata Service Network`_ - - :ref:`network_config_eni` - :ref:`datasource_digital_ocean` @@ -140,15 +139,9 @@ The following datasources optionally provide network configuration: - :ref:`network_config_v1` - :ref:`network_config_v2` - - :ref:`network_config_eni` - -- :ref:`datasource_opennebula` - - - :ref:`network_config_eni` - :ref:`datasource_openstack` - - :ref:`network_config_eni` - `OpenStack Metadata Service Network`_ - :ref:`datasource_smartos` @@ -168,7 +161,6 @@ For more information on network configuration formats: .. toctree:: :maxdepth: 1 - network-config-format-eni.rst network-config-format-v1.rst network-config-format-v2.rst From f9ab856f40a99e5d2696ec47ca2049ccaebb3b4c Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 29 Jul 2024 15:35:52 -0400 Subject: [PATCH 052/131] docs: Overhaul user data formats documentation (#5551) Fixes GH-4739 --- doc/examples/part-handler.txt | 68 ++- doc/rtd/explanation/boot.rst | 2 +- doc/rtd/explanation/format.rst | 425 ++++++++++++------ doc/rtd/explanation/instancedata.rst | 5 +- doc/rtd/explanation/vendordata.rst | 14 +- doc/rtd/reference/base_config_reference.rst | 8 + doc/rtd/reference/custom_modules.rst | 8 +- .../custom_modules/custom_part_handlers.rst | 32 ++ doc/rtd/spelling_word_list.txt | 2 + 9 files changed, 379 insertions(+), 185 deletions(-) create mode 100644 doc/rtd/reference/custom_modules/custom_part_handlers.rst diff --git a/doc/examples/part-handler.txt b/doc/examples/part-handler.txt index 7cc356f6346..020c8302f80 100644 --- a/doc/examples/part-handler.txt +++ b/doc/examples/part-handler.txt @@ -1,22 +1,58 @@ #part-handler +"""This is a trivial example part-handler that creates a file with the path +specified in the payload. It performs no input checking or error handling. + +To use it, first save the file you are currently viewing into your current +working directory. Then run the following: +``` +$ echo '/var/tmp/my_path' > part +$ cloud-init devel make-mime -a part-handler.py:part-handler -a part:x-my-path --force > user-data +``` + +This will create a mime file with the contents of 'part' and the +part-handler. You can now pass 'user-data' to your cloud of choice. + +When run, cloud-init will have created an empty file at /var/tmp/my_path. +""" + +import pathlib +from typing import Any + +from cloudinit.cloud import Cloud + + def list_types(): - # return a list of mime-types that are handled by this module - return(["text/plain", "text/go-cubs-go"]) - -def handle_part(data, ctype, filename, payload): - # data: the cloudinit object - # ctype: '__begin__', '__end__', or the specific mime-type of the part - # filename: the filename for the part, or dynamically generated part if - # no filename is given attribute is present - # payload: the content of the part (empty for begin or end) + """Return a list of mime-types that are handled by this module.""" + return ["text/x-my-path"] + + +def handle_part(data: Cloud, ctype: str, filename: str, payload: Any): + """Handle a part with the given mime-type. + + This function will get called multiple times. 
The first time is + to allow any initial setup needed to handle parts. It will then get + called once for each part matching the mime-type returned by `list_types`. + Finally, it will get called one last time to allow for any final + teardown. + + :data: A `Cloud` instance. This will be the same instance for each call + to handle_part. + :ctype: '__begin__', '__end__', or the mime-type + (for this example 'text/x-my-path') of the part + :filename: The filename for the part as defined in the MIME archive, + or dynamically generated part if no filename is given + :payload: The content of the part. This will be + `None` when `ctype` is '__begin__' or '__end__'. + """ if ctype == "__begin__": - print("my handler is beginning") - return + # Any custom setup needed before handling payloads + return + if ctype == "__end__": - print("my handler is ending") - return + # Any custom teardown needed after handling payloads can happen here + return - print(f"==== received ctype={ctype} filename={filename} ====") - print(payload) - print(f"==== end ctype={ctype} filename={filename}") + # If we've made it here, we're dealing with a real payload, so handle + # it appropriately + pathlib.Path(payload.strip()).touch() diff --git a/doc/rtd/explanation/boot.rst b/doc/rtd/explanation/boot.rst index b1421a209a5..a975ca7a093 100644 --- a/doc/rtd/explanation/boot.rst +++ b/doc/rtd/explanation/boot.rst @@ -137,7 +137,7 @@ mounted, including ones that have stale (previous instance) references in :file:`/etc/fstab`. As such, entries in :file:`/etc/fstab` other than those necessary for cloud-init to run should not be done until after this stage. -A part-handler and :ref:`boothooks` +A part-handler and :ref:`boothooks` will run at this stage. After this stage completes, expect to be able to access the system via serial diff --git a/doc/rtd/explanation/format.rst b/doc/rtd/explanation/format.rst index 8f14ccdb6c5..bed2b61af11 100644 --- a/doc/rtd/explanation/format.rst +++ b/doc/rtd/explanation/format.rst @@ -3,18 +3,53 @@ User data formats ***************** -User data is opaque configuration data provided by a platform to an instance at -launch configure the instance. User data can be one of the following types. +User data is configuration data provided by a user of a cloud platform to an +instance at launch. User data can be passed to cloud-init in any of many +formats documented here. + +Configuration types +=================== + +User data formats can be categorized into those that directly configure the +instance, and those that serve as a container, template, or means to obtain +or modify another configuration. + +Formats that directly configure the instance: + +- `Cloud config data`_ +- `User data script`_ +- `Cloud boothook`_ + +Formats that deal with other user data formats: + +- `Include file`_ +- `Jinja template`_ +- `MIME multi-part archive`_ +- `Cloud config archive`_ +- `Part handler`_ +- `Gzip compressed content`_ .. _user_data_formats-cloud_config: Cloud config data ================= -Cloud-config is the preferred user data format. The cloud config format is a -declarative syntax which uses `YAML version 1.1`_ with keys which describe -desired instance state. Cloud-config can be used to define how an instance -should be configured in a human-friendly format. +Example +------- + +.. code-block:: yaml + + #cloud-config + password: password + chpasswd: + expire: False + +Explanation +----------- + +Cloud-config can be used to define how an instance should be configured +in a human-friendly format. 
The cloud config format uses `YAML`_ with +keys which describe desired instance state. These things may include: @@ -24,93 +59,190 @@ These things may include: - importing certain SSH keys or host keys - *and many more...* -See the :ref:`yaml_examples` section for a commented set of examples of -supported cloud config formats. -Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when -using a MIME archive. +Many modules are available to process cloud-config data. These modules +may run once per instance, every boot, or once ever. See the associated +module to determine the run frequency. -.. note:: - Cloud config data can also render cloud instance metadata variables using - :ref:`jinja templates `. +For more information, see the cloud config +:ref:`example configurations ` or the cloud config +:ref:`modules reference`. .. _user_data_script: User data script ================ -Typically used by those who just want to execute a shell script. +Example +------- + +.. code-block:: shell + + #!/bin/sh + echo "Hello World" > /var/tmp/output.txt + +Explanation +----------- -Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME -archive. +A user data script is a single script to be executed once per instance. +User data scripts are run relatively late in the boot process, during +cloud-init's :ref:`final stage` as part of the +:ref:`cc_scripts_user` module. When run, +the environment variable ``INSTANCE_ID`` is set to the current instance ID +for use within the script. -User data scripts can optionally render cloud instance metadata variables using -:ref:`jinja templates `. +.. _user_data_formats-cloud_boothook: -Example script --------------- -Create a script file :file:`myscript.sh` that contains the following: +Cloud boothook +============== -.. code-block:: +Simple Example +-------------- + +.. code-block:: shell - #!/bin/sh - echo "Hello World. The time is now $(date -R)!" | tee /root/output.txt + #cloud-boothook + #!/bin/sh + echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts + +Example of once-per-instance script +----------------------------------- -Now run: +.. code-block:: bash + + #cloud-boothook + #!/bin/sh + + PERSIST_ID=/var/lib/cloud/first-instance-id + _id="" + if [ -r $PERSIST_ID ]; then + _id=$(cat /var/lib/cloud/first-instance-id) + fi + + if [ -z "$_id" ] || [ "$INSTANCE_ID" != "$_id" ]; then + echo 192.168.1.130 us.archive.ubuntu.com >> /etc/hosts + fi + echo $INSTANCE_ID > $PERSIST_ID -.. code-block:: shell-session +Explanation +----------- - $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9 +A cloud boothook is similar to a :ref:`user data script` +in that it is a script run on boot. When run, +the environment variable ``INSTANCE_ID`` is set to the current instance ID +for use within the script. -Kernel command line -=================== +The boothook is different in that: -When using the NoCloud datasource, users can pass user data via the kernel -command line parameters. See the :ref:`NoCloud datasource` -and :ref:`explanation/kernel-command-line:Kernel command line` documentation -for more details. +* It is run very early in boot, during the :ref:`network` stage, + before any cloud-init modules are run. +* It is run on every boot -Gzip compressed content -======================= +Include file +============ -Content found to be gzip compressed will be uncompressed. -The uncompressed data will then be used as if it were not compressed. -This is typically useful because user data is limited to ~16384 [#]_ bytes. +Example +------- + +.. 
code-block:: text + + #include + https://raw.githubusercontent.com/canonical/cloud-init/403f70b930e3ce0f05b9b6f0e1a38d383d058b53/doc/examples/cloud-config-run-cmds.txt + https://raw.githubusercontent.com/canonical/cloud-init/403f70b930e3ce0f05b9b6f0e1a38d383d058b53/doc/examples/cloud-config-boot-cmds.txt + +Explanation +----------- + +An include file contains a list of URLs, one per line. Each of the URLs will +be read, and their content can be any kind of user data format, both base +config and meta config. If an error occurs reading a file, the remaining files +will not be read. + +Jinja template +============== + +Example cloud-config +-------------------- + +.. code-block:: yaml + + ## template: jinja + #cloud-config + runcmd: + - echo 'Running on {{ v1.cloud_name }}' > /var/tmp/cloud_name + +Example user data script +------------------------ + +.. code-block:: shell + + ## template: jinja + #!/bin/sh + echo 'Current instance id: {{ v1.instance_id }}' > /var/tmp/instance_id + +Explanation +----------- + +`Jinja templating `_ may be used for +cloud-config and user data scripts. Any +:ref:`instance-data variables` may be used +as jinja template variables. Any jinja templated configuration must contain +the original header along with the new jinja header above it. + +.. note:: + Use of Jinja templates is ONLY supported for cloud-config and user data + scripts. Jinja templates are not supported for cloud-boothooks or + meta configs. + +.. _user_data_formats-mime_archive: MIME multi-part archive ======================= -This list of rules is applied to each part of this multi-part file. +Example +------- + +.. code-block:: + + Content-Type: multipart/mixed; boundary="===============2389165605550749110==" + MIME-Version: 1.0 + Number-Attachments: 2 + + --===============2389165605550749110== + Content-Type: text/cloud-boothook; charset="us-ascii" + MIME-Version: 1.0 + Content-Transfer-Encoding: 7bit + Content-Disposition: attachment; filename="part-001" + + #!/bin/sh + echo "this is from a boothook." > /var/tmp/boothook.txt + + --===============2389165605550749110== + Content-Type: text/cloud-config; charset="us-ascii" + MIME-Version: 1.0 + Content-Transfer-Encoding: 7bit + Content-Disposition: attachment; filename="part-002" + + bootcmd: + - echo "this is from a cloud-config." > /var/tmp/bootcmd.txt + --===============2389165605550749110==-- + +Explanation +----------- + Using a MIME multi-part file, the user can specify more than one type of data. For example, both a user data script and a cloud-config type could be specified. +Each part must specify a valid +:ref:`content type`. Supported content-types +may also be listed from the ``cloud-init`` subcommand :command:`make-mime`: .. code-block:: shell-session $ cloud-init devel make-mime --list-types -Example output: - -.. code-block:: - - cloud-boothook - cloud-config - cloud-config-archive - cloud-config-jsonp - jinja2 - part-handler - x-include-once-url - x-include-url - x-shellscript - x-shellscript-per-boot - x-shellscript-per-instance - x-shellscript-per-once - Helper subcommand to generate MIME messages ------------------------------------------- @@ -121,8 +253,7 @@ The :command:`make-mime` subcommand takes pairs of (filename, "text/" mime subtype) separated by a colon (e.g., ``config.yaml:cloud-config``) and emits a MIME multipart message to :file:`stdout`. 
-Examples --------- +**MIME subcommand Examples** Create user data containing both a cloud-config (:file:`config.yaml`) and a shell script (:file:`script.sh`) @@ -141,102 +272,56 @@ Create user data containing 3 shell scripts: $ cloud-init devel make-mime -a always.sh:x-shellscript-per-boot -a instance.sh:x-shellscript-per-instance -a once.sh:x-shellscript-per-once -``include`` file -================ - -This content is an :file:`include` file. - -The file contains a list of URLs, one per line. Each of the URLs will be read -and their content will be passed through this same set of rules, i.e., the -content read from the URL can be gzipped, MIME multi-part, or plain text. If -an error occurs reading a file the remaining files will not be read. -Begins with: ``#include`` or ``Content-Type: text/x-include-url`` when using -a MIME archive. +Cloud config archive +==================== -``cloud-boothook`` -================== +Example +------- -One line ``#cloud-boothook`` header and then executable payload. +.. code-block:: shell -This is run very early on the boot process, during the -:ref:`Network boot stage`, even before ``cc_bootcmd``. + #cloud-config-archive + - type: "text/cloud-boothook" + content: | + #!/bin/sh + echo "this is from a boothook." > /var/tmp/boothook.txt + - type: "text/cloud-config" + content: | + bootcmd: + - echo "this is from a cloud-config." > /var/tmp/bootcmd.txt -This can be used when something has to be configured very early on boot, -potentially on every boot, with less convenience as ``cc_bootcmd`` but more -flexibility. +Explanation +----------- -.. note:: - Boothooks are executed on every boot. - The environment variable ``INSTANCE_ID`` will be set to the current instance - ID. ``INSTANCE_ID`` can be used to implement a `once-per-instance` type of - functionality. +A cloud-config-archive is a way to specify more than one type of data +using YAML. Since building a MIME multipart archive can be somewhat unwieldly +to build by hand or requires using a cloud-init helper utility, the +cloud-config-archive provides a simpler alternative to building the MIME +multi-part archive for those that would prefer to use YAML. -Begins with: ``#cloud-boothook``. +The format is a list of dictionaries. -Example with simple script --------------------------- +Required fields: -.. code-block:: bash +* ``type``: The :ref:`Content-Type` + identifier for the type of user data in content +* ``content``: The user data configuration - #cloud-boothook - #!/bin/sh - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts +Optional fields: -Example of once-per-instance script ------------------------------------ +* ``launch-index``: The EC2 Launch-Index (if applicable) +* ``filename``: This field is only used if using a user data format that + requires a filename in a MIME part. This is unrelated to any local system + file. -.. code-block:: bash - - #cloud-boothook - #!/bin/sh - - PERSIST_ID=/var/lib/cloud/first-instance-id - _id="" - if [ -r $PERSIST_ID ]; then - _id=$(cat /var/lib/cloud/first-instance-id) - fi - - if [ -z $_id ] || [ $INSTANCE_ID != $_id ]; then - echo 192.168.1.130 us.archive.ubuntu.com >> /etc/hosts - fi - sudo echo $INSTANCE_ID > $PERSIST_ID +All other fields will be interpreted as a MIME part header. .. _user_data_formats-part_handler: -Part-handler +Part handler ============ -This is a `part-handler`: It contains custom code for either supporting new -mime-types in multi-part user data, or overriding the existing handlers for -supported mime-types. 
It will be written to a file in -:file:`/var/lib/cloud/data` based on its filename (which is generated). - -This must be Python code that contains a ``list_types`` function and a -``handle_part`` function. Once the section is read the ``list_types`` method -will be called. It must return a list of mime-types that this `part-handler` -handles. Since MIME parts are processed in order, a `part-handler` part -must precede any parts with mime-types it is expected to handle in the same -user data. - -The ``handle_part`` function must be defined like: - -.. code-block:: python - - def handle_part(data, ctype, filename, payload): - # data = the cloudinit object - # ctype = "__begin__", "__end__", or the mime-type of the part that is being handled. - # filename = the filename of the part (or a generated filename if none is present in mime data) - # payload = the parts' content - -``Cloud-init`` will then call the ``handle_part`` function once before it -handles any parts, once per part received, and once after all parts have been -handled. The ``'__begin__'`` and ``'__end__'`` sentinels allow the part -handler to do initialisation or teardown before or after receiving any parts. - -Begins with: ``#part-handler`` or ``Content-Type: text/part-handler`` when -using a MIME archive. - Example ------- @@ -244,17 +329,63 @@ Example :language: python :linenos: -Also, `this blog post`_ offers another example for more advanced usage. -Disabling user data -=================== +Explanation +----------- + +A part handler contains custom code for either supporting new +mime-types in multi-part user data or for overriding the existing handlers for +supported mime-types. + +See the :ref:`custom part handler` reference documentation +for details on writing custom handlers along with an annotated example. + +`This blog post`_ offers another example for more advanced usage. + +Gzip compressed content +======================= + +Content found to be gzip compressed will be uncompressed. +The uncompressed data will then be used as if it were not compressed. +This is typically useful because user data size may be limited based on +cloud platform. + +.. _user_data_formats-content_types: + +Headers and content types +========================= + +In order for cloud-init to recognize which user data format is being used, +the user data must contain a header. Additionally, if the user data +is being passed as a multi-part message, such as MIME, cloud-config-archive, +or part-handler, the content-type for each part must also be set +appropriately. + +The table below lists the headers and content types for each user data format. +Note that gzip compressed content is not represented here as it gets passed +as binary data and so may be processed automatically. + ++--------------------+-----------------------------+-------------------------+ +|User data format |Header |Content-Type | ++====================+=============================+=========================+ +|Cloud config data |#cloud-config |text/cloud-config | ++--------------------+-----------------------------+-------------------------+ +|User data script |#! 
|text/x-shellscript       |
+--------------------+-----------------------------+-------------------------+
|Cloud boothook      |#cloud-boothook              |text/cloud-boothook      |
+--------------------+-----------------------------+-------------------------+
|MIME multi-part     |Content-Type: multipart/mixed|multipart/mixed          |
+--------------------+-----------------------------+-------------------------+
|Cloud config archive|#cloud-config-archive        |text/cloud-config-archive|
+--------------------+-----------------------------+-------------------------+
|Jinja template      |## template: jinja           |text/jinja               |
+--------------------+-----------------------------+-------------------------+
|Include file        |#include                     |text/x-include-url       |
+--------------------+-----------------------------+-------------------------+
|Part handler        |#part-handler                |text/part-handler        |
+--------------------+-----------------------------+-------------------------+
 
-``Cloud-init`` can be configured to ignore any user data provided to instance.
-This allows custom images to prevent users from accidentally breaking closed
-appliances. Setting ``allow_userdata: false`` in the configuration will disable
-``cloud-init`` from processing user data.
 
 .. _make-mime: https://github.com/canonical/cloud-init/blob/main/cloudinit/cmd/devel/make_mime.py
-.. _YAML version 1.1: https://yaml.org/spec/1.1/current.html
-.. [#] See your cloud provider for applicable user-data size limitations...
-.. _this blog post: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html
+.. _YAML: https://yaml.org/spec/1.1/current.html
+.. _This blog post: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html
diff --git a/doc/rtd/explanation/instancedata.rst b/doc/rtd/explanation/instancedata.rst
index 650efa79452..d2aadc083ee 100644
--- a/doc/rtd/explanation/instancedata.rst
+++ b/doc/rtd/explanation/instancedata.rst
@@ -165,7 +165,10 @@ Storage locations
    unredacted JSON blob.
 * :file:`/run/cloud-init/combined-cloud-config.json`: root-readable
   unredacted JSON blob. Any meta-data, vendor-data and user-data overrides
-  are applied to the :file:`/run/cloud-init/combined-cloud-config.json` config values.
+  are applied to the :file:`/run/cloud-init/combined-cloud-config.json` config
+  values.
+
+.. _instance_metadata-keys:
 
 :file:`instance-data.json` top level keys
 -----------------------------------------
diff --git a/doc/rtd/explanation/vendordata.rst b/doc/rtd/explanation/vendordata.rst
index 621fcdeb3d9..a2340c2fab9 100644
--- a/doc/rtd/explanation/vendordata.rst
+++ b/doc/rtd/explanation/vendordata.rst
@@ -20,19 +20,7 @@ caveats:
    required for the instance to run, then vendor data should not be used.
 4. User-supplied cloud-config is merged over cloud-config from vendor data.
 
-Users providing cloud-config data can use the ``#cloud-config-jsonp`` method
-to more finely control their modifications to the vendor-supplied
-cloud-config. For example, if both vendor and user have provided ``runcmd``
-then the default merge handler will cause the user's ``runcmd`` to override
-the one provided by the vendor. To append to ``runcmd``, the user could better
-provide multi-part input with a ``cloud-config-jsonp`` part like:
-
-.. code:: yaml
-
-   #cloud-config-jsonp
-   [{ "op": "add", "path": "/runcmd", "value": ["my", "command", "here"]}]
-
-Further, we strongly advise vendors to not "be evil". By evil, we mean any
+Further, we strongly advise vendors to protect users against any
 action that could compromise a system.
 Since users trust you, please take care to make sure that any vendor data
 is safe, atomic, idempotent and does not put your users at risk.
diff --git a/doc/rtd/reference/base_config_reference.rst b/doc/rtd/reference/base_config_reference.rst
index 82484118553..2d13675e68c 100644
--- a/doc/rtd/reference/base_config_reference.rst
+++ b/doc/rtd/reference/base_config_reference.rst
@@ -267,6 +267,14 @@ Format is a dict with ``enabled`` and ``prefix`` keys:
   ``vendor_data``.
 * ``prefix``: A path to prepend to any ``vendor_data``-provided script.
 
+``allow_userdata``
+^^^^^^^^^^^^^^^^^^
+
+A boolean value to disable the use of user data.
+This allows custom images to prevent users from accidentally breaking closed
+appliances. Setting ``allow_userdata: false`` in the configuration will prevent
+``cloud-init`` from processing user data.
+
 ``manual_cache_clean``
 ^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/doc/rtd/reference/custom_modules.rst b/doc/rtd/reference/custom_modules.rst
index 3145e723bd7..4ce423dd52b 100644
--- a/doc/rtd/reference/custom_modules.rst
+++ b/doc/rtd/reference/custom_modules.rst
@@ -6,13 +6,6 @@ custom / out-of-tree functionality.
 
 .. _custom_formats:
 
-Custom Formats
-==============
-
-One can define custom data formats by presenting a
-:ref:`#part-handler`
-config via user-data or vendor-data.
-
 ----
 
 .. toctree::
@@ -22,3 +15,4 @@ config via user-data or vendor-data.
    custom_modules/custom_configuration_module.rst
    custom_modules/custom_datasource.rst
    custom_modules/custom_mergers.rst
+   custom_modules/custom_part_handlers.rst
diff --git a/doc/rtd/reference/custom_modules/custom_part_handlers.rst b/doc/rtd/reference/custom_modules/custom_part_handlers.rst
new file mode 100644
index 00000000000..501dc7af7be
--- /dev/null
+++ b/doc/rtd/reference/custom_modules/custom_part_handlers.rst
@@ -0,0 +1,32 @@
+.. _custom_part_handler:
+
+Custom Part Handler
+*******************
+
+This must be Python code that contains a ``list_types`` function and a
+``handle_part`` function.
+
+The ``list_types`` function takes no arguments and must return a list
+of :ref:`content types` that this
+part handler handles. These can include custom content types or built-in
+content types that this handler will override.
+
+The ``handle_part`` function takes 4 arguments and returns nothing. See the
+example for how exactly each argument is used.
+
+To use this part handler, it must be included in a MIME multipart file as
+part of the :ref:`user data`.
+Since MIME parts are processed in order, a part handler part must precede
+any parts with mime-types that it is expected to handle in the same user data.
+
+``Cloud-init`` will then call the ``handle_part`` function once before it
+handles any parts, once per part received, and once after all parts have been
+handled. These additional calls allow for initialisation or teardown before
+or after receiving any parts.
+
+Example
+=======
+
+.. literalinclude:: ../../../examples/part-handler.txt
+   :language: python
+   :linenos:
diff --git a/doc/rtd/spelling_word_list.txt b/doc/rtd/spelling_word_list.txt
index 239b3b49475..5f4783af65b 100644
--- a/doc/rtd/spelling_word_list.txt
+++ b/doc/rtd/spelling_word_list.txt
@@ -24,6 +24,7 @@ bigstep
 boolean
 bootcmd
 boothook
+boothooks
 btrfs
 busybox
 byobu
@@ -211,6 +212,7 @@ scaleway
 seedurl
 serverurl
 setup-keymap
+shellscript
 shortid
 sigonly
 sk

From f8c1b51f5fa8785301fafc4b5f581546bbd1a56e Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Mon, 29 Jul 2024 16:26:55 -0600
Subject: [PATCH 053/131] fix(actions): doc labeler needs all clause instead of default any (#5568)

Unspecified base match in labeler assumes 'any' for each match clause.
When specifying base-branch and --any-glob-to-any-file either one of
these cases would result in a successful match which would label all
PRs against main as documentation.

We need to explicitly specify 'all:' in our labeler match config to
ensure BOTH:
 * matching file paths related to documentation
 -AND-
 * targeting a merge against 'main' branch
---
 .github/labeler.yml | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/.github/labeler.yml b/.github/labeler.yml
index 9eb6dcc33b4..ed6ea52dd22 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -1,6 +1,7 @@
 documentation:
-- changed-files:
-  - any-glob-to-any-file:
-    - 'doc/*'
-    - 'cloudinit/config/schema/*'
-- base-branch: 'main'
+- all:
+  - changed-files:
+    - any-glob-to-any-file:
+      - 'doc/*'
+      - 'cloudinit/config/schema/*'
+  - base-branch: 'main'

From 00317d16dc417f6120391e2504782f755f2486d2 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Mon, 29 Jul 2024 16:48:30 -0600
Subject: [PATCH 054/131] chore(debian): Remove vestigial postinst and preinst
 code (#5569)

Both were version gated and do not run.
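
For illustration (not part of the original commit message): both scripts
gate their cleanup on dpkg --compare-versions against the last bad
version, and any modern package version compares greater, so the cleanup
bodies below can never fire. A hypothetical check on a dpkg-based system:

    $ dpkg --compare-versions "24.1" le "0.7.7~bzr1178" && echo "cleanup runs" || echo "cleanup skipped"
    cleanup skipped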
--- packages/debian/cloud-init.postinst | 16 ---------------- packages/debian/cloud-init.preinst | 20 -------------------- 2 files changed, 36 deletions(-) delete mode 100644 packages/debian/cloud-init.postinst delete mode 100644 packages/debian/cloud-init.preinst diff --git a/packages/debian/cloud-init.postinst b/packages/debian/cloud-init.postinst deleted file mode 100644 index cdd0466d6da..00000000000 --- a/packages/debian/cloud-init.postinst +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh -cleanup_lp1552999() { - local oldver="$1" last_bad_ver="0.7.7~bzr1178" - dpkg --compare-versions "$oldver" le "$last_bad_ver" || return 0 - local edir="/etc/systemd/system/multi-user.target.wants" - rm -f "$edir/cloud-config.service" "$edir/cloud-final.service" \ - "$edir/cloud-init-local.service" "$edir/cloud-init.service" -} - - -#DEBHELPER# - -if [ "$1" = "configure" ]; then - oldver="$2" - cleanup_lp1552999 "$oldver" -fi diff --git a/packages/debian/cloud-init.preinst b/packages/debian/cloud-init.preinst deleted file mode 100644 index 3c2af06d38d..00000000000 --- a/packages/debian/cloud-init.preinst +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh -# vi: ts=4 expandtab - -cleanup_lp1552999() { - local oldver="$1" last_bad_ver="0.7.7~bzr1178" - dpkg --compare-versions "$oldver" le "$last_bad_ver" || return 0 - local hdir="/var/lib/systemd/deb-systemd-helper-enabled" - hdir="$hdir/multi-user.target.wants" - local edir="/etc/systemd/system/multi-user.target.wants" - rm -f "$hdir/cloud-config.service" "$hdir/cloud-final.service" \ - "$hdir/cloud-init-local.service" "$hdir/cloud-init.service" -} - - -if [ "$1" = "upgrade" ]; then - oldver="$2" - cleanup_lp1552999 "$oldver" -fi - -#DEBHELPER# From f8d8a0cd4f8cbd816ce7f2403a3d3d1c4dee7a6e Mon Sep 17 00:00:00 2001 From: Yuanhang Sun Date: Tue, 30 Jul 2024 07:38:02 +0800 Subject: [PATCH 055/131] fix: add host template for AOSC (#5557) --- templates/hosts.aosc.tmpl | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 templates/hosts.aosc.tmpl diff --git a/templates/hosts.aosc.tmpl b/templates/hosts.aosc.tmpl new file mode 100644 index 00000000000..897cebcc115 --- /dev/null +++ b/templates/hosts.aosc.tmpl @@ -0,0 +1,23 @@ +## template:jinja +{# +This file (/etc/cloud/templates/hosts.aosc.tmpl) is only utilized +if enabled in cloud-config. Specifically, in order to enable it +you need to add the following to config: + manage_etc_hosts: True +-#} +# Your system has configured 'manage_etc_hosts' as True. +# As a result, if you wish for changes to this file to persist +# then you will need to either +# a.) make changes to the master file in /etc/cloud/templates/hosts.aosc.tmpl +# b.) 
change or remove the value of 'manage_etc_hosts' in +# /etc/cloud/cloud.cfg or cloud-config from user-data +# +# +{# The value '{{hostname}}' will be replaced with the local-hostname -#} +127.0.0.1 {{fqdn}} {{hostname}} +127.0.0.1 localhost + +# The following lines are desirable for IPv6 capable hosts +::1 localhost ip6-localhost ip6-loopback +ff02::1 ip6-allnodes +ff02::2 ip6-allrouters From b5d4f3fa16070610cbbbf32dd24616b53b95c5b5 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 31 Jul 2024 06:18:47 -0600 Subject: [PATCH 056/131] fix(actions): correct typo in cloudinit/config/schemas/ match (#5570) Also drop undesirable former doc-autolabel.yml workflow --- .github/labeler.yml | 4 ++-- .github/workflows/doc-autolabel.yml | 12 ------------ 2 files changed, 2 insertions(+), 14 deletions(-) delete mode 100644 .github/workflows/doc-autolabel.yml diff --git a/.github/labeler.yml b/.github/labeler.yml index ed6ea52dd22..eaf08134c34 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -3,5 +3,5 @@ documentation: - changed-files: - any-glob-to-any-file: - 'doc/*' - - 'cloudinit/config/schema/*' - - base-branch: 'main' + - 'cloudinit/config/schemas/*' + - base-branch: ['main'] diff --git a/.github/workflows/doc-autolabel.yml b/.github/workflows/doc-autolabel.yml deleted file mode 100644 index 54c065bdc6f..00000000000 --- a/.github/workflows/doc-autolabel.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: Label documentation changes automatically -on: -- pull_request_target - -jobs: - triage: - permissions: - contents: read - pull-requests: write - runs-on: ubuntu-latest - steps: - - uses: actions/labeler@v4 From e6b2e0fea7a9e776672e2f7677ad73ffaadd2d7b Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Wed, 31 Jul 2024 09:50:33 -0600 Subject: [PATCH 057/131] feat: Eliminate redundant configuration reads (#5536) When instance id hasn't changed and datasource hasn't changed, don't forcibly reload the configuration. --- cloudinit/stages.py | 24 +++-- tests/integration_tests/test_instance_id.py | 97 +++++++++++++++++++++ 2 files changed, 115 insertions(+), 6 deletions(-) create mode 100644 tests/integration_tests/test_instance_id.py diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 872905e39d1..d564cbbc289 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -11,6 +11,7 @@ import sys from collections import namedtuple from contextlib import suppress +from pathlib import Path from typing import Dict, Iterable, List, Optional, Set, Tuple, Union from cloudinit import ( @@ -459,8 +460,13 @@ def _reflect_cur_instance(self): # Remove the old symlink and attach a new one so # that further reads/writes connect into the right location idir = self._get_ipath() - util.del_file(self.paths.instance_link) - util.sym_link(idir, self.paths.instance_link) + destination = Path(self.paths.instance_link).resolve().absolute() + already_instancified = destination == Path(idir).absolute() + if already_instancified: + LOG.info("Instance link already exists, not recreating it.") + else: + util.del_file(self.paths.instance_link) + util.sym_link(idir, self.paths.instance_link) # Ensures these dirs exist dir_list = [] @@ -499,10 +505,16 @@ def _reflect_cur_instance(self): ) self._write_to_cache() - # Ensure needed components are regenerated - # after change of instance which may cause - # change of configuration - self._reset() + if already_instancified and previous_ds == ds: + LOG.info( + "Not re-loading configuration, instance " + "id and datasource have not changed." 
+ ) + # Ensure needed components are regenerated + # after change of instance which may cause + # change of configuration + else: + self._reset() return iid def previous_iid(self): diff --git a/tests/integration_tests/test_instance_id.py b/tests/integration_tests/test_instance_id.py new file mode 100644 index 00000000000..ed94b6b61c7 --- /dev/null +++ b/tests/integration_tests/test_instance_id.py @@ -0,0 +1,97 @@ +from typing import cast + +import pytest +from pycloudlib.lxd.instance import LXDInstance + +from cloudinit import subp +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.integration_settings import PLATFORM + +_INSTANCE_ID = 0 + + +def setup_meta_data(instance: LXDInstance): + """Increment the instance id and apply it to the instance.""" + global _INSTANCE_ID + _INSTANCE_ID += 1 + command = [ + "lxc", + "config", + "set", + instance.name, + f"user.meta-data=instance-id: test_{_INSTANCE_ID}", + ] + subp.subp(command) + + +# class TestInstanceID: +@pytest.mark.skipif( + PLATFORM not in ["lxd_container", "lxd_vm"], + reason="Uses lxd-specific behavior.", +) +@pytest.mark.lxd_setup.with_args(setup_meta_data) +@pytest.mark.lxd_use_exec +def test_instance_id_changes(client: IntegrationInstance): + """Verify instance id change behavior + + If the id from the datasource changes, cloud-init should update the + instance id link. + """ + client.execute("cloud-init status --wait") + # check that instance id is the one we set + assert ( + "test_1" + == client.execute("cloud-init query instance-id").stdout.rstrip() + ) + assert ( + "/var/lib/cloud/instances/test_1" + == client.execute( + "readlink -f /var/lib/cloud/instance" + ).stdout.rstrip() + ) + + instance = cast(LXDInstance, client.instance) + setup_meta_data(instance) + client.restart() + client.execute("cloud-init status --wait") + # check that instance id is the one we reset + assert ( + "test_2" + == client.execute("cloud-init query instance-id").stdout.rstrip() + ) + assert ( + "/var/lib/cloud/instances/test_2" + == client.execute( + "readlink -f /var/lib/cloud/instance" + ).stdout.rstrip() + ) + + +@pytest.mark.lxd_use_exec +def test_instance_id_no_changes(client: IntegrationInstance): + """Verify instance id no change behavior + + If the id from the datasource does not change, cloud-init should not + update the instance id link. + """ + instance_id = client.execute( + "cloud-init query instance-id" + ).stdout.rstrip() + assert ( + f"/var/lib/cloud/instances/{instance_id}" + == client.execute( + "readlink -f /var/lib/cloud/instance" + ).stdout.rstrip() + ) + client.restart() + client.execute("cloud-init status --wait") + assert ( + instance_id + == client.execute("cloud-init query instance-id").stdout.rstrip() + ) + assert ( + f"/var/lib/cloud/instances/{instance_id}" + == client.execute( + "readlink -f /var/lib/cloud/instance" + ).stdout.rstrip() + ) From 5322dca2f3da7bb9b8d6f1fac6ccb00ef33ef8ee Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Wed, 31 Jul 2024 16:07:37 -0600 Subject: [PATCH 058/131] fix(NoCloudNet): Add network-config support (#5566) This enables support for network config v2 and v1 to NoCloud when used with http / ftp / etc. BREAKING_CHANGE: Adds an additional network request to NoCloud. 
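
As a sketch of what this enables (hostname and paths are illustrative,
not taken from this patch): a NoCloud seed referenced via seedfrom can
now serve a fourth file, network-config, which read_seeded() requests
alongside the other three; this is the additional network request noted
above.

    $ ls /srv/seed/    # served at, e.g., http://10.0.0.2/seed/
    meta-data  network-config  user-data  vendor-data

    # illustrative kernel command line pointing NoCloud at the seed:
    ds=nocloud-net;s=http://10.0.0.2/seed/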
---
 cloudinit/sources/DataSourceNoCloud.py |  3 ++-
 cloudinit/sources/DataSourceOVF.py     |  2 +-
 cloudinit/util.py                      | 15 +++++++++---
 tests/unittests/test_util.py           | 32 ++++++++++++++++++++++----
 4 files changed, 42 insertions(+), 10 deletions(-)

diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 291da950cac..0bf6e7c4ee2 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -190,7 +190,7 @@ def _pp2d_callback(mp, data):
             # This could throw errors, but the user told us to do it
             # so if errors are raised, let them raise
-            (md_seed, ud, vd) = util.read_seeded(seedfrom, timeout=None)
+            md_seed, ud, vd, network = util.read_seeded(seedfrom, timeout=None)
             LOG.debug("Using seeded cache data from %s", seedfrom)
 
             # Values in the command line override those from the seed
@@ -199,6 +199,7 @@ def _pp2d_callback(mp, data):
             )
             mydata["user-data"] = ud
             mydata["vendor-data"] = vd
+            mydata["network-config"] = network
             found.append(seedfrom)
 
         # Now that we have exhausted any other places merge in the defaults
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index bd12f636a34..89fc5de8d66 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -92,7 +92,7 @@ def _get_data(self):
             LOG.debug("Seed from %s not supported by %s", seedfrom, self)
             return False
 
-        (md_seed, ud, vd) = util.read_seeded(seedfrom, timeout=None)
+        (md_seed, ud, vd, _) = util.read_seeded(seedfrom, timeout=None)
         LOG.debug("Using seeded cache data from %s", seedfrom)
 
         md = util.mergemanydict([md, md_seed])
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 19f1800928d..faa3e847b84 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -977,10 +977,11 @@ def read_optional_seed(fill, base="", ext="", timeout=5):
     'meta-data' entries
     """
     try:
-        md, ud, vd = read_seeded(base=base, ext=ext, timeout=timeout)
+        md, ud, vd, network = read_seeded(base=base, ext=ext, timeout=timeout)
         fill["user-data"] = ud
         fill["vendor-data"] = vd
         fill["meta-data"] = md
+        fill["network-config"] = network
         return True
     except url_helper.UrlError as e:
         if e.code == url_helper.NOT_FOUND:
@@ -1066,6 +1067,7 @@ def read_seeded(base="", ext="", timeout=5, retries=10):
         ud_url = base.replace("%s", "user-data" + ext)
         vd_url = base.replace("%s", "vendor-data" + ext)
         md_url = base.replace("%s", "meta-data" + ext)
+        network_url = base.replace("%s", "network-config" + ext)
     else:
         if features.NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH:
             if base[-1] != "/" and parse.urlparse(base).query == "":
@@ -1074,12 +1076,19 @@ def read_seeded(base="", ext="", timeout=5, retries=10):
         ud_url = "%s%s%s" % (base, "user-data", ext)
         vd_url = "%s%s%s" % (base, "vendor-data", ext)
         md_url = "%s%s%s" % (base, "meta-data", ext)
+        network_url = "%s%s%s" % (base, "network-config", ext)
+    network_resp = url_helper.read_file_or_url(
+        network_url, timeout=timeout, retries=retries
+    )
+    network = None
+    if network_resp.ok():
+        network = load_yaml(network_resp.contents)
     md_resp = url_helper.read_file_or_url(
         md_url, timeout=timeout, retries=retries
     )
     md = None
     if md_resp.ok():
-        md = load_yaml(decode_binary(md_resp.contents), default={})
+        md = load_yaml(md_resp.contents, default={})
 
     ud_resp = url_helper.read_file_or_url(
         ud_url, timeout=timeout, retries=retries
@@ -1101,7 +1110,7 @@ def read_seeded(base="", ext="", timeout=5, retries=10):
     else:
         LOG.debug("Error in vendor-data response")
 
-    return (md, ud, vd)
+    return md, ud, vd, network
 
 
 def read_conf_d(confd, *,
instance_data_file=None) -> dict: diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 8970fb4c863..2ceed7aa32c 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -2452,16 +2452,23 @@ class TestReadSeeded: def test_unicode_not_messed_up(self, tmpdir): ud = b"userdatablob" vd = b"vendordatablob" + network = b"test: 'true'" helpers.populate_dir( tmpdir.strpath, - {"meta-data": "key1: val1", "user-data": ud, "vendor-data": vd}, + { + "meta-data": "key1: val1", + "user-data": ud, + "vendor-data": vd, + "network-config": network, + }, ) - (found_md, found_ud, found_vd) = util.read_seeded( + found_md, found_ud, found_vd, found_network = util.read_seeded( tmpdir.strpath + os.path.sep ) assert found_md == {"key1": "val1"} assert found_ud == ud assert found_vd == vd + assert found_network == {"test": "true"} @pytest.mark.parametrize( "base, feature_flag, req_urls", @@ -2470,6 +2477,7 @@ def test_unicode_not_messed_up(self, tmpdir): "http://10.0.0.1/%s?qs=1", True, [ + "http://10.0.0.1/network-config?qs=1", "http://10.0.0.1/meta-data?qs=1", "http://10.0.0.1/user-data?qs=1", "http://10.0.0.1/vendor-data?qs=1", @@ -2480,6 +2488,7 @@ def test_unicode_not_messed_up(self, tmpdir): "https://10.0.0.1:8008/", True, [ + "https://10.0.0.1:8008/network-config", "https://10.0.0.1:8008/meta-data", "https://10.0.0.1:8008/user-data", "https://10.0.0.1:8008/vendor-data", @@ -2490,6 +2499,7 @@ def test_unicode_not_messed_up(self, tmpdir): "https://10.0.0.1:8008", True, [ + "https://10.0.0.1:8008/network-config", "https://10.0.0.1:8008/meta-data", "https://10.0.0.1:8008/user-data", "https://10.0.0.1:8008/vendor-data", @@ -2500,6 +2510,7 @@ def test_unicode_not_messed_up(self, tmpdir): "https://10.0.0.1:8008", False, [ + "https://10.0.0.1:8008network-config", "https://10.0.0.1:8008meta-data", "https://10.0.0.1:8008user-data", "https://10.0.0.1:8008vendor-data", @@ -2510,6 +2521,7 @@ def test_unicode_not_messed_up(self, tmpdir): "https://10.0.0.1:8008?qs=", True, [ + "https://10.0.0.1:8008?qs=network-config", "https://10.0.0.1:8008?qs=meta-data", "https://10.0.0.1:8008?qs=user-data", "https://10.0.0.1:8008?qs=vendor-data", @@ -2540,12 +2552,15 @@ def fake_response(url, timeout, retries): "NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH", feature_flag, ): - (found_md, found_ud, found_vd) = util.read_seeded(base) + found_md, found_ud, found_vd, found_network = util.read_seeded( + base + ) # Meta-data treated as YAML assert found_md == {"/meta-data": 1} # user-data, vendor-data read raw. 
It could be scripts or other formats
            assert found_ud == "/user-data: 1"
            assert found_vd == "/vendor-data: 1"
+           assert found_network == {"/network-config": 1}
            assert [
                mock.call(req_url, timeout=5, retries=10) for req_url in req_urls
            ] == m_read.call_args_list
@@ -2560,15 +2575,22 @@ def setUp(self):
 
     def test_unicode_not_messed_up(self):
         ud = b"userdatablob"
         vd = None
+        network = b"test: 'true'"
         helpers.populate_dir(
-            self.tmp, {"meta-data": "key1: val1", "user-data": ud}
+            self.tmp,
+            {
+                "meta-data": "key1: val1",
+                "user-data": ud,
+                "network-config": network,
+            },
         )
 
         sdir = self.tmp + os.path.sep
-        (found_md, found_ud, found_vd) = util.read_seeded(sdir)
+        found_md, found_ud, found_vd, found_network = util.read_seeded(sdir)
 
         self.assertEqual(found_md, {"key1": "val1"})
         self.assertEqual(found_ud, ud)
         self.assertEqual(found_vd, vd)
+        self.assertEqual(found_network, {"test": "true"})
 
 
 class TestEncode(helpers.TestCase):

From ea831d67618773bf9ea6f8a2acf0ccd5f6852f26 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Thu, 1 Aug 2024 20:55:53 -0600
Subject: [PATCH 059/131] fix: Integration tests (#5576)

String output changed in 7703634ec0.
Instance-id doesn't change on LXD / Focal.
---
 tests/integration_tests/test_instance_id.py               | 3 ++-
 tests/integration_tests/test_kernel_command_line_match.py | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/tests/integration_tests/test_instance_id.py b/tests/integration_tests/test_instance_id.py
index ed94b6b61c7..dc2fbb0f00a 100644
--- a/tests/integration_tests/test_instance_id.py
+++ b/tests/integration_tests/test_instance_id.py
@@ -6,6 +6,7 @@
 from cloudinit import subp
 from tests.integration_tests.instances import IntegrationInstance
 from tests.integration_tests.integration_settings import PLATFORM
+from tests.integration_tests.releases import CURRENT_RELEASE, FOCAL
 
 _INSTANCE_ID = 0
 
@@ -26,7 +27,7 @@ def setup_meta_data(instance: LXDInstance):
 
 # class TestInstanceID:
 @pytest.mark.skipif(
-    PLATFORM not in ["lxd_container", "lxd_vm"],
+    PLATFORM not in ["lxd_container", "lxd_vm"] or CURRENT_RELEASE == FOCAL,
     reason="Uses lxd-specific behavior.",
 )
 @pytest.mark.lxd_setup.with_args(setup_meta_data)
diff --git a/tests/integration_tests/test_kernel_command_line_match.py b/tests/integration_tests/test_kernel_command_line_match.py
index 2fce349d098..57abf513ecc 100644
--- a/tests/integration_tests/test_kernel_command_line_match.py
+++ b/tests/integration_tests/test_kernel_command_line_match.py
@@ -104,8 +104,8 @@ def test_lxd_datasource_kernel_override_nocloud_net(
     )
     assert url_val in client.execute("cloud-init query subplatform").stdout
     assert (
-        "Detected platform: DataSourceNoCloudNet [seed=None]"
-        "[dsmode=net]. Checking for active instance data"
+        "Detected platform: DataSourceNoCloudNet. Checking for active "
+        "instance data"
    ) in logs
 

From d15a7704b6e1317b1fa186667085372c69e6f586 Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Fri, 2 Aug 2024 14:21:29 -0400
Subject: [PATCH 060/131] refactor: update handle function of cc_mounts (#5498)

The handle function of cc_mounts was hard to grok and had one of the
highest cyclomatic complexity scores in the codebase. Functionally,
the code should be unchanged.
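
For orientation while reviewing (a minimal, illustrative cloud-config;
device names hypothetical), this is the shape of input handle() digests:

    #cloud-config
    mounts:
      - [ /dev/vdb, /mnt/data, auto, "defaults,noexec" ]
      - [ swap, null ]

The first entry adds a sanitized fstab line; the second uses a null mount
point to drop matching entries. These are the behaviors the new helper
functions below factor out of the old monolithic handle().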
--- cloudinit/config/cc_mounts.py | 375 +++++++++++++---------- tests/unittests/config/test_cc_mounts.py | 8 +- 2 files changed, 214 insertions(+), 169 deletions(-) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 0fdcf3c19e1..5efcec946d8 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -8,13 +8,15 @@ """Mounts: Configure mount points and swap files""" + +import copy import logging import math import os import re -from string import whitespace +from typing import Dict, List, Optional, Tuple, cast -from cloudinit import subp, type_utils, util +from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -33,7 +35,6 @@ # Name matches 'server:/path' NETWORK_NAME_FILTER = r"^.+:.*" NETWORK_NAME_RE = re.compile(NETWORK_NAME_FILTER) -WS = re.compile("[%s]+" % (whitespace)) FSTAB_PATH = "/etc/fstab" MNT_COMMENT = "comment=cloudconfig" MB = 2**20 @@ -133,6 +134,25 @@ def sanitize_devname(startname, transformer, aliases=None): return None +def sanitized_devname_is_valid( + original: str, sanitized: Optional[str], fstab_devs: Dict[str, str] +) -> bool: + """Get if the sanitized device name is valid.""" + if sanitized != original: + LOG.debug("changed %s => %s", original, sanitized) + if sanitized is None: + LOG.debug("Ignoring nonexistent default named mount %s", original) + return False + elif sanitized in fstab_devs: + LOG.debug( + "Device %s already defined in fstab: %s", + sanitized, + fstab_devs[sanitized], + ) + return False + return True + + def suggested_swapsize(memsize=None, maxsize=None, fsys=None): # make a suggestion on the size of swap for this system. if memsize is None: @@ -334,30 +354,16 @@ def handle_swapcfg(swapcfg): return None -def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: - # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno - def_mnt_opts = "defaults,nobootwait" - uses_systemd = cloud.distro.uses_systemd() - if uses_systemd: - def_mnt_opts = ( - "defaults,nofail,x-systemd.after=cloud-init.service,_netdev" - ) - - defvals = [None, None, "auto", def_mnt_opts, "0", "2"] - defvals = cfg.get("mount_default_fields", defvals) - - # these are our default set of mounts - defmnts: list = [ - ["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"], - ["swap", "none", "swap", "sw", "0", "0"], - ] - - cfgmnt = [] - if "mounts" in cfg: - cfgmnt = cfg["mounts"] - - LOG.debug("mounts configuration is %s", cfgmnt) +def parse_fstab() -> Tuple[List[str], Dict[str, str], List[str]]: + """Parse /etc/fstab. + Parse fstab, ignoring any lines containing "comment=cloudconfig". 
+ :return: A 3-tuple containing: + - A list of lines exactly as they appear in fstab + - A dictionary with key being the first token in the line + and value being the entire line + - A list of any lines that were ignored due to "comment=cloudconfig" + """ fstab_lines = [] fstab_devs = {} fstab_removed = [] @@ -367,180 +373,219 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: if MNT_COMMENT in line: fstab_removed.append(line) continue - - try: - toks = WS.split(line) - except Exception: - pass + toks = line.split() fstab_devs[toks[0]] = line fstab_lines.append(line) - - device_aliases = cfg.get("device_aliases", {}) - - for i in range(len(cfgmnt)): + return fstab_lines, fstab_devs, fstab_removed + + +def sanitize_mounts_configuration( + mounts: List[Optional[List[Optional[str]]]], + fstab_devs: Dict[str, str], + device_aliases: Dict[str, str], + default_fields: List[Optional[str]], + cloud: Cloud, +) -> List[List[str]]: + """Sanitize mounts to ensure we can work with devices in config. + + Specifically: + - Ensure the mounts configuration is a list of lists + - Transform and sanitize device names + - Ensure all tokens are strings + - Add default options to any lines without options + """ + updated_lines = [] + for line in mounts: # skip something that wasn't a list - if not isinstance(cfgmnt[i], list): - LOG.warning( - "Mount option %s not a list, got a %s instead", - (i + 1), - type_utils.obj_name(cfgmnt[i]), - ) + if not isinstance(line, list): + LOG.warning("Mount option not a list, ignoring: %s", line) continue - start = str(cfgmnt[i][0]) - sanitized = sanitize_devname( + start = str(line[0]) + sanitized_devname = sanitize_devname( start, cloud.device_name_to_device, aliases=device_aliases ) - if sanitized != start: - LOG.debug("changed %s => %s", start, sanitized) + if sanitized_devname_is_valid(start, sanitized_devname, fstab_devs): + updated_line = [sanitized_devname] + line[1:] + else: + updated_line = line - if sanitized is None: - LOG.debug("Ignoring nonexistent named mount %s", start) - continue - elif sanitized in fstab_devs: - LOG.info( - "Device %s already defined in fstab: %s", - sanitized, - fstab_devs[sanitized], - ) - continue + # Ensure all tokens are strings as users may not have quoted them + # If token is None, replace it with the default value + for index, token in enumerate(updated_line): + if token is None: + updated_line[index] = default_fields[index] + else: + updated_line[index] = str(updated_line[index]) - cfgmnt[i][0] = sanitized + # fill remaining values with defaults from defvals above + updated_line += default_fields[len(updated_line) :] - # in case the user did not quote a field (likely fs-freq, fs_passno) - # but do not convert None to 'None' (LP: #898365) - for j in range(len(cfgmnt[i])): - if cfgmnt[i][j] is None: - continue - else: - cfgmnt[i][j] = str(cfgmnt[i][j]) - - for i in range(len(cfgmnt)): - # fill in values with defaults from defvals above - for j in range(len(defvals)): - if len(cfgmnt[i]) <= j: - cfgmnt[i].append(defvals[j]) - elif cfgmnt[i][j] is None: - cfgmnt[i][j] = defvals[j] - - # if the second entry in the list is 'None' this - # clears all previous entries of that same 'fs_spec' - # (fs_spec is the first field in /etc/fstab, ie, that device) - if cfgmnt[i][1] is None: - for j in range(i): - if cfgmnt[j][0] == cfgmnt[i][0]: - cfgmnt[j][1] = None - - # for each of the "default" mounts, add them only if no other - # entry has the same device name - for defmnt in defmnts: - start = defmnt[0] + 
updated_lines.append(updated_line) + return updated_lines + + +def remove_nonexistent_devices(mounts: List[List[str]]) -> List[List[str]]: + """Remove any entries that have a device name that doesn't exist. + + If the second field of a mount line is None (not the string, the value), + we skip it along with any other entries that came before it that share + the same device name. + """ + actlist = [] + dev_denylist = [] + for line in mounts[::-1]: + if line[1] is None or line[0] in dev_denylist: + LOG.debug("Skipping nonexistent device named %s", line[0]) + dev_denylist.append(line[0]) + else: + actlist.append(line) + # Reverse the list to maintain the original order + return actlist[::-1] + + +def add_default_mounts_to_cfg( + mounts: List[List[str]], + default_mount_options: str, + fstab_devs: Dict[str, str], + device_aliases: Dict[str, str], + cloud: Cloud, +) -> List[List[str]]: + """Add default mounts to the user provided mount configuration. + + Add them only if no other entry has the same device name + """ + new_mounts = copy.deepcopy(mounts) + for default_mount in [ + ["ephemeral0", "/mnt", "auto", default_mount_options, "0", "2"], + ["swap", "none", "swap", "sw", "0", "0"], # Is this used anywhere? + ]: + start = default_mount[0] sanitized = sanitize_devname( start, cloud.device_name_to_device, aliases=device_aliases ) - if sanitized != start: - LOG.debug("changed default device %s => %s", start, sanitized) - - if sanitized is None: - LOG.debug("Ignoring nonexistent default named mount %s", start) - continue - elif sanitized in fstab_devs: - LOG.debug( - "Device %s already defined in fstab: %s", - sanitized, - fstab_devs[sanitized], - ) + if not sanitized_devname_is_valid(start, sanitized, fstab_devs): continue - defmnt[0] = sanitized + # Cast here because the previous call checked for None + default_mount[0] = cast(str, sanitized) - cfgmnt_has = False - for cfgm in cfgmnt: - if cfgm[0] == defmnt[0]: - cfgmnt_has = True - break - - if cfgmnt_has: + default_already_exists = any( + cfgm[0] == default_mount[0] for cfgm in mounts + ) + if default_already_exists: LOG.debug("Not including %s, already previously included", start) continue - cfgmnt.append(defmnt) + new_mounts.append(default_mount) + return new_mounts - # now, each entry in the cfgmnt list has all fstab values - # if the second field is None (not the string, the value) we skip it - actlist = [] - for x in cfgmnt: - if x[1] is None: - LOG.debug("Skipping nonexistent device named %s", x[0]) - else: - actlist.append(x) - swapret = handle_swapcfg(cfg.get("swap", {})) - if swapret: - actlist.append([swapret, "none", "swap", "sw", "0", "0"]) +def add_comment(actlist: List[List[str]]) -> List[List[str]]: + """Add "comment=cloudconfig" to the mount options of each entry.""" + return [ + entry[:3] + [f"{entry[3]},{MNT_COMMENT}"] + entry[4:] + for entry in actlist + ] + + +def activate_swap_if_needed(actlist: List[List[str]]) -> None: + """Call 'swapon -a' if any entry has a swap fs type.""" + if any(entry[2] == "swap" for entry in actlist): + subp.subp(["swapon", "-a"]) + - if len(actlist) == 0: +def mount_if_needed( + uses_systemd: bool, changes_made: bool, dirs: List[str] +) -> None: + """Call 'mount -a' if needed. + + If changes were made, always call 'mount -a'. Otherwise, call 'mount -a' + if any of the directories in the mount list are not already mounted. 
+ """ + do_mount = False + if changes_made: + do_mount = True + else: + mount_points = { + val["mountpoint"] + for val in util.mounts().values() + if "mountpoint" in val + } + do_mount = bool(set(dirs).difference(mount_points)) + + if do_mount: + subp.subp(["mount", "-a"]) + if uses_systemd: + subp.subp(["systemctl", "daemon-reload"]) + + +def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: + """Handle the mounts configuration.""" + # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno + uses_systemd = cloud.distro.uses_systemd() + default_mount_options = ( + "defaults,nofail,x-systemd.after=cloud-init.service,_netdev" + if uses_systemd + else "defaults,nobootwait" + ) + + hardcoded_defaults = [None, None, "auto", default_mount_options, "0", "2"] + default_fields: List[Optional[str]] = cfg.get( + "mount_default_fields", hardcoded_defaults + ) + mounts: List[Optional[List[Optional[str]]]] = cfg.get("mounts", []) + + LOG.debug("mounts configuration is %s", mounts) + + fstab_lines, fstab_devs, fstab_removed = parse_fstab() + device_aliases = cfg.get("device_aliases", {}) + + updated_cfg = sanitize_mounts_configuration( + mounts, fstab_devs, device_aliases, default_fields, cloud + ) + updated_cfg = add_default_mounts_to_cfg( + updated_cfg, default_mount_options, fstab_devs, device_aliases, cloud + ) + updated_cfg = remove_nonexistent_devices(updated_cfg) + updated_cfg = add_comment(updated_cfg) + + swapfile = handle_swapcfg(cfg.get("swap", {})) + if swapfile: + updated_cfg.append([swapfile, "none", "swap", "sw", "0", "0"]) + + if len(updated_cfg) == 0: + # This will only be true if there is no mount configuration at all + # Even if fstab has no functional changes, we'll get past this point + # as we remove any 'comment=cloudconfig' lines and then add them back + # in. LOG.debug("No modifications to fstab needed") return - cc_lines = [] - needswap = False - need_mount_all = False - dirs = [] - for entry in actlist: - # write 'comment' in the fs_mntops, entry, claiming this - entry[3] = "%s,%s" % (entry[3], MNT_COMMENT) - if entry[2] == "swap": - needswap = True - if entry[1].startswith("/"): - dirs.append(entry[1]) - cc_lines.append("\t".join(entry)) - - mount_points = [ - v["mountpoint"] for k, v in util.mounts().items() if "mountpoint" in v - ] + cfg_lines = ["\t".join(entry) for entry in updated_cfg] + + dirs = [d[1] for d in updated_cfg if d[1].startswith("/")] + for d in dirs: try: util.ensure_dir(d) except Exception: util.logexc(LOG, "Failed to make '%s' config-mount", d) - # dirs is list of directories on which a volume should be mounted. - # If any of them does not already show up in the list of current - # mount points, we will definitely need to do mount -a. 
-        if not need_mount_all and d not in mount_points:
-            need_mount_all = True
 
-    sadds = [WS.sub(" ", n) for n in cc_lines]
-    sdrops = [WS.sub(" ", n) for n in fstab_removed]
+    sadds = [n.replace("\t", " ") for n in cfg_lines]
+    sdrops = [n.replace("\t", " ") for n in fstab_removed]
 
-    sops = ["- " + drop for drop in sdrops if drop not in sadds] + [
-        "+ " + add for add in sadds if add not in sdrops
+    sops = [f"- {drop}" for drop in sdrops if drop not in sadds] + [
+        f"+ {add}" for add in sadds if add not in sdrops
     ]
 
-    fstab_lines.extend(cc_lines)
+    fstab_lines.extend(cfg_lines)
     contents = "%s\n" % "\n".join(fstab_lines)
     util.write_file(FSTAB_PATH, contents)
 
-    activate_cmds = []
-    if needswap:
-        activate_cmds.append(["swapon", "-a"])
-
-    if len(sops) == 0:
-        LOG.debug("No changes to /etc/fstab made.")
-    else:
+    if sops:
         LOG.debug("Changes to fstab: %s", sops)
-        need_mount_all = True
-
-    if need_mount_all:
-        activate_cmds.append(["mount", "-a"])
-        if uses_systemd:
-            activate_cmds.append(["systemctl", "daemon-reload"])
+    else:
+        LOG.debug("No changes to /etc/fstab made.")
 
-    fmt = "Activating swap and mounts with: %s"
-    for cmd in activate_cmds:
-        fmt = "Activate mounts: %s:" + " ".join(cmd)
-        try:
-            subp.subp(cmd)
-            LOG.debug(fmt, "PASS")
-        except subp.ProcessExecutionError:
-            LOG.warning(fmt, "FAIL")
-            util.logexc(LOG, fmt, "FAIL")
+    activate_swap_if_needed(updated_cfg)
+    mount_if_needed(uses_systemd, bool(sops), dirs)
diff --git a/tests/unittests/config/test_cc_mounts.py b/tests/unittests/config/test_cc_mounts.py
index 07ce4b0ba40..9982b6741c6 100644
--- a/tests/unittests/config/test_cc_mounts.py
+++ b/tests/unittests/config/test_cc_mounts.py
@@ -506,14 +506,14 @@ def test_no_change_fstab_sets_needs_mount_all(self):
         /dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2
         {self.swap_path} none swap sw,comment=cloudconfig 0 0
         """  # noqa: E501
-        )
+        ).strip()
         cc = {"mounts": [["/dev/vdb", "/mnt", "auto", "defaults,noexec"]]}
         with open(cc_mounts.FSTAB_PATH, "w") as fd:
             fd.write(fstab_original_content)
         cc_mounts.handle(None, cc, self.mock_cloud, [])
         with open(cc_mounts.FSTAB_PATH, "r") as fd:
             fstab_new_content = fd.read()
-        assert fstab_original_content == fstab_new_content
+        assert fstab_original_content == fstab_new_content.strip()
         self.m_subp.assert_has_calls(
             [
                 mock.call(["mount", "-a"]),
@@ -549,7 +549,7 @@ def test_fstab_mounts_combinations(self):
             ["/dev/sda5", "/mnt3"],
             # Takes the place of the line that was removed from fstab
             # with the cloudconfig comment
-            ["/dev/sda1", "/mnt", "xfs"],
+            ["/dev/sda1", "/mnt", "xfs", "auto", None, "2"],
             # The line that survives after previous Nones
             ["/dev/sda3", "/mnt4", "btrfs"],
         ]
@@ -566,7 +566,7 @@ def test_fstab_mounts_combinations(self):
     LABEL=UEFI
     /dev/sda4 /mnt2 auto nofail,comment=cloudconfig 1 2
     /dev/sda5 /mnt3 auto defaults,nofail,x-systemd.after=cloud-init.service,_netdev,comment=cloudconfig 0 2
-    /dev/sda1 /mnt xfs defaults,nofail,x-systemd.after=cloud-init.service,_netdev,comment=cloudconfig 0 2
+    /dev/sda1 /mnt xfs auto,comment=cloudconfig 0 2
    /dev/sda3 /mnt4 btrfs defaults,nofail,x-systemd.after=cloud-init.service,_netdev,comment=cloudconfig 0 2
    /dev/sdb1 none swap sw,comment=cloudconfig 0 0
    """  # noqa: E501

From b5d4f3fa16070610cbbbf32dd24616b53b95c5b5 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Fri, 2 Aug 2024 12:58:58 -0600
Subject: [PATCH 061/131] chore: Add helper, refactor utilities into separate
 module (#5573)

---
 cloudinit/cmd/main.py                | 9 +-
 cloudinit/config/cc_ansible.py       | 10 +-
 cloudinit/config/cc_apt_configure.py | 8 +-
cloudinit/config/cc_ca_certs.py | 6 +- cloudinit/config/cc_growpart.py | 4 +- cloudinit/config/cc_resizefs.py | 6 +- cloudinit/config/cc_rsyslog.py | 4 +- cloudinit/config/cc_set_passwords.py | 8 +- cloudinit/config/cc_ssh.py | 4 +- cloudinit/config/cc_update_etc_hosts.py | 4 +- cloudinit/config/modules.py | 8 +- cloudinit/config/schema.py | 12 +- cloudinit/distros/__init__.py | 5 +- cloudinit/distros/alpine.py | 4 +- cloudinit/distros/ug_util.py | 6 +- cloudinit/features.py | 5 + cloudinit/lifecycle.py | 242 ++++++++++++++++++ cloudinit/net/network_state.py | 4 +- cloudinit/netinfo.py | 4 +- cloudinit/sources/DataSourceConfigDrive.py | 4 +- cloudinit/sources/DataSourceDigitalOcean.py | 4 +- cloudinit/sources/DataSourceNoCloud.py | 8 +- cloudinit/sources/__init__.py | 4 +- cloudinit/ssh_util.py | 6 +- cloudinit/stages.py | 3 +- cloudinit/util.py | 208 +-------------- tests/integration_tests/cmd/test_schema.py | 4 +- .../datasources/test_nocloud.py | 4 +- .../modules/test_combined.py | 5 +- .../modules/test_ubuntu_pro.py | 4 +- tests/unittests/config/test_cc_ansible.py | 8 +- tests/unittests/config/test_cc_ssh.py | 4 +- tests/unittests/config/test_cc_ubuntu_pro.py | 9 +- tests/unittests/conftest.py | 4 +- tests/unittests/distros/test_create_users.py | 7 +- tests/unittests/net/test_network_state.py | 4 +- tests/unittests/sources/test_digitalocean.py | 4 +- tests/unittests/test_log.py | 10 +- tests/unittests/test_util.py | 40 ++- 39 files changed, 380 insertions(+), 317 deletions(-) create mode 100644 cloudinit/lifecycle.py diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 590173ae4fc..54ba79e1bbc 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -31,6 +31,7 @@ from cloudinit import warnings from cloudinit import reporting from cloudinit import atomic_helper +from cloudinit import lifecycle from cloudinit.cmd.devel import read_cfg_paths from cloudinit.config import cc_set_hostname from cloudinit.config.modules import Modules @@ -94,7 +95,7 @@ def log_ppid(distro, bootstage_name): if distro.is_linux: ppid = os.getppid() if 1 != ppid and distro.uses_systemd(): - util.deprecate( + lifecycle.deprecate( deprecated=( "Unsupported configuration: boot stage called " f"by PID [{ppid}] outside of systemd" @@ -255,7 +256,7 @@ def attempt_cmdline_url(path, network=True, cmdline=None) -> Tuple[int, str]: is_cloud_cfg = False if is_cloud_cfg: if cmdline_name == "url": - return util.deprecate( + return lifecycle.deprecate( deprecated="The kernel command line key `url`", deprecated_version="22.3", extra_message=" Please use `cloud-config-url` " @@ -650,7 +651,7 @@ def main_modules(action_name, args): log_ppid(init.distro, bootstage_name) if name == "init": - util.deprecate( + lifecycle.deprecate( deprecated="`--mode init`", deprecated_version="24.1", extra_message="Use `cloud-init init` instead.", @@ -983,7 +984,7 @@ def main(sysv_args=None): parser_mod = subparsers.add_parser( "modules", help="Activate modules using a given configuration key." 
) - extra_help = util.deprecate( + extra_help = lifecycle.deprecate( deprecated="`init`", deprecated_version="24.1", extra_message="Use `cloud-init init` instead.", diff --git a/cloudinit/config/cc_ansible.py b/cloudinit/config/cc_ansible.py index fce8ae3b4c4..3b9e931a58d 100644 --- a/cloudinit/config/cc_ansible.py +++ b/cloudinit/config/cc_ansible.py @@ -8,13 +8,13 @@ from copy import deepcopy from typing import Optional -from cloudinit import subp +from cloudinit import lifecycle, subp from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema from cloudinit.distros import ALL_DISTROS, Distro from cloudinit.settings import PER_INSTANCE -from cloudinit.util import Version, get_cfg_by_path +from cloudinit.util import get_cfg_by_path meta: MetaSchema = { "id": "cc_ansible", @@ -39,13 +39,13 @@ def __init__(self, distro: Distro): # and cloud-init might not have that set, default: /root self.env["HOME"] = os.environ.get("HOME", "/root") - def get_version(self) -> Optional[Version]: + def get_version(self) -> Optional[lifecycle.Version]: stdout, _ = self.do_as(self.cmd_version) first_line = stdout.splitlines().pop(0) matches = re.search(r"([\d\.]+)", first_line) if matches: version = matches.group(0) - return Version.from_str(version) + return lifecycle.Version.from_str(version) return None def pull(self, *args) -> str: @@ -210,7 +210,7 @@ def run_ansible_pull(pull: AnsiblePull, cfg: dict): v = pull.get_version() if not v: LOG.warning("Cannot parse ansible version") - elif v < Version(2, 7, 0): + elif v < lifecycle.Version(2, 7, 0): # diff was added in commit edaa0b52450ade9b86b5f63097ce18ebb147f46f if cfg.get("diff"): raise ValueError( diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index b79b6483b9e..787270e665d 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -17,7 +17,7 @@ from textwrap import indent from typing import Dict, Iterable, List, Mapping -from cloudinit import features, subp, templater, util +from cloudinit import features, lifecycle, subp, templater, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -745,7 +745,7 @@ def add_apt_sources( def convert_v1_to_v2_apt_format(srclist): """convert v1 apt format to v2 (dict in apt_sources)""" srcdict = {} - util.deprecate( + lifecycle.deprecate( deprecated="Config key 'apt_sources'", deprecated_version="22.1", extra_message="Use 'apt' instead", @@ -824,7 +824,7 @@ def convert_v2_to_v3_apt_format(oldcfg): # no old config, so no new one to be created if not needtoconvert: return oldcfg - util.deprecate( + lifecycle.deprecate( deprecated=f"The following config key(s): {needtoconvert}", deprecated_version="22.1", ) @@ -832,7 +832,7 @@ def convert_v2_to_v3_apt_format(oldcfg): # if old AND new config are provided, prefer the new one (LP #1616831) newaptcfg = oldcfg.get("apt", None) if newaptcfg is not None: - util.deprecate( + lifecycle.deprecate( deprecated="Support for combined old and new apt module keys", deprecated_version="22.1", ) diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 4e80947fd13..d6dbc977f88 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -7,7 +7,7 @@ import logging import os -from cloudinit import subp, util +from cloudinit import lifecycle, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from 
cloudinit.config.schema import MetaSchema @@ -231,7 +231,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: @param args: Any module arguments from cloud.cfg """ if "ca-certs" in cfg: - util.deprecate( + lifecycle.deprecate( deprecated="Key 'ca-certs'", deprecated_version="22.1", extra_message="Use 'ca_certs' instead.", @@ -254,7 +254,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: # If there is a remove_defaults option set to true, disable the system # default trusted CA certs first. if "remove-defaults" in ca_cert_cfg: - util.deprecate( + lifecycle.deprecate( deprecated="Key 'remove-defaults'", deprecated_version="22.1", extra_message="Use 'remove_defaults' instead.", diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index e1a56f91f09..459f0a3cded 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -20,7 +20,7 @@ from pathlib import Path from typing import Optional, Tuple -from cloudinit import subp, temp_utils, util +from cloudinit import lifecycle, subp, temp_utils, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -542,7 +542,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: mode = mycfg.get("mode", "auto") if util.is_false(mode): if mode != "off": - util.deprecate( + lifecycle.deprecate( deprecated=f"Growpart's 'mode' key with value '{mode}'", deprecated_version="22.2", extra_message="Use 'off' instead.", diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 77a2a26a7c4..b90db58ff88 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -15,7 +15,7 @@ import stat from typing import Optional -from cloudinit import subp, util +from cloudinit import lifecycle, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -56,8 +56,8 @@ def _resize_btrfs(mount_point, devpth): # btrfs has exclusive operations and resize may fail if btrfs is busy # doing one of the operations that prevents resize. 
As of btrfs 5.10 # the resize operation can be queued - btrfs_with_queue = util.Version.from_str("5.10") - system_btrfs_ver = util.Version.from_str( + btrfs_with_queue = lifecycle.Version.from_str("5.10") + system_btrfs_ver = lifecycle.Version.from_str( subp.subp(["btrfs", "--version"])[0].split("v")[-1].strip() ) if system_btrfs_ver >= btrfs_with_queue: diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index 3edf9972bf9..88ec1c4f3a1 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -17,7 +17,7 @@ import re from textwrap import dedent -from cloudinit import log, subp, util +from cloudinit import lifecycle, log, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -153,7 +153,7 @@ def load_config(cfg: dict, distro: Distro) -> dict: distro_config = distro_default_rsyslog_config(distro) if isinstance(cfg.get("rsyslog"), list): - util.deprecate( + lifecycle.deprecate( deprecated="The rsyslog key with value of type 'list'", deprecated_version="22.2", ) diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 21408105c74..224ae6b85fe 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -12,7 +12,7 @@ from string import ascii_letters, digits from typing import List -from cloudinit import features, subp, util +from cloudinit import features, lifecycle, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -71,7 +71,7 @@ def handle_ssh_pwauth(pw_auth, distro: Distro): cfg_name = "PasswordAuthentication" if isinstance(pw_auth, str): - util.deprecate( + lifecycle.deprecate( deprecated="Using a string value for the 'ssh_pwauth' key", deprecated_version="22.2", extra_message="Use a boolean value with 'ssh_pwauth'.", @@ -128,7 +128,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: chfg = cfg["chpasswd"] users_list = util.get_cfg_option_list(chfg, "users", default=[]) if "list" in chfg and chfg["list"]: - util.deprecate( + lifecycle.deprecate( deprecated="Config key 'lists'", deprecated_version="22.3", extra_message="Use 'users' instead.", @@ -137,7 +137,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: LOG.debug("Handling input for chpasswd as list.") plist = util.get_cfg_option_list(chfg, "list", plist) else: - util.deprecate( + lifecycle.deprecate( deprecated="The chpasswd multiline string", deprecated_version="22.2", extra_message="Use string type instead.", diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 00687cf867d..947469b5b6d 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -14,7 +14,7 @@ import sys from typing import List, Optional, Sequence -from cloudinit import ssh_util, subp, util +from cloudinit import lifecycle, ssh_util, subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -75,7 +75,7 @@ def set_redhat_keyfile_perms(keyfile: str) -> None: """ permissions_public = 0o644 ssh_version = ssh_util.get_opensshd_upstream_version() - if ssh_version and ssh_version < util.Version(9, 0): + if ssh_version and ssh_version < lifecycle.Version(9, 0): # fedora 37, centos 9 stream and below has sshd # versions less than 9 and private key permissions are # set to 0o640 from sshd-keygen. 
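The hunks above are a mechanical migration: deprecation and version helpers move from cloudinit.util to the new cloudinit.lifecycle module introduced later in this patch. As a minimal sketch of the version-gate pattern these call sites share (the literal version strings here are illustrative, not taken from the patch):

    from cloudinit import lifecycle

    # Parse a tool's self-reported version, then gate a feature on a
    # minimum, mirroring the cc_ansible and cc_resizefs hunks above.
    # "2.10.8" stands in for output scraped from `ansible --version`.
    installed = lifecycle.Version.from_str("2.10.8")
    if installed < lifecycle.Version(2, 7, 0):
        # ansible added diff support in 2.7.0
        raise ValueError("Ansible version 2.7.0 or greater is required.")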
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index 45bb2df7d4b..dcd50701a20 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -10,7 +10,7 @@ import logging -from cloudinit import templater, util +from cloudinit import lifecycle, templater, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema @@ -33,7 +33,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: if util.translate_bool(manage_hosts, addons=["template"]): if manage_hosts == "template": - util.deprecate( + lifecycle.deprecate( deprecated="Value 'template' for key 'manage_etc_hosts'", deprecated_version="22.2", extra_message="Use 'true' instead.", diff --git a/cloudinit/config/modules.py b/cloudinit/config/modules.py index f775802d74a..a82e1ff8e8e 100644 --- a/cloudinit/config/modules.py +++ b/cloudinit/config/modules.py @@ -12,7 +12,7 @@ from types import ModuleType from typing import Dict, List, NamedTuple, Optional -from cloudinit import config, importer, type_utils, util +from cloudinit import config, importer, lifecycle, type_utils, util from cloudinit.distros import ALL_DISTROS from cloudinit.helpers import ConfigMerger from cloudinit.reporting.events import ReportEventStack @@ -194,7 +194,7 @@ def _fixup_modules(self, raw_mods) -> List[ModuleDetails]: if not mod_name: continue if freq and freq not in FREQUENCIES: - util.deprecate( + lifecycle.deprecate( deprecated=( f"Config specified module {raw_name} has an unknown" f" frequency {freq}" @@ -205,7 +205,7 @@ def _fixup_modules(self, raw_mods) -> List[ModuleDetails]: # default meta attribute "frequency" value is used. freq = None if mod_name in RENAMED_MODULES: - util.deprecate( + lifecycle.deprecate( deprecated=( f"Module has been renamed from {mod_name} to " f"{RENAMED_MODULES[mod_name]}. 
Update any" @@ -278,7 +278,7 @@ def _run_modules(self, mostly_mods: List[ModuleDetails]): func_signature = signature(mod.handle) func_params = func_signature.parameters if len(func_params) == 5: - util.deprecate( + lifecycle.deprecate( deprecated="Config modules with a `log` parameter", deprecated_version="23.2", ) diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 062ab92ecd8..a2fceecabcb 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -31,7 +31,7 @@ import yaml -from cloudinit import features, importer, safeyaml +from cloudinit import features, importer, lifecycle, safeyaml from cloudinit.cmd.devel import read_cfg_paths from cloudinit.handlers import INCLUSION_TYPES_MAP, type_from_starts_with from cloudinit.helpers import Paths @@ -42,7 +42,6 @@ get_modules_from_dir, load_text_file, load_yaml, - should_log_deprecation, write_file, ) @@ -795,8 +794,11 @@ def validate_cloudconfig_schema( if isinstance( schema_error, SchemaDeprecationError ): # pylint: disable=W1116 - if schema_error.version == "devel" or should_log_deprecation( - schema_error.version, features.DEPRECATION_INFO_BOUNDARY + if ( + schema_error.version == "devel" + or lifecycle.should_log_deprecation( + schema_error.version, features.DEPRECATION_INFO_BOUNDARY + ) ): deprecations.append(SchemaProblem(path, schema_error.message)) else: @@ -818,7 +820,7 @@ def validate_cloudconfig_schema( deprecations, prefix="Deprecated cloud-config provided: ", ) - # This warning doesn't fit the standardized util.deprecated() + # This warning doesn't fit the standardized lifecycle.deprecated() # utility format, but it is a deprecation log, so log it directly. LOG.deprecated(message) # type: ignore if strict and (errors or deprecations or info_deprecations): diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index e6bfb1d3b48..1afef63de95 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -36,6 +36,7 @@ from cloudinit import ( helpers, importer, + lifecycle, net, persistence, ssh_util, @@ -710,7 +711,7 @@ def add_user(self, name, **kwargs): groups = groups.split(",") if isinstance(groups, dict): - util.deprecate( + lifecycle.deprecate( deprecated=f"The user {name} has a 'groups' config value " "of type dict", deprecated_version="22.3", @@ -848,7 +849,7 @@ def create_user(self, name, **kwargs): if kwargs["sudo"]: self.write_sudo_rules(name, kwargs["sudo"]) elif kwargs["sudo"] is False: - util.deprecate( + lifecycle.deprecate( deprecated=f"The value of 'false' in user {name}'s " "'sudo' config", deprecated_version="22.2", diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py index a1d0d900c9f..dae4b61564e 100644 --- a/cloudinit/distros/alpine.py +++ b/cloudinit/distros/alpine.py @@ -13,7 +13,7 @@ from datetime import datetime from typing import Any, Dict, Optional -from cloudinit import distros, helpers, subp, util +from cloudinit import distros, helpers, lifecycle, subp, util from cloudinit.distros.parsers.hostname import HostnameConf from cloudinit.settings import PER_ALWAYS, PER_INSTANCE @@ -248,7 +248,7 @@ def add_user(self, name, **kwargs): if isinstance(groups, str): groups = groups.split(",") elif isinstance(groups, dict): - util.deprecate( + lifecycle.deprecate( deprecated=f"The user {name} has a 'groups' config value " "of type dict", deprecated_version="22.3", diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py index b8d14937488..2d0a887e7c4 100644 --- a/cloudinit/distros/ug_util.py +++ 
b/cloudinit/distros/ug_util.py @@ -11,7 +11,7 @@ import logging -from cloudinit import type_utils, util +from cloudinit import lifecycle, type_utils, util LOG = logging.getLogger(__name__) @@ -175,7 +175,7 @@ def normalize_users_groups(cfg, distro): # Translate it into a format that will be more useful going forward if isinstance(old_user, str): old_user = {"name": old_user} - util.deprecate( + lifecycle.deprecate( deprecated="'user' of type string", deprecated_version="22.2", extra_message="Use 'users' list instead.", @@ -208,7 +208,7 @@ def normalize_users_groups(cfg, distro): base_users = cfg.get("users", []) if isinstance(base_users, (dict, str)): - util.deprecate( + lifecycle.deprecate( deprecated=f"'users' of type {type(base_users)}", deprecated_version="22.2", extra_message="Use 'users' as a list.", diff --git a/cloudinit/features.py b/cloudinit/features.py index c3fdae18658..4f9a59e9925 100644 --- a/cloudinit/features.py +++ b/cloudinit/features.py @@ -107,6 +107,11 @@ the different log levels is that logs at DEPRECATED level result in a return code of 2 from `cloud-init status`. +This may also be used in some limited cases where new error messages may be +logged which increase the risk of regression in stable downstreams where the +error was previously unreported yet downstream users expected stable behavior +across new cloud-init releases. + format: :: = | diff --git a/cloudinit/lifecycle.py b/cloudinit/lifecycle.py new file mode 100644 index 00000000000..871333ef6fb --- /dev/null +++ b/cloudinit/lifecycle.py @@ -0,0 +1,242 @@ +# This file is part of cloud-init. See LICENSE file for license information. +import collections +import functools +import logging +from typing import NamedTuple, Optional + +from cloudinit import features, log + +LOG = logging.getLogger(__name__) + + +class DeprecationLog(NamedTuple): + log_level: int + message: str + + +@functools.total_ordering +class Version( + collections.namedtuple("Version", ["major", "minor", "patch", "rev"]) +): + """A class for comparing versions. + + Implemented as a named tuple with all ordering methods. Comparisons + between X.Y.N and X.Y always treat the more specific number as larger. + + :param major: the most significant number in a version + :param minor: next greatest significant number after major + :param patch: next greatest significant number after minor + :param rev: the least significant number in a version + + :raises TypeError: If invalid arguments are given. + :raises ValueError: If invalid arguments are given. + + Examples: + >>> Version(2, 9) == Version.from_str("2.9") + True + >>> Version(2, 9, 1) > Version.from_str("2.9.1") + False + >>> Version(3, 10) > Version.from_str("3.9.9.9") + True + >>> Version(3, 7) >= Version.from_str("3.7") + True + + """ + + def __new__( + cls, major: int = -1, minor: int = -1, patch: int = -1, rev: int = -1 + ) -> "Version": + """Default of -1 allows us to tiebreak in favor of the most specific + number""" + return super(Version, cls).__new__(cls, major, minor, patch, rev) + + @classmethod + def from_str(cls, version: str) -> "Version": + """Create a Version object from a string. + + :param version: A period-delimited version string, max 4 segments. + + :raises TypeError: Raised if invalid arguments are given. + :raises ValueError: Raised if invalid arguments are given. + + :return: A Version object.
+ """ + return cls(*(list(map(int, version.split("."))))) + + def __gt__(self, other): + return 1 == self._compare_version(other) + + def __eq__(self, other): + return ( + self.major == other.major + and self.minor == other.minor + and self.patch == other.patch + and self.rev == other.rev + ) + + def __iter__(self): + """Iterate over the version (drop sentinels)""" + for n in (self.major, self.minor, self.patch, self.rev): + if n != -1: + yield str(n) + else: + break + + def __str__(self): + return ".".join(self) + + def __hash__(self): + return hash(str(self)) + + def _compare_version(self, other: "Version") -> int: + """Compare this Version to another. + + :param other: A Version object. + + :return: -1 if self > other, 1 if self < other, else 0 + """ + if self == other: + return 0 + if self.major > other.major: + return 1 + if self.minor > other.minor: + return 1 + if self.patch > other.patch: + return 1 + if self.rev > other.rev: + return 1 + return -1 + + +def should_log_deprecation(version: str, boundary_version: str) -> bool: + """Determine if a deprecation message should be logged. + + :param version: The version in which the thing was deprecated. + :param boundary_version: The version at which deprecation level is logged. + + :return: True if the message should be logged, else False. + """ + return boundary_version == "devel" or Version.from_str( + version + ) <= Version.from_str(boundary_version) + + +def log_with_downgradable_level( + *, + logger: logging.Logger, + version: str, + requested_level: int, + msg: str, + args, +): + """Log a message at the requested level, if that is acceptable. + + If the log level is too high due to the version boundary, log at DEBUG + level. Useful to add new warnings to previously unguarded code without + disrupting stable downstreams. + + :param logger: Logger object to log with + :param version: Version string of the version that this log was introduced + :param level: Preferred level at which this message should be logged + :param msg: Message, as passed to the logger. + :param args: Message formatting args, ass passed to the logger + + :return: True if the message should be logged, else False. + """ + if should_log_deprecation(version, features.DEPRECATION_INFO_BOUNDARY): + logger.log(requested_level, msg, args) + else: + logger.debug(msg, args) + + +def deprecate( + *, + deprecated: str, + deprecated_version: str, + extra_message: Optional[str] = None, + schedule: int = 5, + skip_log: bool = False, +) -> DeprecationLog: + """Mark a "thing" as deprecated. Deduplicated deprecations are + logged. + + :param deprecated: Noun to be deprecated. Write this as the start + of a sentence, with no period. Version and extra message will + be appended. + :param deprecated_version: The version in which the thing was + deprecated + :param extra_message: A remedy for the user's problem. A good + message will be actionable and specific (i.e., don't use a + generic "Use updated key." if the user used a deprecated key). + End the string with a period. + :param schedule: Manually set the deprecation schedule. Defaults to + 5 years. Leave a comment explaining your reason for deviation if + setting this value. + :param skip_log: Return log text rather than logging it. Useful for + running prior to logging setup. 
+ :return: NamedTuple containing log level and log message + DeprecationLog(level: int, message: str) + + Note: uses keyword-only arguments to improve legibility + """ + if not hasattr(deprecate, "log"): + setattr(deprecate, "log", set()) + message = extra_message or "" + dedup = hash(deprecated + message + deprecated_version + str(schedule)) + version = Version.from_str(deprecated_version) + version_removed = Version(version.major + schedule, version.minor) + deprecate_msg = ( + f"{deprecated} is deprecated in " + f"{deprecated_version} and scheduled to be removed in " + f"{version_removed}. {message}" + ).rstrip() + if not should_log_deprecation( + deprecated_version, features.DEPRECATION_INFO_BOUNDARY + ): + level = logging.INFO + elif hasattr(LOG, "deprecated"): + level = log.DEPRECATED + else: + level = logging.WARN + log_cache = getattr(deprecate, "log") + if not skip_log and dedup not in log_cache: + log_cache.add(dedup) + LOG.log(level, deprecate_msg) + return DeprecationLog(level, deprecate_msg) + + +def deprecate_call( + *, deprecated_version: str, extra_message: str, schedule: int = 5 +): + """Mark a "thing" as deprecated. Deduplicated deprecations are + logged. + + :param deprecated_version: The version in which the thing was + deprecated + :param extra_message: A remedy for the user's problem. A good + message will be actionable and specific (i.e., don't use a + generic "Use updated key." if the user used a deprecated key). + End the string with a period. + :param schedule: Manually set the deprecation schedule. Defaults to + 5 years. Leave a comment explaining your reason for deviation if + setting this value. + + Note: uses keyword-only arguments to improve legibility + """ + + def wrapper(func): + @functools.wraps(func) + def decorator(*args, **kwargs): + # don't log message multiple times + out = func(*args, **kwargs) + deprecate( + deprecated_version=deprecated_version, + deprecated=func.__name__, + extra_message=extra_message, + schedule=schedule, + ) + return out + + return decorator + + return wrapper diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 9f34467be78..25471dc172c 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -9,7 +9,7 @@ import logging from typing import TYPE_CHECKING, Any, Dict, Optional -from cloudinit import safeyaml, util +from cloudinit import lifecycle, safeyaml, util from cloudinit.net import ( find_interface_name_from_mac, get_interfaces_by_mac, @@ -86,7 +86,7 @@ def warn_deprecated_all_devices(dikt: dict) -> None: """Warn about deprecations of v2 properties for all devices""" if "gateway4" in dikt or "gateway6" in dikt: - util.deprecate( + lifecycle.deprecate( deprecated="The use of `gateway4` and `gateway6`", deprecated_version="22.4", extra_message="For more info check out: " diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 8b3db620018..6aee531638d 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -15,7 +15,7 @@ from ipaddress import IPv4Network from typing import Dict, List, Union -from cloudinit import subp, util +from cloudinit import lifecycle, subp, util from cloudinit.net.network_state import net_prefix_to_ipv4_mask from cloudinit.simpletable import SimpleTable @@ -95,7 +95,7 @@ def _netdev_info_iproute_json(ipaddr_json): return devs -@util.deprecate_call( +@lifecycle.deprecate_call( deprecated_version="22.1", extra_message="Required by old iproute2 versions that don't " "support ip json output. 
Consider upgrading to a more recent version.", diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index d5db34cd1d7..5ca6c27d176 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -9,7 +9,7 @@ import logging import os -from cloudinit import sources, subp, util +from cloudinit import lifecycle, sources, subp, util from cloudinit.event import EventScope, EventType from cloudinit.net import eni from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform @@ -176,7 +176,7 @@ def network_config(self): elif self.network_eni is not None: self._network_config = eni.convert_eni_data(self.network_eni) LOG.debug("network config provided via converted eni data") - util.deprecate( + lifecycle.deprecate( deprecated="Eni network configuration in ConfigDrive", deprecated_version="24.3", extra_message=( diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 951006ed815..ec35af782f0 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -9,7 +9,7 @@ import logging import cloudinit.sources.helpers.digitalocean as do_helper -from cloudinit import sources, util +from cloudinit import lifecycle, sources, util LOG = logging.getLogger(__name__) @@ -55,7 +55,7 @@ def _unpickle(self, ci_pkl_version: int) -> None: self._deprecate() def _deprecate(self): - util.deprecate( + lifecycle.deprecate( deprecated="DataSourceDigitalOcean", deprecated_version="23.2", extra_message="Deprecated in favour of DataSourceConfigDrive.", diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 0bf6e7c4ee2..289205e8599 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -13,7 +13,7 @@ import os from functools import partial -from cloudinit import dmi, sources, util +from cloudinit import dmi, lifecycle, sources, util from cloudinit.net import eni LOG = logging.getLogger(__name__) @@ -131,7 +131,7 @@ def _pp2d_callback(mp, data): label = self.ds_cfg.get("fs_label", "cidata") if label is not None: if label.lower() != "cidata": - util.deprecate( + lifecycle.deprecate( deprecated="Custom fs_label keys", deprecated_version="24.3", extra_message="This key isn't supported by ds-identify.", @@ -272,7 +272,7 @@ def check_instance_id(self, sys_cfg): def network_config(self): if self._network_config is None: if self._network_eni is not None: - util.deprecate( + lifecycle.deprecate( deprecated="Eni network configuration in NoCloud", deprecated_version="24.3", extra_message=( @@ -424,7 +424,7 @@ def ds_detect(self): For backwards compatibility, check for that dsname.
""" log_deprecated = partial( - util.deprecate, + lifecycle.deprecate, deprecated="The 'nocloud-net' datasource name", deprecated_version="24.1", extra_message=( diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index eb39ddc7bb3..87b49fcaecc 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -19,7 +19,7 @@ from enum import Enum, unique from typing import Any, Dict, List, Optional, Tuple, Union -from cloudinit import atomic_helper, dmi, importer, net, type_utils +from cloudinit import atomic_helper, dmi, importer, lifecycle, net, type_utils from cloudinit import user_data as ud from cloudinit import util from cloudinit.atomic_helper import write_json @@ -1230,7 +1230,7 @@ def parse_cmdline_or_dmi(input: str) -> str: deprecated = ds_parse_1 or ds_parse_2 if deprecated: dsname = deprecated.group(1).strip() - util.deprecate( + lifecycle.deprecate( deprecated=( f"Defining the datasource on the command line using " f"ci.ds={dsname} or " diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index cad85d596b8..70002086738 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -12,7 +12,7 @@ from contextlib import suppress from typing import List, Sequence, Tuple -from cloudinit import subp, util +from cloudinit import lifecycle, subp, util LOG = logging.getLogger(__name__) @@ -671,7 +671,7 @@ def get_opensshd_upstream_version(): upstream_version = "9.0" full_version = get_opensshd_version() if full_version is None: - return util.Version.from_str(upstream_version) + return lifecycle.Version.from_str(upstream_version) if "p" in full_version: upstream_version = full_version[: full_version.find("p")] elif " " in full_version: @@ -679,7 +679,7 @@ def get_opensshd_upstream_version(): else: upstream_version = full_version try: - upstream_version = util.Version.from_str(upstream_version) + upstream_version = lifecycle.Version.from_str(upstream_version) return upstream_version except (ValueError, TypeError): LOG.warning("Could not parse sshd version: %s", upstream_version) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index d564cbbc289..1d911aaf3ac 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -22,6 +22,7 @@ handlers, helpers, importer, + lifecycle, net, sources, type_utils, @@ -914,7 +915,7 @@ def _consume_vendordata(self, vendor_source, frequency=PER_INSTANCE): return if isinstance(enabled, str): - util.deprecate( + lifecycle.deprecate( deprecated=f"Use of string '{enabled}' for " "'vendor_data:enabled' field", deprecated_version="23.1", diff --git a/cloudinit/util.py b/cloudinit/util.py index faa3e847b84..87b8aa071dc 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -12,7 +12,6 @@ import contextlib import copy as obj_copy import email -import functools import glob import grp import gzip @@ -38,7 +37,7 @@ from collections import deque, namedtuple from contextlib import contextmanager, suppress from errno import ENOENT -from functools import lru_cache, total_ordering +from functools import lru_cache from pathlib import Path from types import ModuleType from typing import ( @@ -51,7 +50,6 @@ Generator, List, Mapping, - NamedTuple, Optional, Sequence, TypeVar, @@ -65,7 +63,6 @@ from cloudinit import ( features, importer, - log, mergers, net, settings, @@ -94,11 +91,6 @@ FALSE_STRINGS = ("off", "0", "no", "false") -class DeprecationLog(NamedTuple): - log_level: int - message: str - - def kernel_version(): return tuple(map(int, os.uname().release.split(".")[:2])) @@ -3143,204 +3135,6 @@ def error(msg, 
rc=1, fmt="Error:\n{}", sys_exit=False): return rc -@total_ordering -class Version(namedtuple("Version", ["major", "minor", "patch", "rev"])): - """A class for comparing versions. - - Implemented as a named tuple with all ordering methods. Comparisons - between X.Y.N and X.Y always treats the more specific number as larger. - - :param major: the most significant number in a version - :param minor: next greatest significant number after major - :param patch: next greatest significant number after minor - :param rev: the least significant number in a version - - :raises TypeError: If invalid arguments are given. - :raises ValueError: If invalid arguments are given. - - Examples: - >>> Version(2, 9) == Version.from_str("2.9") - True - >>> Version(2, 9, 1) > Version.from_str("2.9.1") - False - >>> Version(3, 10) > Version.from_str("3.9.9.9") - True - >>> Version(3, 7) >= Version.from_str("3.7") - True - - """ - - def __new__( - cls, major: int = -1, minor: int = -1, patch: int = -1, rev: int = -1 - ) -> "Version": - """Default of -1 allows us to tiebreak in favor of the most specific - number""" - return super(Version, cls).__new__(cls, major, minor, patch, rev) - - @classmethod - def from_str(cls, version: str) -> "Version": - """Create a Version object from a string. - - :param version: A period-delimited version string, max 4 segments. - - :raises TypeError: Raised if invalid arguments are given. - :raises ValueError: Raised if invalid arguments are given. - - :return: A Version object. - """ - return cls(*(list(map(int, version.split("."))))) - - def __gt__(self, other): - return 1 == self._compare_version(other) - - def __eq__(self, other): - return ( - self.major == other.major - and self.minor == other.minor - and self.patch == other.patch - and self.rev == other.rev - ) - - def __iter__(self): - """Iterate over the version (drop sentinels)""" - for n in (self.major, self.minor, self.patch, self.rev): - if n != -1: - yield str(n) - else: - break - - def __str__(self): - return ".".join(self) - - def __hash__(self): - return hash(str(self)) - - def _compare_version(self, other: "Version") -> int: - """Compare this Version to another. - - :param other: A Version object. - - :return: -1 if self > other, 1 if self < other, else 0 - """ - if self == other: - return 0 - if self.major > other.major: - return 1 - if self.minor > other.minor: - return 1 - if self.patch > other.patch: - return 1 - if self.rev > other.rev: - return 1 - return -1 - - -def should_log_deprecation(version: str, boundary_version: str) -> bool: - """Determine if a deprecation message should be logged. - - :param version: The version in which the thing was deprecated. - :param boundary_version: The version at which deprecation level is logged. - - :return: True if the message should be logged, else False. - """ - return boundary_version == "devel" or Version.from_str( - version - ) <= Version.from_str(boundary_version) - - -def deprecate( - *, - deprecated: str, - deprecated_version: str, - extra_message: Optional[str] = None, - schedule: int = 5, - skip_log: bool = False, -) -> DeprecationLog: - """Mark a "thing" as deprecated. Deduplicated deprecations are - logged. - - @param deprecated: Noun to be deprecated. Write this as the start - of a sentence, with no period. Version and extra message will - be appended. - @param deprecated_version: The version in which the thing was - deprecated - @param extra_message: A remedy for the user's problem. 
A good - message will be actionable and specific (i.e., don't use a - generic "Use updated key." if the user used a deprecated key). - End the string with a period. - @param schedule: Manually set the deprecation schedule. Defaults to - 5 years. Leave a comment explaining your reason for deviation if - setting this value. - @param skip_log: Return log text rather than logging it. Useful for - running prior to logging setup. - @return: NamedTuple containing log level and log message - DeprecationLog(level: int, message: str) - - Note: uses keyword-only arguments to improve legibility - """ - if not hasattr(deprecate, "log"): - setattr(deprecate, "log", set()) - message = extra_message or "" - dedup = hash(deprecated + message + deprecated_version + str(schedule)) - version = Version.from_str(deprecated_version) - version_removed = Version(version.major + schedule, version.minor) - deprecate_msg = ( - f"{deprecated} is deprecated in " - f"{deprecated_version} and scheduled to be removed in " - f"{version_removed}. {message}" - ).rstrip() - if not should_log_deprecation( - deprecated_version, features.DEPRECATION_INFO_BOUNDARY - ): - level = logging.INFO - elif hasattr(LOG, "deprecated"): - level = log.DEPRECATED - else: - level = logging.WARN - log_cache = getattr(deprecate, "log") - if not skip_log and dedup not in log_cache: - log_cache.add(dedup) - LOG.log(level, deprecate_msg) - return DeprecationLog(level, deprecate_msg) - - -def deprecate_call( - *, deprecated_version: str, extra_message: str, schedule: int = 5 -): - """Mark a "thing" as deprecated. Deduplicated deprecations are - logged. - - @param deprecated_version: The version in which the thing was - deprecated - @param extra_message: A remedy for the user's problem. A good - message will be actionable and specific (i.e., don't use a - generic "Use updated key." if the user used a deprecated key). - End the string with a period. - @param schedule: Manually set the deprecation schedule. Defaults to - 5 years. Leave a comment explaining your reason for deviation if - setting this value. - - Note: uses keyword-only arguments to improve legibility - """ - - def wrapper(func): - @functools.wraps(func) - def decorator(*args, **kwargs): - # don't log message multiple times - out = func(*args, **kwargs) - deprecate( - deprecated_version=deprecated_version, - deprecated=func.__name__, - extra_message=extra_message, - schedule=schedule, - ) - return out - - return decorator - - return wrapper - - def read_hotplug_enabled_file(paths: "Paths") -> dict: content: dict = {"scopes": []} try: diff --git a/tests/integration_tests/cmd/test_schema.py b/tests/integration_tests/cmd/test_schema.py index 3155a07919b..c954484012a 100644 --- a/tests/integration_tests/cmd/test_schema.py +++ b/tests/integration_tests/cmd/test_schema.py @@ -3,7 +3,7 @@ import pytest -from cloudinit.util import should_log_deprecation +from cloudinit import lifecycle from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.releases import CURRENT_RELEASE, MANTIC from tests.integration_tests.util import ( @@ -71,7 +71,7 @@ def test_clean_log(self, class_client: IntegrationInstance): ) # the deprecation_version is 22.2 in schema for apt_* keys in # user-data. Pass 22.2 in against the client's version_boundary. 
- if should_log_deprecation("22.2", version_boundary): + if lifecycle.should_log_deprecation("22.2", version_boundary): log_level = "DEPRECATED" else: log_level = "INFO" diff --git a/tests/integration_tests/datasources/test_nocloud.py b/tests/integration_tests/datasources/test_nocloud.py index 6cfe037a448..24aecc0bd8d 100644 --- a/tests/integration_tests/datasources/test_nocloud.py +++ b/tests/integration_tests/datasources/test_nocloud.py @@ -5,8 +5,8 @@ import pytest from pycloudlib.lxd.instance import LXDInstance +from cloudinit import lifecycle from cloudinit.subp import subp -from cloudinit.util import should_log_deprecation from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM from tests.integration_tests.releases import CURRENT_RELEASE, FOCAL @@ -199,7 +199,7 @@ def test_smbios_seed_network(self, client: IntegrationInstance): client, "DEPRECATION_INFO_BOUNDARY" ) # nocloud-net deprecated in version 24.1 - if should_log_deprecation("24.1", version_boundary): + if lifecycle.should_log_deprecation("24.1", version_boundary): log_level = "DEPRECATED" else: log_level = "INFO" diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py index 0bf1b3d49e8..2d8b51ee362 100644 --- a/tests/integration_tests/modules/test_combined.py +++ b/tests/integration_tests/modules/test_combined.py @@ -17,7 +17,8 @@ from pycloudlib.gce.instance import GceInstance import cloudinit.config -from cloudinit.util import is_true, should_log_deprecation +from cloudinit import lifecycle +from cloudinit.util import is_true from tests.integration_tests.decorators import retry from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.integration_settings import PLATFORM @@ -138,7 +139,7 @@ def test_deprecated_message(self, class_client: IntegrationInstance): ) # the changed_version is 22.2 in schema for user.sudo key in # user-data. Pass 22.2 in against the client's version_boundary. 
- if should_log_deprecation("22.2", version_boundary): + if lifecycle.should_log_deprecation("22.2", version_boundary): log_level = "DEPRECATED" deprecation_count = 2 else: diff --git a/tests/integration_tests/modules/test_ubuntu_pro.py b/tests/integration_tests/modules/test_ubuntu_pro.py index f4438163425..0f0cb944aec 100644 --- a/tests/integration_tests/modules/test_ubuntu_pro.py +++ b/tests/integration_tests/modules/test_ubuntu_pro.py @@ -5,7 +5,7 @@ import pytest from pycloudlib.cloud import ImageType -from cloudinit.util import should_log_deprecation +from cloudinit import lifecycle from tests.integration_tests.clouds import IntegrationCloud from tests.integration_tests.conftest import get_validated_source from tests.integration_tests.instances import ( @@ -143,7 +143,7 @@ def test_valid_token(self, client: IntegrationInstance): client, "DEPRECATION_INFO_BOUNDARY" ) # ubuntu_advantage key is deprecated in version 24.1 - if should_log_deprecation("24.1", version_boundary): + if lifecycle.should_log_deprecation("24.1", version_boundary): log_level = "DEPRECATED" else: log_level = "INFO" diff --git a/tests/unittests/config/test_cc_ansible.py b/tests/unittests/config/test_cc_ansible.py index 271d9d037ec..b5b25a64286 100644 --- a/tests/unittests/config/test_cc_ansible.py +++ b/tests/unittests/config/test_cc_ansible.py @@ -7,7 +7,7 @@ from pytest import mark, param, raises -from cloudinit import util +from cloudinit import lifecycle from cloudinit.config import cc_ansible from cloudinit.config.schema import ( SchemaValidationError, @@ -292,7 +292,7 @@ def test_required_keys(self, cfg, exception, mocker): mocker.patch(M_PATH + "AnsiblePull.check_deps") mocker.patch( M_PATH + "AnsiblePull.get_version", - return_value=cc_ansible.Version(2, 7, 1), + return_value=cc_ansible.lifecycle.Version(2, 7, 1), ) mocker.patch( M_PATH + "AnsiblePullDistro.is_installed", @@ -415,7 +415,7 @@ def test_parse_version_distro(self, m_subp): """Verify that the expected version is returned""" assert cc_ansible.AnsiblePullDistro( get_cloud().distro - ).get_version() == util.Version(2, 10, 8) + ).get_version() == lifecycle.Version(2, 10, 8) @mock.patch("cloudinit.subp.subp", side_effect=[(pip_version, "")]) def test_parse_version_pip(self, m_subp): @@ -424,7 +424,7 @@ def test_parse_version_pip(self, m_subp): distro.do_as = MagicMock(return_value=(pip_version, "")) pip = cc_ansible.AnsiblePullPip(distro, "root") received = pip.get_version() - expected = util.Version(2, 13, 2) + expected = lifecycle.Version(2, 13, 2) assert received == expected @mock.patch(M_PATH + "subp.subp", return_value=("stdout", "stderr")) diff --git a/tests/unittests/config/test_cc_ssh.py b/tests/unittests/config/test_cc_ssh.py index 49327bb67e5..a49fbf01baf 100644 --- a/tests/unittests/config/test_cc_ssh.py +++ b/tests/unittests/config/test_cc_ssh.py @@ -7,7 +7,7 @@ import pytest -from cloudinit import ssh_util, util +from cloudinit import lifecycle, ssh_util from cloudinit.config import cc_ssh from cloudinit.config.schema import ( SchemaValidationError, @@ -334,7 +334,7 @@ def test_ssh_hostkey_permissions( Otherwise, 600. 
""" m_gid.return_value = 10 if ssh_keys_group_exists else -1 - m_sshd_version.return_value = util.Version(sshd_version, 0) + m_sshd_version.return_value = lifecycle.Version(sshd_version, 0) key_path = cc_ssh.KEY_FILE_TPL % "rsa" cloud = get_cloud(distro="centos") cc_ssh.handle("name", {"ssh_genkeytypes": ["rsa"]}, cloud, []) diff --git a/tests/unittests/config/test_cc_ubuntu_pro.py b/tests/unittests/config/test_cc_ubuntu_pro.py index df47e7ae41e..40f8035b30d 100644 --- a/tests/unittests/config/test_cc_ubuntu_pro.py +++ b/tests/unittests/config/test_cc_ubuntu_pro.py @@ -7,7 +7,7 @@ import pytest -from cloudinit import subp +from cloudinit import lifecycle, subp from cloudinit.config.cc_ubuntu_pro import ( _attach, _auto_attach, @@ -23,7 +23,6 @@ get_schema, validate_cloudconfig_schema, ) -from cloudinit.util import Version from tests.unittests.helpers import does_not_raise, mock, skipUnlessJsonSchema from tests.unittests.util import get_cloud @@ -452,8 +451,10 @@ class TestUbuntuProSchema: # we're using a high enough version of jsonschema to not need # to skip this test. JSONSCHEMA_SKIP_REASON - if Version.from_str(getattr(jsonschema, "__version__", "999")) - < Version(4) + if lifecycle.Version.from_str( + getattr(jsonschema, "__version__", "999") + ) + < lifecycle.Version(4) else "", id="deprecation_of_ubuntu_advantage_skip_old_json", ), diff --git a/tests/unittests/conftest.py b/tests/unittests/conftest.py index e0baa63b99b..9401f2235ef 100644 --- a/tests/unittests/conftest.py +++ b/tests/unittests/conftest.py @@ -8,7 +8,7 @@ import pytest -from cloudinit import atomic_helper, log, util +from cloudinit import atomic_helper, lifecycle, log, util from cloudinit.cmd.devel import logs from cloudinit.gpg import GPG from tests.hypothesis import HAS_HYPOTHESIS @@ -152,7 +152,7 @@ def clear_deprecation_log(): # Since deprecations are de-duped, the existance (or non-existance) of # a deprecation warning in a previous test can cause the next test to # fail. 
- setattr(util.deprecate, "log", set()) + setattr(lifecycle.deprecate, "log", set()) PYTEST_VERSION_TUPLE = tuple(map(int, pytest.__version__.split("."))) diff --git a/tests/unittests/distros/test_create_users.py b/tests/unittests/distros/test_create_users.py index 8fa7f0cc092..ebbbb418e8a 100644 --- a/tests/unittests/distros/test_create_users.py +++ b/tests/unittests/distros/test_create_users.py @@ -4,8 +4,7 @@ import pytest -from cloudinit import distros, features, ssh_util -from cloudinit.util import should_log_deprecation +from cloudinit import distros, features, lifecycle, ssh_util from tests.unittests.helpers import mock from tests.unittests.util import abstract_to_concrete @@ -145,7 +144,7 @@ def test_create_groups_with_dict_deprecated( expected_levels = ( ["WARNING", "DEPRECATED"] - if should_log_deprecation( + if lifecycle.should_log_deprecation( "23.1", features.DEPRECATION_INFO_BOUNDARY ) else ["INFO"] @@ -180,7 +179,7 @@ def test_explicit_sudo_false(self, m_subp, dist, caplog): expected_levels = ( ["WARNING", "DEPRECATED"] - if should_log_deprecation( + if lifecycle.should_log_deprecation( "22.2", features.DEPRECATION_INFO_BOUNDARY ) else ["INFO"] diff --git a/tests/unittests/net/test_network_state.py b/tests/unittests/net/test_network_state.py index eaad90dc8e1..a03f60f86f8 100644 --- a/tests/unittests/net/test_network_state.py +++ b/tests/unittests/net/test_network_state.py @@ -5,7 +5,7 @@ import pytest import yaml -from cloudinit import util +from cloudinit import lifecycle from cloudinit.net import network_state from cloudinit.net.netplan import Renderer as NetplanRenderer from cloudinit.net.renderers import NAME_TO_RENDERER @@ -215,7 +215,7 @@ def test_v2_warns_deprecated_gateways( In netplan targets we perform a passthrough and the warning is not needed. 
""" - util.deprecate.__dict__["log"] = set() + lifecycle.deprecate.__dict__["log"] = set() ncfg = yaml.safe_load( cfg.format( gateway4="gateway4: 10.54.0.1", diff --git a/tests/unittests/sources/test_digitalocean.py b/tests/unittests/sources/test_digitalocean.py index 34a92453335..c111e710ffc 100644 --- a/tests/unittests/sources/test_digitalocean.py +++ b/tests/unittests/sources/test_digitalocean.py @@ -165,7 +165,7 @@ def test_returns_false_not_on_docean(self, m_read_sysinfo): self.assertTrue(m_read_sysinfo.called) @mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata") - @mock.patch("cloudinit.sources.util.deprecate") + @mock.patch("cloudinit.sources.lifecycle.deprecate") def test_deprecation_log_on_init(self, mock_deprecate, _mock_readmd): ds = self.get_ds() self.assertTrue(ds.get_data()) @@ -176,7 +176,7 @@ def test_deprecation_log_on_init(self, mock_deprecate, _mock_readmd): ) @mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata") - @mock.patch("cloudinit.sources.util.deprecate") + @mock.patch("cloudinit.sources.lifecycle.deprecate") def test_deprecation_log_on_unpick(self, mock_deprecate, _mock_readmd): ds = self.get_ds() self.assertTrue(ds.get_data()) diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py index 175afc0eb94..d67c3552157 100644 --- a/tests/unittests/test_log.py +++ b/tests/unittests/test_log.py @@ -10,7 +10,7 @@ import pytest -from cloudinit import log, util +from cloudinit import lifecycle, log, util from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT from tests.unittests.helpers import CiTestCase @@ -112,7 +112,7 @@ def test_deprecate_log_level_based_on_features( "DEPRECATION_INFO_BOUNDARY", deprecation_info_boundary, ) - util.deprecate( + lifecycle.deprecate( deprecated="some key", deprecated_version="19.2", extra_message="dont use it", @@ -125,17 +125,17 @@ def test_deprecate_log_level_based_on_features( def test_log_deduplication(self, caplog): log.define_extra_loggers() - util.deprecate( + lifecycle.deprecate( deprecated="stuff", deprecated_version="19.1", extra_message=":)", ) - util.deprecate( + lifecycle.deprecate( deprecated="stuff", deprecated_version="19.1", extra_message=":)", ) - util.deprecate( + lifecycle.deprecate( deprecated="stuff", deprecated_version="19.1", extra_message=":)", diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 2ceed7aa32c..c856f97564f 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -22,7 +22,15 @@ import pytest import yaml -from cloudinit import atomic_helper, features, importer, subp, url_helper, util +from cloudinit import ( + atomic_helper, + features, + importer, + lifecycle, + subp, + url_helper, + util, +) from cloudinit.distros import Distro from cloudinit.helpers import Paths from cloudinit.sources import DataSourceHostname @@ -3095,9 +3103,13 @@ class TestVersion: ) def test_eq(self, v1, v2, eq): if eq: - assert util.Version.from_str(v1) == util.Version.from_str(v2) + assert lifecycle.Version.from_str( + v1 + ) == lifecycle.Version.from_str(v2) if not eq: - assert util.Version.from_str(v1) != util.Version.from_str(v2) + assert lifecycle.Version.from_str( + v1 + ) != lifecycle.Version.from_str(v2) @pytest.mark.parametrize( ("v1", "v2", "gt"), @@ -3111,11 +3123,15 @@ def test_eq(self, v1, v2, eq): ) def test_gt(self, v1, v2, gt): if gt: - assert util.Version.from_str(v1) > util.Version.from_str(v2) + assert lifecycle.Version.from_str(v1) > lifecycle.Version.from_str( + v2 + ) if not gt: - assert 
util.Version.from_str(v1) < util.Version.from_str( + assert lifecycle.Version.from_str(v1) < lifecycle.Version.from_str( v2 - ) or util.Version.from_str(v1) == util.Version.from_str(v2) + ) or lifecycle.Version.from_str(v1) == lifecycle.Version.from_str( + v2 + ) @@ -3129,31 +3145,31 @@ def test_gt(self, v1, v2, gt): def test_to_version_and_back_to_str(self, version): """Verify __str__, __iter__, and Version.from_str()""" - assert version == str(util.Version.from_str(version)) + assert version == str(lifecycle.Version.from_str(version)) @pytest.mark.parametrize( ("str_ver", "cls_ver"), ( ( "0.0.0.0", - util.Version(0, 0, 0, 0), + lifecycle.Version(0, 0, 0, 0), ), ( "1.0.0.0", - util.Version(1, 0, 0, 0), + lifecycle.Version(1, 0, 0, 0), ), ( "1.0.2.0", - util.Version(1, 0, 2, 0), + lifecycle.Version(1, 0, 2, 0), ), ( "9.8.2.0", - util.Version(9, 8, 2, 0), + lifecycle.Version(9, 8, 2, 0), ), ), ) def test_from_str(self, str_ver, cls_ver): - assert util.Version.from_str(str_ver) == cls_ver + assert lifecycle.Version.from_str(str_ver) == cls_ver @pytest.mark.allow_dns_lookup From 143bc9e40f7f33695216db60f73e10a900d1d1cd Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Fri, 2 Aug 2024 15:02:42 -0600 Subject: [PATCH 062/131] feat: Single process optimization (#5489) Python interpreter initialization and module import time contribute a significant amount of wall clock time to cloud-init's runtime (and therefore to total boot time). Cloud-init has four stages. Each stage starts its own Python interpreter and loads the same libraries. To eliminate the redundant work of starting an interpreter and loading libraries, this changes cloud-init to run as a single process. Systemd service ordering is retained by using the existing cloud-init services as shims which use a synchronization protocol to start each cloud-init stage and to communicate that each stage is complete to the init system. Since multiple cloud-init processes sit in the critical chain of starting the system, this reduces boot time (including time to ssh login and time to cloud-init completion). Currently only systemd is supported, but the synchronization protocol should be capable of supporting other init systems as well with minor changes. Note: This enables many additional follow-on improvements that eliminate redundant work. However, these potential improvements are deferred for now. This commit has been structured to minimize the changes required to capture the majority of the primary performance savings while preserving correctness and backwards compatibility. Since this changes the semantics of the existing cloud-init unit files, this change takes the opportunity to rename one of its systemd units whose name causes frequent user confusion. The unit named cloud-init.service is often mistaken by users for the only cloud-init service, when it is simply one of four stages. This stage is documented as the "network" stage, so this service will be renamed to "cloud-init-network.service". A new notify service is added as part of this implementation which contains the cloud-init process. This unit is named "cloud-init-main.service".
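To make the Type=notify relationship concrete before the protocol details below: once running as a single process, cloud-init itself reports readiness and completion to systemd over $NOTIFY_SOCKET. A minimal sketch of that handshake, mirroring the sd_notify() calls that all_stages() makes later in this patch (the unit running it is assumed to declare Type=notify):

    from cloudinit import socket as ci_socket

    # systemd holds cloud-init-main.service in "activating" until the
    # process reports readiness over $NOTIFY_SOCKET.
    ci_socket.sd_notify("READY=1")
    # ... run the four stages via the synchronization protocol ...
    ci_socket.sd_notify("STATUS=Completed")
    ci_socket.sd_notify("STOPPING=1")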
Synchronization protocol ======================== - create one Unix socket for each systemd service stage - send sd_notify() - For each of the four stages (local, network, config, final): - when init system sends "start" to the Unix socket, start the stage - when running stage is complete, send "done" to Unix socket File changes ============ socket.py (new) --------------- - define a systemd-notify helper function - define a context manager which implements a multi-socket synchronization protocol cloud-init.service -> cloud-init-network.service (renamed) ---------------------------------------------------------- - renamed to cloud-init-network.service cloud-{init-local,init-network,config,final}.services ------------------------------------------------------ - change ExecStart to use netcat to connect to Unix socket and: - send a start message - wait for completion response - note: a pure Python equivalent is possible for any downstreams which do not package openbsd's netcat (a sketch follows this message) cloud-init-main.service (new) ----------------------------- - set the service type to 'notify' - invoke cloud-init in single process mode - adopt systemd ordering requirements from cloud-init-local.service - adopt KillMode from cloud-final.service main.py ------- - Add command line flag to indicate "all stages" mode - In this mode run each stage followed by an IPC synchronization protocol step cloud-final.service -------------------- - drop KillMode cloud-init-local.service ------------------------- - drop dependencies made redundant by ordering after cloud-init-main.service Performance Impact ================== On Ubuntu 24.04, Python's wall clock start up time as measured with `time python3 -c 'import cloudinit.cmd.main'` on a few cloud types: lxc container: 0.256s QEMU machine: 0.300s gce instance: 0.367s ec2 instance: 0.491s This change eliminates 1x this start up time from time to ssh and 3x this start up time from cloud-init's total completion. Total benefit varies based on the platform that the instance is hosted by, but all platforms will measurably benefit from this change. BREAKING_CHANGE: Run all four cloud-init services as a single systemd service.
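As noted above, downstreams without openbsd's netcat can replace the per-stage shim with pure Python. The sketch below is illustrative only: it assumes the {DEFAULT_RUN_DIR}/share/<stage>.sock path convention that socket.py binds, and the "-return.sock" reply-socket name is hypothetical (it plays the role that netcat's source-socket option would fill):

    import os
    import socket

    stage = "local"
    share = "/run/cloud-init/share"
    shim = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    # Bind a reply address so the datagram completion message can reach us.
    reply_path = f"{share}/{stage}-return.sock"
    if os.path.exists(reply_path):
        os.remove(reply_path)
    shim.bind(reply_path)
    # Ask the single cloud-init process to run this stage, then block
    # until the stage reports that it is done.
    shim.sendto(b"start", f"{share}/{stage}.sock")
    print(shim.recv(4096).decode())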
--- cloudinit/cmd/main.py | 122 ++++++++-- cloudinit/cmd/status.py | 3 +- cloudinit/config/cc_mounts.py | 2 +- .../schemas/schema-cloud-config-v1.json | 4 +- cloudinit/log.py | 3 + cloudinit/socket.py | 174 +++++++++++++++ config/cloud.cfg.tmpl | 2 +- doc/module-docs/cc_mounts/data.yaml | 4 +- doc/rtd/explanation/boot.rst | 2 +- doc/rtd/howto/debugging.rst | 2 +- packages/redhat/cloud-init.spec.in | 4 +- systemd/cloud-config.service.tmpl | 9 +- systemd/cloud-config.target | 4 +- systemd/cloud-final.service.tmpl | 10 +- systemd/cloud-init-local.service.tmpl | 10 +- systemd/cloud-init-main.service.tmpl | 52 +++++ ...e.tmpl => cloud-init-network.service.tmpl} | 9 +- .../assets/enable_coverage.py | 2 +- .../assets/enable_profile.py | 2 +- tests/integration_tests/conftest.py | 2 +- .../datasources/test_nocloud.py | 4 +- tests/unittests/cmd/test_main.py | 6 +- tests/unittests/config/test_cc_mounts.py | 4 +- tests/unittests/test_all_stages.py | 208 ++++++++++++++++++ tests/unittests/test_cli.py | 4 +- 25 files changed, 601 insertions(+), 47 deletions(-) create mode 100644 cloudinit/socket.py create mode 100644 systemd/cloud-init-main.service.tmpl rename systemd/{cloud-init.service.tmpl => cloud-init-network.service.tmpl} (71%) create mode 100644 tests/unittests/test_all_stages.py diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 54ba79e1bbc..2de9826bb83 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -24,6 +24,7 @@ from cloudinit import netinfo from cloudinit import signal_handler from cloudinit import sources +from cloudinit import socket from cloudinit import stages from cloudinit import url_helper from cloudinit import util @@ -38,7 +39,12 @@ from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import log from cloudinit.reporting import events -from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG +from cloudinit.settings import ( + PER_INSTANCE, + PER_ALWAYS, + PER_ONCE, + CLOUD_CONFIG, +) # Welcome message template WELCOME_MSG_TPL = ( @@ -362,8 +368,11 @@ def main_init(name, args): outfmt = None errfmt = None try: - close_stdin(lambda msg: early_logs.append((logging.DEBUG, msg))) - outfmt, errfmt = util.fixup_output(init.cfg, name) + if not args.skip_log_setup: + close_stdin(lambda msg: early_logs.append((logging.DEBUG, msg))) + outfmt, errfmt = util.fixup_output(init.cfg, name) + else: + outfmt, errfmt = util.get_output_cfg(init.cfg, name) except Exception: msg = "Failed to setup output redirection!" util.logexc(LOG, msg) @@ -375,8 +384,9 @@ def main_init(name, args): "Logging being reset, this logger may no longer be active shortly" ) log.reset_logging() - log.setup_logging(init.cfg) - apply_reporting_cfg(init.cfg) + if not args.skip_log_setup: + log.setup_logging(init.cfg) + apply_reporting_cfg(init.cfg) # Any log usage prior to setup_logging above did not have local user log # config applied. 
We send the welcome message now, as stderr/out have @@ -633,8 +643,9 @@ def main_modules(action_name, args): mods = Modules(init, extract_fns(args), reporter=args.reporter) # Stage 4 try: - close_stdin() - util.fixup_output(mods.cfg, name) + if not args.skip_log_setup: + close_stdin() + util.fixup_output(mods.cfg, name) except Exception: util.logexc(LOG, "Failed to setup output redirection!") if args.debug: @@ -643,8 +654,9 @@ def main_modules(action_name, args): "Logging being reset, this logger may no longer be active shortly" ) log.reset_logging() - log.setup_logging(mods.cfg) - apply_reporting_cfg(init.cfg) + if not args.skip_log_setup: + log.setup_logging(mods.cfg) + apply_reporting_cfg(init.cfg) # now that logging is setup and stdout redirected, send welcome welcome(name, msg=w_msg) @@ -804,9 +816,10 @@ def status_wrapper(name, args): ) v1[mode]["start"] = float(util.uptime()) - preexisting_recoverable_errors = next( + handler = next( filter(lambda h: isinstance(h, log.LogExporter), root_logger.handlers) - ).export_logs() + ) + preexisting_recoverable_errors = handler.export_logs() # Write status.json prior to running init / module code atomic_helper.write_json(status_path, status) @@ -847,11 +860,8 @@ def status_wrapper(name, args): v1["stage"] = None # merge new recoverable errors into existing recoverable error list - new_recoverable_errors = next( - filter( - lambda h: isinstance(h, log.LogExporter), root_logger.handlers - ) - ).export_logs() + new_recoverable_errors = handler.export_logs() + handler.clean_logs() for key in new_recoverable_errors.keys(): if key in preexisting_recoverable_errors: v1[mode]["recoverable_errors"][key] = list( @@ -953,9 +963,19 @@ def main(sysv_args=None): default=False, ) + parser.add_argument( + "--all-stages", + dest="all_stages", + action="store_true", + help=( + "Run cloud-init's stages under a single process using a " + "synchronization protocol. This is not intended for CLI usage."
+ ), + default=False, + ) + parser.set_defaults(reporter=None) subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") - subparsers.required = True # Each action and its sub-options (if any) parser_init = subparsers.add_parser( @@ -1143,8 +1163,76 @@ def main(sysv_args=None): status_parser(parser_status) parser_status.set_defaults(action=("status", handle_status_args)) + else: + parser.error("a subcommand is required") args = parser.parse_args(args=sysv_args) + setattr(args, "skip_log_setup", False) + if not args.all_stages: + return sub_main(args) + return all_stages(parser) + + +def all_stages(parser): + """Run all stages in a single process using an ordering protocol.""" + LOG.info("Running cloud-init in single process mode.") + + # this _must_ be called before sd_notify is called otherwise netcat may + # attempt to send "start" before a socket exists + sync = socket.SocketSync("local", "network", "config", "final") + + # notify systemd that cloud-init-main.service is ready, so the + # per-stage shim services may start + socket.sd_notify("READY=1") + # wait for cloud-init-local.service to start + with sync("local"): + # set up logger + args = parser.parse_args(args=["init", "--local"]) + args.skip_log_setup = False + # run local stage + sync.systemd_exit_code = sub_main(args) + + # wait for cloud-init-network.service to start + with sync("network"): + # skip re-setting up logger + args = parser.parse_args(args=["init"]) + args.skip_log_setup = True + # run init stage + sync.systemd_exit_code = sub_main(args) + + # wait for cloud-config.service to start + with sync("config"): + # skip re-setting up logger + args = parser.parse_args(args=["modules", "--mode=config"]) + args.skip_log_setup = True + # run config stage + sync.systemd_exit_code = sub_main(args) + + # wait for cloud-final.service to start + with sync("final"): + # skip re-setting up logger + args = parser.parse_args(args=["modules", "--mode=final"]) + args.skip_log_setup = True + # run final stage + sync.systemd_exit_code = sub_main(args) + + # signal completion to cloud-init-main.service + if sync.experienced_any_error: + message = "a stage of cloud-init exited non-zero" + if sync.first_exception: + message = f"first exception received: {sync.first_exception}" + socket.sd_notify( + f"STATUS=Completed with failure, {message}. Run 'cloud-init status" + " --long' for more details."
+ ) + socket.sd_notify("STOPPING=1") + # exit 1 for a fatal failure in any stage + return 1 + else: + socket.sd_notify("STATUS=Completed") + socket.sd_notify("STOPPING=1") + + +def sub_main(args): # Subparsers.required = True and each subparser sets action=(name, functor) (name, functor) = args.action diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py index 39089802984..f027321ce22 100644 --- a/cloudinit/cmd/status.py +++ b/cloudinit/cmd/status.py @@ -318,8 +318,9 @@ def systemd_failed(wait: bool) -> bool: for service in [ "cloud-final.service", "cloud-config.service", - "cloud-init.service", + "cloud-init-network.service", "cloud-init-local.service", + "cloud-init-main.service", ]: try: stdout = query_systemctl( diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 5efcec946d8..1d9f821bbd0 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -524,7 +524,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno uses_systemd = cloud.distro.uses_systemd() default_mount_options = ( - "defaults,nofail,x-systemd.after=cloud-init.service,_netdev" + "defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev" if uses_systemd else "defaults,nobootwait" ) diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json index c05bd994212..4ae8b4a8f70 100644 --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -2022,12 +2022,12 @@ }, "mount_default_fields": { "type": "array", - "description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.after=cloud-init.service,_netdev``", + "description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev``", "default": [ null, null, "auto", - "defaults,nofail,x-systemd.after=cloud-init.service", + "defaults,nofail,x-systemd.after=cloud-init-network.service", "0", "2" ], diff --git a/cloudinit/log.py b/cloudinit/log.py index 61b96262aa1..983b426b7ce 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -152,6 +152,9 @@ def emit(self, record: logging.LogRecord): def export_logs(self): return copy.deepcopy(self.holder) + def clean_logs(self): + self.holder = defaultdict(list) + def flush(self): pass diff --git a/cloudinit/socket.py b/cloudinit/socket.py new file mode 100644 index 00000000000..7ef19f43798 --- /dev/null +++ b/cloudinit/socket.py @@ -0,0 +1,174 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""A module for common socket helpers.""" +import logging +import os +import socket +import sys +import time +from contextlib import suppress + +from cloudinit.settings import DEFAULT_RUN_DIR + +LOG = logging.getLogger(__name__) + + +def sd_notify(message: str): + """Send a sd_notify message. 
+
+    :param message: sd-notify message (must be valid ascii)
+    """
+    socket_path = os.environ.get("NOTIFY_SOCKET", "")
+
+    if not socket_path:
+        # not running under systemd, no-op
+        return
+
+    elif socket_path[0] == "@":
+        # abstract socket (str.replace returns a new string, so assign it)
+        socket_path = socket_path.replace("@", "\0", 1)
+
+    # unix domain
+    elif socket_path[0] != "/":
+        raise OSError("Unsupported socket type")
+
+    with socket.socket(
+        socket.AF_UNIX, socket.SOCK_DGRAM | socket.SOCK_CLOEXEC
+    ) as sock:
+        LOG.info("Sending sd_notify(%s)", str(message))
+        sock.connect(socket_path)
+        sock.sendall(message.encode("ascii"))
+
+
+class SocketSync:
+    """A two way synchronization protocol over Unix domain sockets."""
+
+    def __init__(self, *names: str):
+        """Initialize a synchronization context.
+
+        1) Ensure that the socket directory exists.
+        2) Bind a socket for each stage.
+
+        Binding the sockets on initialization allows receipt of stage
+        "start" notifications prior to the cloud-init stage being ready to
+        start.
+
+        :param names: stage names, used as unique identifiers
+        """
+        self.stage = ""
+        self.remote = ""
+        self.first_exception = ""
+        self.systemd_exit_code = 0
+        self.experienced_any_error = False
+        self.sockets = {
+            name: socket.socket(
+                socket.AF_UNIX, socket.SOCK_DGRAM | socket.SOCK_CLOEXEC
+            )
+            for name in names
+        }
+        # ensure the directory exists
+        os.makedirs(f"{DEFAULT_RUN_DIR}/share", mode=0o700, exist_ok=True)
+        # remove stale sockets and bind
+        for name, sock in self.sockets.items():
+            socket_path = f"{DEFAULT_RUN_DIR}/share/{name}.sock"
+            with suppress(FileNotFoundError):
+                os.remove(socket_path)
+            sock.bind(socket_path)
+
+    def __call__(self, stage: str):
+        """Set the stage before entering context.
+
+        This enables the context manager to be initialized separately from
+        each stage synchronization.
+
+        :param stage: the name of a stage to synchronize
+
+        Example:
+            sync = SocketSync("stage 1", "stage 2")
+            with sync("stage 1"):
+                pass
+            with sync("stage 2"):
+                pass
+        """
+        if stage not in self.sockets:
+            raise ValueError(f"Invalid stage name: {stage}")
+        self.stage = stage
+        return self
+
+    def __enter__(self):
+        """Wait until a message has been received on this stage's socket.
+
+        Once the message has been received, enter the context.
+        """
+        if os.isatty(sys.stdin.fileno()):
+            LOG.info(
+                "Stdin is a tty, so skipping stage synchronization protocol"
+            )
+            return
+        self.systemd_exit_code = 0
+        sd_notify(
+            "STATUS=Waiting on external services to "
+            f"complete before starting the {self.stage} stage."
+ ) + start_time = time.monotonic() + # block until init system sends us data + # the first value returned contains a message from the init system + # (should be "start") + # the second value contains the path to a unix socket on which to + # reply, which is expected to be /path/to/{self.stage}-return.sock + sock = self.sockets[self.stage] + chunk, self.remote = sock.recvfrom(5) + + if b"start" != chunk: + # The protocol expects to receive a command "start" + self.__exit__(None, None, None) + raise ValueError(f"Received invalid message: [{str(chunk)}]") + elif f"{DEFAULT_RUN_DIR}/share/{self.stage}-return.sock" != str( + self.remote + ): + # assert that the return path is in a directory with appropriate + # permissions + self.__exit__(None, None, None) + raise ValueError(f"Unexpected path to unix socket: {self.remote}") + + total = time.monotonic() - start_time + time_msg = f"took {total: .3f}s to " if total > 0.01 else "" + sd_notify(f"STATUS=Running ({self.stage} stage)") + LOG.debug("sync(%s): synchronization %scomplete", self.stage, time_msg) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Notify the socket that this stage is complete.""" + message = f"Completed socket interaction for boot stage {self.stage}" + if exc_type: + # handle exception thrown in context + self.systemd_exit_code = 1 + self.experienced_any_error = True + status = f"{repr(exc_val)} in {exc_tb.tb_frame}" + message = ( + 'fatal error, run "systemctl status cloud-init-main.service" ' + 'and "cloud-init status --long" for more details' + ) + if not self.first_exception: + self.first_exception = status + LOG.fatal(status) + sd_notify(f"STATUS={status}") + + self.experienced_any_error = self.experienced_any_error or bool( + self.systemd_exit_code + ) + sock = self.sockets[self.stage] + sock.connect(self.remote) + + # the returned message will be executed in a subshell + # hardcode this message rather than sending a more informative message + # to avoid having to sanitize inputs (to prevent escaping the shell) + sock.sendall( + f"echo '{message}'; exit {self.systemd_exit_code};".encode() + ) + sock.close() + + # suppress exception - the exception was logged and the init system + # notified of stage completion (and the exception received as a status + # message). Raising an exception would block the rest of boot, so carry + # on in a degraded state. + return True diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 4b1efdbcbf1..bc3e6067ec4 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -62,7 +62,7 @@ disable_root: true "openmandriva", "photon", "TencentOS"] or is_rhel %} {% if is_rhel %} -mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.after=cloud-init.service,_netdev', '0', '2'] +mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev', '0', '2'] {% else %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% endif %} diff --git a/doc/module-docs/cc_mounts/data.yaml b/doc/module-docs/cc_mounts/data.yaml index 751b301d501..18193f062d3 100644 --- a/doc/module-docs/cc_mounts/data.yaml +++ b/doc/module-docs/cc_mounts/data.yaml @@ -18,7 +18,7 @@ cc_mounts: .. 
code-block:: yaml mounts: - - ["ephemeral0", "/mnt", "auto", "defaults,nofail,x-systemd.after=cloud-init.service", "0", "2"] + - ["ephemeral0", "/mnt", "auto", "defaults,nofail,x-systemd.after=cloud-init-network.service", "0", "2"] - ["swap", "none", "swap", "sw", "0", "0"] In order to remove a previously-listed mount, an entry can be added to the @@ -32,7 +32,7 @@ cc_mounts: .. code-block:: yaml - mount_default_fields: [none, none, "auto", "defaults,nofail,x-systemd.after=cloud-init.service", "0", "2"] + mount_default_fields: [none, none, "auto", "defaults,nofail,x-systemd.after=cloud-init-network.service", "0", "2"] Non-systemd init systems will vary in ``mount_default_fields``. diff --git a/doc/rtd/explanation/boot.rst b/doc/rtd/explanation/boot.rst index a975ca7a093..ff3b65ebd28 100644 --- a/doc/rtd/explanation/boot.rst +++ b/doc/rtd/explanation/boot.rst @@ -108,7 +108,7 @@ Network ======= +------------------+----------------------------------------------------------+ -| systemd service | ``cloud-init.service`` | +| systemd service | ``cloud-init-network.service`` | +---------+--------+----------------------------------------------------------+ | runs | after local stage and configured networking is up | +---------+--------+----------------------------------------------------------+ diff --git a/doc/rtd/howto/debugging.rst b/doc/rtd/howto/debugging.rst index c8b2a2634bc..546e8dd9f45 100644 --- a/doc/rtd/howto/debugging.rst +++ b/doc/rtd/howto/debugging.rst @@ -55,7 +55,7 @@ Cloud-init did not run .. code-block:: - systemctl status cloud-init-local.service cloud-init.service\ + systemctl status cloud-init-local.service cloud-init-network.service\ cloud-config.service cloud-final.service Cloud-init may have started to run, but not completed. This shows how many, diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index bc57fe9aac9..672cd426673 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -124,7 +124,7 @@ if [ $1 -eq 1 ] then /bin/systemctl enable cloud-config.service >/dev/null 2>&1 || : /bin/systemctl enable cloud-final.service >/dev/null 2>&1 || : - /bin/systemctl enable cloud-init.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-init-network.service >/dev/null 2>&1 || : /bin/systemctl enable cloud-init-local.service >/dev/null 2>&1 || : fi %else @@ -141,7 +141,7 @@ if [ $1 -eq 0 ] then /bin/systemctl --no-reload disable cloud-config.service >/dev/null 2>&1 || : /bin/systemctl --no-reload disable cloud-final.service >/dev/null 2>&1 || : - /bin/systemctl --no-reload disable cloud-init.service >/dev/null 2>&1 || : + /bin/systemctl --no-reload disable cloud-init-network.service >/dev/null 2>&1 || : /bin/systemctl --no-reload disable cloud-init-local.service >/dev/null 2>&1 || : fi %else diff --git a/systemd/cloud-config.service.tmpl b/systemd/cloud-config.service.tmpl index 79c75c71ae6..9067d6e4bc0 100644 --- a/systemd/cloud-config.service.tmpl +++ b/systemd/cloud-config.service.tmpl @@ -10,7 +10,14 @@ ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled [Service] Type=oneshot -ExecStart=/usr/bin/cloud-init modules --mode=config +# This service is a shim which preserves systemd ordering while allowing a +# single Python process to run cloud-init's logic. This works by communicating +# with the cloud-init process over a unix socket to tell the process that this +# stage can start, and then wait on a return socket until the cloud-init +# process has completed this stage. 
The output from the return socket is piped +# into a shell so that the process can send a completion message (defaults to +# "done", otherwise includes an error message) and an exit code to systemd. +ExecStart=sh -c 'echo "start" | nc.openbsd -Uu -W1 /run/cloud-init/share/config.sock -s /run/cloud-init/share/config-return.sock | sh' RemainAfterExit=yes TimeoutSec=0 diff --git a/systemd/cloud-config.target b/systemd/cloud-config.target index 2d65e3433ce..be754bbd19d 100644 --- a/systemd/cloud-config.target +++ b/systemd/cloud-config.target @@ -14,5 +14,5 @@ [Unit] Description=Cloud-config availability -Wants=cloud-init-local.service cloud-init.service -After=cloud-init-local.service cloud-init.service +Wants=cloud-init-local.service cloud-init-network.service +After=cloud-init-local.service cloud-init-network.service diff --git a/systemd/cloud-final.service.tmpl b/systemd/cloud-final.service.tmpl index b66533643d3..9fb2f681f73 100644 --- a/systemd/cloud-final.service.tmpl +++ b/systemd/cloud-final.service.tmpl @@ -15,10 +15,16 @@ ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled [Service] Type=oneshot -ExecStart=/usr/bin/cloud-init modules --mode=final +# This service is a shim which preserves systemd ordering while allowing a +# single Python process to run cloud-init's logic. This works by communicating +# with the cloud-init process over a unix socket to tell the process that this +# stage can start, and then wait on a return socket until the cloud-init +# process has completed this stage. The output from the return socket is piped +# into a shell so that the process can send a completion message (defaults to +# "done", otherwise includes an error message) and an exit code to systemd. +ExecStart=sh -c 'echo "start" | nc.openbsd -Uu -W1 /run/cloud-init/share/final.sock -s /run/cloud-init/share/final-return.sock | sh' RemainAfterExit=yes TimeoutSec=0 -KillMode=process {% if variant in ["almalinux", "cloudlinux", "rhel"] %} # Restart NetworkManager if it is present and running. ExecStartPost=/bin/sh -c 'u=NetworkManager.service; \ diff --git a/systemd/cloud-init-local.service.tmpl b/systemd/cloud-init-local.service.tmpl index 0da2d8337e9..b0a534b8f9a 100644 --- a/systemd/cloud-init-local.service.tmpl +++ b/systemd/cloud-init-local.service.tmpl @@ -7,7 +7,6 @@ DefaultDependencies=no {% endif %} Wants=network-pre.target After=hv_kvp_daemon.service -After=systemd-remount-fs.service {% if variant in ["almalinux", "cloudlinux", "rhel"] %} Requires=dbus.socket After=dbus.socket @@ -38,7 +37,14 @@ ExecStartPre=/bin/mkdir -p /run/cloud-init ExecStartPre=/sbin/restorecon /run/cloud-init ExecStartPre=/usr/bin/touch /run/cloud-init/enabled {% endif %} -ExecStart=/usr/bin/cloud-init init --local +# This service is a shim which preserves systemd ordering while allowing a +# single Python process to run cloud-init's logic. This works by communicating +# with the cloud-init process over a unix socket to tell the process that this +# stage can start, and then wait on a return socket until the cloud-init +# process has completed this stage. The output from the return socket is piped +# into a shell so that the process can send a completion message (defaults to +# "done", otherwise includes an error message) and an exit code to systemd. 
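+# A note on the netcat invocation below (flag meanings as documented for
+# OpenBSD netcat, which nc.openbsd is assumed to be): -U selects a
+# unix-domain socket, -u selects datagram mode, -W1 exits after one received
+# packet, and -s binds the local return socket path that the cloud-init
+# process replies to.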
+ExecStart=sh -c 'echo "start" | nc.openbsd -Uu -W1 /run/cloud-init/share/local.sock -s /run/cloud-init/share/local-return.sock | sh' RemainAfterExit=yes TimeoutSec=0 diff --git a/systemd/cloud-init-main.service.tmpl b/systemd/cloud-init-main.service.tmpl new file mode 100644 index 00000000000..1ddfd62073e --- /dev/null +++ b/systemd/cloud-init-main.service.tmpl @@ -0,0 +1,52 @@ +## template:jinja +# systemd ordering resources +# ========================== +# https://systemd.io/NETWORK_ONLINE/ +# https://docs.cloud-init.io/en/latest/explanation/boot.html +# https://www.freedesktop.org/wiki/Software/systemd/NetworkTarget/ +# https://www.freedesktop.org/software/systemd/man/latest/systemd.special.html +# https://www.freedesktop.org/software/systemd/man/latest/systemd-remount-fs.service.html +[Unit] +Description=Cloud-init: Single Process +Wants=network-pre.target +{% if variant in ["almalinux", "cloudlinux", "ubuntu", "unknown", "debian", "rhel"] %} +DefaultDependencies=no +{% endif %} +{% if variant in ["almalinux", "cloudlinux", "rhel"] %} +Requires=dbus.socket +After=dbus.socket +Before=network.service +Before=firewalld.target +Conflicts=shutdown.target +{% endif %} +{% if variant in ["ubuntu", "unknown", "debian"] %} +Before=sysinit.target +Conflicts=shutdown.target +{% endif %} + +After=systemd-remount-fs.service +Before=sysinit.target +Before=cloud-init-local.service +Conflicts=shutdown.target +RequiresMountsFor=/var/lib/cloud +ConditionPathExists=!/etc/cloud/cloud-init.disabled +ConditionKernelCommandLine=!cloud-init=disabled +ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled + +[Service] +Type=notify +ExecStart=/usr/bin/cloud-init --all-stages +KillMode=process +TasksMax=infinity +TimeoutStartSec=infinity +{% if variant in ["almalinux", "cloudlinux", "rhel"] %} +ExecStartPre=/bin/mkdir -p /run/cloud-init +ExecStartPre=/sbin/restorecon /run/cloud-init +ExecStartPre=/usr/bin/touch /run/cloud-init/enabled +{% endif %} + +# Output needs to appear in instance console output +StandardOutput=journal+console + +[Install] +WantedBy=cloud-init.target diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init-network.service.tmpl similarity index 71% rename from systemd/cloud-init.service.tmpl rename to systemd/cloud-init-network.service.tmpl index 58031cc4331..6957b39f1ee 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init-network.service.tmpl @@ -46,7 +46,14 @@ ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled [Service] Type=oneshot -ExecStart=/usr/bin/cloud-init init +# This service is a shim which preserves systemd ordering while allowing a +# single Python process to run cloud-init's logic. This works by communicating +# with the cloud-init process over a unix socket to tell the process that this +# stage can start, and then wait on a return socket until the cloud-init +# process has completed this stage. The output from the return socket is piped +# into a shell so that the process can send a completion message (defaults to +# "done", otherwise includes an error message) and an exit code to systemd. 
+ExecStart=sh -c 'echo "start" | nc.openbsd -Uu -W1 /run/cloud-init/share/network.sock -s /run/cloud-init/share/network-return.sock | sh' RemainAfterExit=yes TimeoutSec=0 diff --git a/tests/integration_tests/assets/enable_coverage.py b/tests/integration_tests/assets/enable_coverage.py index ed71ceef8f5..1d18fcbef04 100644 --- a/tests/integration_tests/assets/enable_coverage.py +++ b/tests/integration_tests/assets/enable_coverage.py @@ -2,7 +2,7 @@ services = [ "cloud-init-local.service", - "cloud-init.service", + "cloud-init-network.service", "cloud-config.service", "cloud-final.service", ] diff --git a/tests/integration_tests/assets/enable_profile.py b/tests/integration_tests/assets/enable_profile.py index a6a0070c3c5..9b68e42ce05 100644 --- a/tests/integration_tests/assets/enable_profile.py +++ b/tests/integration_tests/assets/enable_profile.py @@ -2,7 +2,7 @@ services = [ "cloud-init-local.service", - "cloud-init.service", + "cloud-init-network.service", "cloud-config.service", "cloud-final.service", ] diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index c3b8531ae92..8ba5a81b2b5 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -191,7 +191,7 @@ def _collect_profile(instance: IntegrationInstance, log_dir: Path): log_dir / "profile" / "local.stats", ) instance.pull_file( - "/var/log/cloud-init.service.stats", + "/var/log/cloud-init-network.service.stats", log_dir / "profile" / "network.stats", ) instance.pull_file( diff --git a/tests/integration_tests/datasources/test_nocloud.py b/tests/integration_tests/datasources/test_nocloud.py index 24aecc0bd8d..c3462c433a3 100644 --- a/tests/integration_tests/datasources/test_nocloud.py +++ b/tests/integration_tests/datasources/test_nocloud.py @@ -162,7 +162,7 @@ def test_smbios_seed_network(self, client: IntegrationInstance): """\ [Unit] Description=Serve a local webserver - Before=cloud-init.service + Before=cloud-init-network.service Wants=cloud-init-local.service DefaultDependencies=no After=systemd-networkd-wait-online.service @@ -354,7 +354,7 @@ def _boot_with_cmdline( # and NoCloud operates in network timeframe After=systemd-networkd-wait-online.service After=networking.service - Before=cloud-init.service + Before=cloud-init-network.service [Service] Type=exec diff --git a/tests/unittests/cmd/test_main.py b/tests/unittests/cmd/test_main.py index f9b3faab130..bad728f2a72 100644 --- a/tests/unittests/cmd/test_main.py +++ b/tests/unittests/cmd/test_main.py @@ -13,7 +13,9 @@ from cloudinit.util import ensure_dir, load_text_file, write_file from tests.unittests.helpers import FilesystemMockingTestCase, wrap_and_call -MyArgs = namedtuple("MyArgs", "debug files force local reporter subcommand") +MyArgs = namedtuple( + "MyArgs", "debug files force local reporter subcommand skip_log_setup" +) class TestMain(FilesystemMockingTestCase): @@ -76,6 +78,7 @@ def test_main_init_run_net_runs_modules(self): local=False, reporter=None, subcommand="init", + skip_log_setup=False, ) (_item1, item2) = wrap_and_call( "cloudinit.cmd.main", @@ -122,6 +125,7 @@ def test_main_init_run_net_calls_set_hostname_when_metadata_present(self): local=False, reporter=None, subcommand="init", + skip_log_setup=False, ) def set_hostname(name, cfg, cloud, args): diff --git a/tests/unittests/config/test_cc_mounts.py b/tests/unittests/config/test_cc_mounts.py index 9982b6741c6..7e85987b744 100644 --- a/tests/unittests/config/test_cc_mounts.py +++ b/tests/unittests/config/test_cc_mounts.py @@ -565,9 
+565,9 @@ def test_fstab_mounts_combinations(self):
         LABEL=keepme none ext4 defaults 0 0
         LABEL=UEFI
         /dev/sda4 /mnt2 auto nofail,comment=cloudconfig 1 2
-        /dev/sda5 /mnt3 auto defaults,nofail,x-systemd.after=cloud-init.service,_netdev,comment=cloudconfig 0 2
+        /dev/sda5 /mnt3 auto defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev,comment=cloudconfig 0 2
         /dev/sda1 /mnt xfs auto,comment=cloudconfig 0 2
-        /dev/sda3 /mnt4 btrfs defaults,nofail,x-systemd.after=cloud-init.service,_netdev,comment=cloudconfig 0 2
+        /dev/sda3 /mnt4 btrfs defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev,comment=cloudconfig 0 2
         /dev/sdb1 none swap sw,comment=cloudconfig 0 0
         """  # noqa: E501
         ).strip()
diff --git a/tests/unittests/test_all_stages.py b/tests/unittests/test_all_stages.py
new file mode 100644
index 00000000000..90bde5e1add
--- /dev/null
+++ b/tests/unittests/test_all_stages.py
@@ -0,0 +1,208 @@
+import random
+import signal
+import socket
+import time
+from threading import Thread
+from unittest import mock
+
+from cloudinit import socket as ci_socket
+
+
+class Sync:
+    """A device to send and receive synchronization messages
+
+    Creating an instance of the device sends a b"start"
+    """
+
+    def __init__(self, name: str, path: str):
+        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+        self.sock.connect(f"{path}/share/{name}.sock")
+        self.sock.bind(f"{path}/share/{name}-return.sock")
+        self.sock.sendall(b"start")
+
+    def receive(self):
+        """receive up to 4096 bytes from the socket, then close it"""
+        received = self.sock.recv(4096)
+        self.sock.close()
+        return received
+
+
+class Timeout:
+    """A utility which may be used to verify that a timeout occurs
+
+    TimeoutError is raised on successful timeout.
+
+    Create a signal handler and use signal.alarm to verify that the
+    timeout occurred.
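+
+    A minimal usage sketch (``sock`` is a hypothetical blocking socket):
+
+        with Timeout():
+            sock.recv(4096)  # raises TimeoutError if nothing arrives in 1s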
+    """
+
+    def handle_timeout(self, *_):
+        raise TimeoutError()
+
+    def __enter__(self):
+        signal.signal(signal.SIGALRM, self.handle_timeout)
+        # 1 second is, unfortunately, the minimum
+        signal.alarm(1)
+
+    def __exit__(self, *_):
+        signal.alarm(0)
+
+
+def test_all_stages_times_out(tmp_path):
+    """Verify that no "start" makes the protocol block"""
+    with mock.patch.object(
+        ci_socket, "DEFAULT_RUN_DIR", tmp_path
+    ), mock.patch.object(ci_socket, "sd_notify"), mock.patch.object(
+        ci_socket.os, "isatty", return_value=False
+    ), mock.patch.object(
+        ci_socket.sys.stdin, "fileno"
+    ):
+        sync = ci_socket.SocketSync("first")
+
+        try:
+            with Timeout():
+                # this should block for 1 second
+                with sync("first"):
+                    pass
+        except TimeoutError:
+            # success is a timeout
+            pass
+        else:
+            raise AssertionError("Expected the thing to timeout!")
+
+
+def test_all_stages(tmp_path):
+    """Verify that a socket can store "start" messages
+
+    After a socket has been bound but before it has started listening
+    """
+    expected = "echo 'Completed socket interaction for boot stage {}'; exit 0;"
+    with mock.patch.object(
+        ci_socket, "DEFAULT_RUN_DIR", tmp_path
+    ), mock.patch.object(ci_socket, "sd_notify"), mock.patch.object(
+        ci_socket.os, "isatty", return_value=False
+    ), mock.patch.object(
+        ci_socket.sys.stdin, "fileno"
+    ):
+        sync = ci_socket.SocketSync("first", "second", "third")
+
+        # send all three syncs to the sockets
+        first = Sync("first", tmp_path)
+        second = Sync("second", tmp_path)
+        third = Sync("third", tmp_path)
+
+        # "wait" on the first sync event
+        with sync("first"):
+            pass
+
+        # check that the first sync returned
+        assert expected.format("first").encode() == first.receive()
+        # "wait" on the second sync event
+        with sync("second"):
+            pass
+        # check that the second sync returned
+        assert expected.format("second").encode() == second.receive()
+        # "wait" on the third sync event
+        with sync("third"):
+            pass
+        # check that the third sync returned
+        assert expected.format("third").encode() == third.receive()
+
+
+def test_all_stages_threaded(tmp_path):
+    """Verify that arbitrary "start" order works"""
+
+    # in milliseconds
+    max_sleep = 100
+    # initialize random number generator
+    random.seed(time.time())
+    expected = "echo 'Completed socket interaction for boot stage {}'; exit 0;"
+    sync_storage = {}
+
+    def syncer(index: int, name: str):
+        """sleep for 0-100ms then send a sync notification
+
+        this allows sync order to be arbitrary
+        """
+        time.sleep(0.001 * random.randint(0, max_sleep))
+        sync_storage[index] = Sync(name, tmp_path)
+
+    with mock.patch.object(
+        ci_socket, "DEFAULT_RUN_DIR", tmp_path
+    ), mock.patch.object(ci_socket, "sd_notify"), mock.patch.object(
+        ci_socket.os, "isatty", return_value=False
+    ), mock.patch.object(
+        ci_socket.sys.stdin, "fileno"
+    ):
+
+        sync = ci_socket.SocketSync(
+            "first", "second", "third", "fourth", "fifth"
+        )
+
+        threads = []
+        for i, name in {
+            1: "first",
+            2: "second",
+            3: "third",
+            4: "fourth",
+            5: "fifth",
+        }.items():
+            t = Thread(target=syncer, args=(i, name))
+            # start() runs the syncers concurrently so that "start" messages
+            # really arrive in arbitrary order; run() would execute them
+            # sequentially in this thread and defeat the test's purpose
+            t.start()
+            threads.append(t)
+        for t in threads:
+            t.join()
+
+        # wait on the first sync event
+        with sync("first"):
+            pass
+
+        # check that the first sync returned
+        assert expected.format("first").encode() == sync_storage[1].receive()
+
+        # wait on the second sync event
+        with sync("second"):
+            pass
+
+        # check that the second sync returned
+        assert expected.format("second").encode() == sync_storage[2].receive()
+
+        # wait on the third sync event
+        with sync("third"):
+            pass
+
+        # check that the third sync returned
+        assert expected.format("third").encode() == sync_storage[3].receive()
+
+        with sync("fourth"):
+            pass
+
+        # check that the fourth sync returned
+        assert expected.format("fourth").encode() == sync_storage[4].receive()
+
+        with sync("fifth"):
+            pass
+
+        # check that the fifth sync returned
+        assert expected.format("fifth").encode() == sync_storage[5].receive()
+
+
+def test_all_stages_exception(tmp_path):
+    """Verify that an exception in context produces a valid warning message"""
+    with mock.patch.object(
+        ci_socket, "DEFAULT_RUN_DIR", tmp_path
+    ), mock.patch.object(ci_socket, "sd_notify"), mock.patch.object(
+        ci_socket.os, "isatty", return_value=False
+    ), mock.patch.object(
+        ci_socket.sys.stdin, "fileno"
+    ):
+        sync = ci_socket.SocketSync("first", "second", "third")
+
+        # send the first sync to its socket
+        first = Sync("first", tmp_path)
+
+        # "wait" on the first sync event
+        with sync("first"):
+            # verify that an exception in context doesn't raise
+            1 / 0  # pylint: disable=W0104
+
+        assert (
+            b"echo 'fatal error, run \"systemctl status cloud-init-main."
+            b'service" and "cloud-init status --long" for '
+            b"more details'; exit 1;" == first.receive()
+        )
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index 6ab6d496b16..a7c3b1ba38b 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -160,9 +160,7 @@ def test_no_arguments_shows_usage(self, capsys):
 
     def test_no_arguments_shows_error_message(self, capsys):
         exit_code = self._call_main()
-        missing_subcommand_message = (
-            "the following arguments are required: subcommand"
-        )
+        missing_subcommand_message = "a subcommand is required"
         _out, err = capsys.readouterr()
         assert (
             missing_subcommand_message in err

From b7b11bc04343a40ab4d1a1a6024fd824e98401af Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Sat, 3 Aug 2024 00:28:50 -0600
Subject: [PATCH 063/131] fix: nocloud no fail when network-config absent
 (#5580)

Commit 5322dca2 introduced an assumption to read_seeded that
network-config must always be present for NoCloud datasource. Since it
is still considered an optional supplemental configuration, allow the
read_seeded calls to succeed in the absence of network-config.
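A minimal sketch of the resulting calling convention (the seed location
is illustrative; names match the diff below):

    from cloudinit import util

    md, ud, vd, network = util.read_seeded("file:///seed/nocloud/")
    if network is None:
        # network-config was absent; no exception is raised anymore
        pass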
Avoids failures seen in tests/integration-tests/datasources/test_nocloud.py:: test_nocloud_seedfrom_vendordata --- cloudinit/sources/DataSourceNoCloud.py | 3 ++- cloudinit/util.py | 14 +++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 289205e8599..23bb2663bf8 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -481,7 +481,8 @@ def get_datasource_list(depends): logging.basicConfig(level=logging.DEBUG) seedfrom = argv[1] - md_seed, ud, vd = util.read_seeded(seedfrom) + md_seed, ud, vd, network = util.read_seeded(seedfrom) print(f"seeded: {md_seed}") print(f"ud: {ud}") print(f"vd: {vd}") + print(f"network: {network}") diff --git a/cloudinit/util.py b/cloudinit/util.py index 87b8aa071dc..34d3623a7f7 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1069,12 +1069,16 @@ def read_seeded(base="", ext="", timeout=5, retries=10): vd_url = "%s%s%s" % (base, "vendor-data", ext) md_url = "%s%s%s" % (base, "meta-data", ext) network_url = "%s%s%s" % (base, "network-config", ext) - network_resp = url_helper.read_file_or_url( - network_url, timeout=timeout, retries=retries - ) network = None - if network_resp.ok(): - network = load_yaml(network_resp.contents) + try: + network_resp = url_helper.read_file_or_url( + network_url, timeout=timeout, retries=retries + ) + except url_helper.UrlError as e: + LOG.debug("No network config provided: %s", e) + else: + if network_resp.ok(): + network = load_yaml(network_resp.contents) md_resp = url_helper.read_file_or_url( md_url, timeout=timeout, retries=retries ) From 0aea65c204f474686003065290e73c53bf2d078d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 5 Aug 2024 17:24:43 -0400 Subject: [PATCH 064/131] chore: Fix log message in url_helper.py (#5583) --- cloudinit/url_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index d409e322858..eb2442993b5 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -739,7 +739,7 @@ def read_url_handle_exceptions( time_taken = int(time.monotonic() - start_time) max_wait_str = "%ss" % max_wait if max_wait else "unlimited" status_msg = "Calling '%s' failed [%s/%s]: %s" % ( - url or getattr(url_exc, "url", "url ? None"), + url or getattr(url_exc, "url", "url"), time_taken, max_wait_str, reason, From c0ffdd4d0637216a3db4b5d3c6c0592a08043ff0 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 5 Aug 2024 16:16:27 -0600 Subject: [PATCH 065/131] fix: Update default LXD meta-data with user meta-data (#5584) This was previously unnecessary because: a. LXD automatically appends the user.meta-data key to default meta-data. b. In the presence of duplicate keys, PyYAML uses the last key. This change is the cloud-init part of a set of changes that will enable cloud-init to avoid depending on undefined behavior. In the future LXD may stop appending user-defined meta-data to its default meta-data. This change makes cloud-init forward compatible to LXD for when that change is implemented. 
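As a rough sketch of the merge semantics this change enforces (the dict
values are illustrative, not real LXD metadata):

    default_md = {"instance-id": "i-1234", "local-hostname": "default"}
    user_md = {"local-hostname": "custom"}
    # mirrors self.metadata.update(...) in the diff below
    default_md.update(user_md)
    assert default_md["local-hostname"] == "custom"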
GH-5575 --- cloudinit/sources/DataSourceLXD.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py index 4f69d90eb70..43be28e0a15 100644 --- a/cloudinit/sources/DataSourceLXD.py +++ b/cloudinit/sources/DataSourceLXD.py @@ -210,8 +210,8 @@ def _get_data(self) -> bool: config = self._crawled_metadata.get("config", {}) user_metadata = config.get("user.meta-data", {}) if user_metadata: - user_metadata = _raw_instance_data_to_dict( - "user.meta-data", user_metadata + self.metadata.update( + _raw_instance_data_to_dict("user.meta-data", user_metadata) ) if "user-data" in self._crawled_metadata: self.userdata_raw = self._crawled_metadata["user-data"] From 5252fa3eef2feb68e7e4e160af46076a070f21ab Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 6 Aug 2024 09:04:37 -0500 Subject: [PATCH 066/131] update changelog (new upstream snapshot) --- debian/changelog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/debian/changelog b/debian/changelog index 2917cf71b4a..a82bb92dd8d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +cloud-init (24.2-0ubuntu1~20.04.2) UNRELEASED; urgency=medium + + * Upstream snapshot based on upstream/main at c0ffdd4d. + + -- James Falcon Tue, 06 Aug 2024 09:04:37 -0500 + cloud-init (24.2-0ubuntu1~20.04.1) focal; urgency=medium * d/control: remove netifaces due to GH-4634 From d396de1ad007adf6637eee2ec516fa2fd2c474a3 Mon Sep 17 00:00:00 2001 From: Alec Warren Date: Tue, 6 Aug 2024 10:21:58 -0400 Subject: [PATCH 067/131] refactor: refactor and fix mypy in DataSourceIBMCloud.py (#5509) Fixes GH-5445 --- cloudinit/sources/DataSourceIBMCloud.py | 97 ++++++++++++++---------- cloudinit/sources/__init__.py | 2 +- pyproject.toml | 1 - tests/unittests/sources/test_ibmcloud.py | 6 ++ 4 files changed, 64 insertions(+), 42 deletions(-) diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py index 89edd79f7ea..9b3ce8d9f83 100644 --- a/cloudinit/sources/DataSourceIBMCloud.py +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -96,6 +96,7 @@ import json import logging import os +from typing import Any, Callable, Dict, Optional, Tuple from cloudinit import atomic_helper, sources, subp, util from cloudinit.sources.helpers import openstack @@ -176,7 +177,7 @@ def network_config(self): # environment handles networking configuration. Not cloud-init. return {"config": "disabled", "version": 1} if self._network_config is None: - if self.network_json is not None: + if self.network_json not in (sources.UNSET, None): LOG.debug("network config provided via network_json") self._network_config = openstack.convert_net_json( self.network_json, known_macs=None @@ -186,7 +187,12 @@ def network_config(self): return self._network_config -def _read_system_uuid(): +def _read_system_uuid() -> Optional[str]: + """ + Read the system uuid. + + :return: the system uuid or None if not available. + """ uuid_path = "/sys/hypervisor/uuid" if not os.path.isfile(uuid_path): return None @@ -194,6 +200,11 @@ def _read_system_uuid(): def _is_xen(): + """ + Return boolean indicating if this is a xen hypervisor. + + :return: True if this is a xen hypervisor, False otherwise. 
+ """ return os.path.exists("/proc/xen") @@ -201,7 +212,7 @@ def _is_ibm_provisioning( prov_cfg="/root/provisioningConfiguration.cfg", inst_log="/root/swinstall.log", boot_ref="/proc/1/environ", -): +) -> bool: """Return boolean indicating if this boot is ibm provisioning boot.""" if os.path.exists(prov_cfg): msg = "config '%s' exists." % prov_cfg @@ -229,7 +240,7 @@ def _is_ibm_provisioning( return result -def get_ibm_platform(): +def get_ibm_platform() -> Tuple[Optional[str], Optional[str]]: """Return a tuple (Platform, path) If this is Not IBM cloud, then the return value is (None, None). @@ -242,7 +253,7 @@ def get_ibm_platform(): return not_found # fslabels contains only the first entry with a given label. - fslabels = {} + fslabels: Dict[str, Dict] = {} try: devs = util.blkid() except subp.ProcessExecutionError as e: @@ -289,10 +300,10 @@ def get_ibm_platform(): return not_found -def read_md(): +def read_md() -> Optional[Dict[str, Any]]: """Read data from IBM Cloud. - @return: None if not running on IBM Cloud. + :return: None if not running on IBM Cloud. dictionary with guaranteed fields: metadata, version and optional fields: userdata, vendordata, networkdata. Also includes the system uuid from /sys/hypervisor/uuid.""" @@ -300,7 +311,7 @@ def read_md(): if platform is None: LOG.debug("This is not an IBMCloud platform.") return None - elif platform in PROVISIONING: + elif platform in PROVISIONING or path is None: LOG.debug("Cloud-init is disabled during provisioning: %s.", platform) return None @@ -325,71 +336,76 @@ def read_md(): return ret -def metadata_from_dir(source_dir): +def metadata_from_dir(source_dir: str) -> Dict[str, Any]: """Walk source_dir extracting standardized metadata. Certain metadata keys are renamed to present a standardized set of metadata keys. This function has a lot in common with ConfigDriveReader.read_v2 but - there are a number of inconsistencies, such key renames and as only - presenting a 'latest' version which make it an unlikely candidate to share + there are a number of inconsistencies, such as key renames and only + presenting a 'latest' version, which make it an unlikely candidate to share code. - @return: Dict containing translated metadata, userdata, vendordata, + :return: Dict containing translated metadata, userdata, vendordata, networkdata as present. """ - def opath(fname): + def opath(fname: str) -> str: return os.path.join("openstack", "latest", fname) - def load_json_bytes(blob): + def load_json_bytes(blob: bytes) -> Dict[str, Any]: + """ + Load JSON from a byte string. + + This technically could return a list or a str, but we are only + assuming a dict here. + + :param blob: The byte string to load JSON from. + :return: The loaded JSON object. 
+ """ return json.loads(blob.decode("utf-8")) + def load_file(path: str, translator: Callable[[bytes], Any]) -> Any: + try: + raw = util.load_binary_file(path) + return translator(raw) + except IOError as e: + LOG.debug("Failed reading path '%s': %s", path, e) + return None + except Exception as e: + raise sources.BrokenMetadata(f"Failed decoding {path}: {e}") + files = [ # tuples of (results_name, path, translator) ("metadata_raw", opath("meta_data.json"), load_json_bytes), - ("userdata", opath("user_data"), None), + ("userdata", opath("user_data"), lambda x: x), ("vendordata", opath("vendor_data.json"), load_json_bytes), ("networkdata", opath("network_data.json"), load_json_bytes), ] - results = {} - for (name, path, transl) in files: - fpath = os.path.join(source_dir, path) - raw = None - try: - raw = util.load_binary_file(fpath) - except IOError as e: - LOG.debug("Failed reading path '%s': %s", fpath, e) - - if raw is None or transl is None: - data = raw - else: - try: - data = transl(raw) - except Exception as e: - raise sources.BrokenMetadata( - "Failed decoding %s: %s" % (path, e) - ) + results: Dict[str, Any] = {} - results[name] = data + for name, path, transl in files: + fpath = os.path.join(source_dir, path) + results[name] = load_file(fpath, transl) - if results.get("metadata_raw") is None: + if results["metadata_raw"] is None: raise sources.BrokenMetadata( - "%s missing required file 'meta_data.json'" % source_dir + f"{source_dir} missing required file 'meta_data.json'", ) results["metadata"] = {} md_raw = results["metadata_raw"] md = results["metadata"] + if "random_seed" in md_raw: try: md["random_seed"] = base64.b64decode(md_raw["random_seed"]) except (ValueError, TypeError) as e: raise sources.BrokenMetadata( - "Badly formatted metadata random_seed entry: %s" % e + f"Badly formatted metadata random_seed entry: {e}" ) renames = ( @@ -397,9 +413,10 @@ def load_json_bytes(blob): ("hostname", "local-hostname"), ("uuid", "instance-id"), ) - for mdname, newname in renames: - if mdname in md_raw: - md[newname] = md_raw[mdname] + + for old_key, new_key in renames: + if old_key in md_raw: + md[new_key] = md_raw[old_key] return results diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 87b49fcaecc..a3958d9b918 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -325,7 +325,7 @@ def __init__(self, sys_cfg, distro: Distro, paths: Paths, ud_proc=None): self.vendordata_raw = None self.vendordata2_raw = None self.metadata_address = None - self.network_json = UNSET + self.network_json: Optional[str] = UNSET self.ec2_metadata = UNSET self.ds_cfg = util.get_cfg_by_path( diff --git a/pyproject.toml b/pyproject.toml index d5578c1379b..df969290451 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -95,7 +95,6 @@ module = [ "cloudinit.sources.DataSourceExoscale", "cloudinit.sources.DataSourceGCE", "cloudinit.sources.DataSourceHetzner", - "cloudinit.sources.DataSourceIBMCloud", "cloudinit.sources.DataSourceMAAS", "cloudinit.sources.DataSourceNoCloud", "cloudinit.sources.DataSourceOVF", diff --git a/tests/unittests/sources/test_ibmcloud.py b/tests/unittests/sources/test_ibmcloud.py index bee486f4dd5..37c2594dce3 100644 --- a/tests/unittests/sources/test_ibmcloud.py +++ b/tests/unittests/sources/test_ibmcloud.py @@ -272,6 +272,8 @@ def test_template_live(self, m_platform, m_sysuuid): ) ret = ibm.read_md() + if ret is None: # this is needed for mypy - ensures ret is not None + self.fail("read_md returned None unexpectedly") 
self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, ret["platform"]) self.assertEqual(tmpdir, ret["source"]) self.assertEqual(self.userdata, ret["userdata"]) @@ -298,6 +300,8 @@ def test_os_code_live(self, m_platform, m_sysuuid): ) ret = ibm.read_md() + if ret is None: # this is needed for mypy - ensures ret is not None + self.fail("read_md returned None unexpectedly") self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"]) self.assertEqual(tmpdir, ret["source"]) self.assertEqual(self.userdata, ret["userdata"]) @@ -320,6 +324,8 @@ def test_os_code_live_no_userdata(self, m_platform, m_sysuuid): ) ret = ibm.read_md() + if ret is None: # this is needed for mypy - ensures ret is not None + self.fail("read_md returned None unexpectedly") self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"]) self.assertEqual(tmpdir, ret["source"]) self.assertIsNone(ret["userdata"]) From b71f48f8f4e6220e281c74076ba6dc763a27e000 Mon Sep 17 00:00:00 2001 From: Ksenija Stanojevic Date: Tue, 6 Aug 2024 10:19:39 -0700 Subject: [PATCH 068/131] azure: check azure-proxy-agent status (#5138) Azure Guest Proxy Agent is a new feature in Azure that offers a key exchange protocol to secure communication between guest and host using eBPF. Add opt-in feature which enables the Azure Guest Proxy Agent when ovf-env.xml has ProvisionGuestProxyAgent=True. Report provisioning failures if ProvisionGuestProxyAgent is enabled but images do not have azure-proxy-agent installed or functional. --- cloudinit/sources/DataSourceAzure.py | 43 +++++ cloudinit/sources/azure/errors.py | 19 +- tests/unittests/sources/test_azure.py | 243 ++++++++++++++++++++++++++ 3 files changed, 304 insertions(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 44b1e194fa4..be4b5a1fbaf 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -553,6 +553,44 @@ def _is_ephemeral_networking_up(self) -> bool: or self._ephemeral_dhcp_ctx.lease is None ) + def _check_azure_proxy_agent_status(self) -> None: + """Check if azure-proxy-agent is ready for communication with WS/IMDS. + + If ProvisionGuestProxyAgent is true, query azure-proxy-agent status, + waiting up to 120 seconds for the proxy to negotiate with Wireserver + and configure an eBPF proxy. Once azure-proxy-agent is ready, + it will exit with code 0 and cloud-init can then expect to be able to + communicate with these services. + + Fail deployment if azure-proxy-agent is not found or otherwise returns + an error. + + For more information, check out: + https://github.com/azure/guestproxyagent + """ + try: + cmd = [ + "azure-proxy-agent", + "--status", + "--wait", + "120", + ] + out, err = subp.subp(cmd) + report_diagnostic_event( + "Running azure-proxy-agent %s resulted" + "in stderr output: %s with stdout: %s" % (cmd, err, out), + logger_func=LOG.debug, + ) + except subp.ProcessExecutionError as error: + if isinstance(error.reason, FileNotFoundError): + report_error = errors.ReportableErrorProxyAgentNotFound() + self._report_failure(report_error) + else: + reportable_error = ( + errors.ReportableErrorProxyAgentStatusFailure(error) + ) + self._report_failure(reportable_error) + @azure_ds_telemetry_reporter def crawl_metadata(self): """Walk all instance metadata sources returning a dict on success. @@ -632,6 +670,11 @@ def crawl_metadata(self): imds_md = {} if self._is_ephemeral_networking_up(): + # check if azure-proxy-agent is enabled in the ovf-env.xml file. 
+ # azure-proxy-agent feature is opt-in and disabled by default. + if cfg.get("ProvisionGuestProxyAgent"): + self._check_azure_proxy_agent_status() + imds_md = self.get_metadata_from_imds(report_failure=True) if not imds_md and ovf_source is None: diff --git a/cloudinit/sources/azure/errors.py b/cloudinit/sources/azure/errors.py index 851a9b6f956..2f715e0c4c7 100644 --- a/cloudinit/sources/azure/errors.py +++ b/cloudinit/sources/azure/errors.py @@ -13,7 +13,7 @@ import requests -from cloudinit import version +from cloudinit import subp, version from cloudinit.sources.azure import identity from cloudinit.url_helper import UrlError @@ -195,3 +195,20 @@ def __init__(self, exception: Exception) -> None: self.supporting_data["exception"] = repr(exception) self.supporting_data["traceback_base64"] = trace_base64 + + +class ReportableErrorProxyAgentNotFound(ReportableError): + def __init__(self) -> None: + super().__init__( + "Unable to activate Azure Guest Proxy Agent." + "azure-proxy-agent not found" + ) + + +class ReportableErrorProxyAgentStatusFailure(ReportableError): + def __init__(self, exception: subp.ProcessExecutionError) -> None: + super().__init__("azure-proxy-agent status failure") + + self.supporting_data["exit_code"] = exception.exit_code + self.supporting_data["stdout"] = exception.stdout + self.supporting_data["stderr"] = exception.stderr diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py index b96f5c718da..40c04016d67 100644 --- a/tests/unittests/sources/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -54,6 +54,16 @@ def mock_wrapping_setup_ephemeral_networking(azure_ds): yield m +@pytest.fixture +def mock_wrapping_report_failure(azure_ds): + with mock.patch.object( + azure_ds, + "_report_failure", + wraps=azure_ds._report_failure, + ) as m: + yield m + + @pytest.fixture def mock_azure_helper_readurl(): with mock.patch( @@ -3764,6 +3774,91 @@ def provisioning_setup( } def test_no_pps(self): + ovf = construct_ovf_env(provision_guest_proxy_agent=False) + md, ud, cfg = dsaz.read_azure_ovf(ovf) + self.mock_util_mount_cb.return_value = (md, ud, cfg, {}) + self.mock_readurl.side_effect = [ + mock.MagicMock(contents=json.dumps(self.imds_md).encode()), + ] + self.mock_azure_get_metadata_from_fabric.return_value = [] + + self.azure_ds._check_and_get_data() + + assert self.mock_subp_subp.mock_calls == [] + + assert self.mock_readurl.mock_calls == [ + mock.call( + "http://169.254.169.254/metadata/instance?" + "api-version=2021-08-01&extended=true", + timeout=30, + headers_cb=imds.headers_cb, + exception_cb=mock.ANY, + infinite=True, + log_req_resp=True, + ), + ] + + # Verify DHCP is setup once. + assert self.mock_wrapping_setup_ephemeral_networking.mock_calls == [ + mock.call(timeout_minutes=20) + ] + assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [ + mock.call( + self.azure_ds.distro, + None, + dsaz.dhcp_log_cb, + ) + ] + assert self.azure_ds._wireserver_endpoint == "10.11.12.13" + assert self.azure_ds._is_ephemeral_networking_up() is False + + # Verify DMI usage. + assert self.mock_dmi_read_dmi_data.mock_calls == [ + mock.call("chassis-asset-tag"), + mock.call("system-uuid"), + ] + assert ( + self.azure_ds.metadata["instance-id"] + == "50109936-ef07-47fe-ac82-890c853f60d5" + ) + + # Verify IMDS metadata. + assert self.azure_ds.metadata["imds"] == self.imds_md + + # Verify reporting ready once. 
+ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [ + mock.call( + endpoint="10.11.12.13", + distro=self.azure_ds.distro, + iso_dev="/dev/sr0", + pubkey_info=None, + ) + ] + + # Verify netlink. + assert self.mock_netlink.mock_calls == [] + + # Verify no reported_ready marker written. + assert self.wrapped_util_write_file.mock_calls == [] + assert self.patched_reported_ready_marker_path.exists() is False + + # Verify reports via KVP. + assert len(self.mock_kvp_report_failure_to_host.mock_calls) == 0 + assert len(self.mock_azure_report_failure_to_fabric.mock_calls) == 0 + assert len(self.mock_kvp_report_success_to_host.mock_calls) == 1 + + # Verify dmesg reported via KVP. + assert len(self.mock_report_dmesg_to_kvp.mock_calls) == 1 + + def test_no_pps_gpa(self): + """test full provisioning scope when azure-proxy-agent + is enabled and running.""" + self.mock_subp_subp.side_effect = [ + subp.SubpResult("Guest Proxy Agent running", ""), + ] + ovf = construct_ovf_env(provision_guest_proxy_agent=True) + md, ud, cfg = dsaz.read_azure_ovf(ovf) + self.mock_util_mount_cb.return_value = (md, ud, cfg, {}) self.mock_readurl.side_effect = [ mock.MagicMock(contents=json.dumps(self.imds_md).encode()), ] @@ -3771,6 +3866,11 @@ def test_no_pps(self): self.azure_ds._check_and_get_data() + assert self.mock_subp_subp.mock_calls == [ + mock.call( + ["azure-proxy-agent", "--status", "--wait", "120"], + ), + ] assert self.mock_readurl.mock_calls == [ mock.call( "http://169.254.169.254/metadata/instance?" @@ -3829,11 +3929,96 @@ def test_no_pps(self): # Verify reports via KVP. assert len(self.mock_kvp_report_failure_to_host.mock_calls) == 0 + assert len(self.mock_azure_report_failure_to_fabric.mock_calls) == 0 assert len(self.mock_kvp_report_success_to_host.mock_calls) == 1 # Verify dmesg reported via KVP. assert len(self.mock_report_dmesg_to_kvp.mock_calls) == 1 + def test_no_pps_gpa_fail(self): + """test full provisioning scope when azure-proxy-agent is enabled and + throwing an exception during provisioning.""" + self.mock_subp_subp.side_effect = [ + subp.ProcessExecutionError( + cmd=["failed", "azure-proxy-agent"], + stdout="test_stdout", + stderr="test_stderr", + exit_code=4, + ), + ] + ovf = construct_ovf_env(provision_guest_proxy_agent=True) + md, ud, cfg = dsaz.read_azure_ovf(ovf) + self.mock_util_mount_cb.return_value = (md, ud, cfg, {}) + self.mock_readurl.side_effect = [ + mock.MagicMock(contents=json.dumps(self.imds_md).encode()), + ] + self.mock_azure_get_metadata_from_fabric.return_value = [] + + self.azure_ds._check_and_get_data() + + assert self.mock_subp_subp.mock_calls == [ + mock.call( + ["azure-proxy-agent", "--status", "--wait", "120"], + ), + ] + assert self.mock_readurl.mock_calls == [ + mock.call( + "http://169.254.169.254/metadata/instance?" + "api-version=2021-08-01&extended=true", + timeout=30, + headers_cb=imds.headers_cb, + exception_cb=mock.ANY, + infinite=True, + log_req_resp=True, + ), + ] + + # Verify DHCP is setup once. + assert self.mock_wrapping_setup_ephemeral_networking.mock_calls == [ + mock.call(timeout_minutes=20) + ] + assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [ + mock.call( + self.azure_ds.distro, + None, + dsaz.dhcp_log_cb, + ) + ] + assert self.azure_ds._wireserver_endpoint == "10.11.12.13" + assert self.azure_ds._is_ephemeral_networking_up() is False + + # Verify DMI usage. 
+ assert self.mock_dmi_read_dmi_data.mock_calls == [ + mock.call("chassis-asset-tag"), + mock.call("system-uuid"), + mock.call("system-uuid"), + ] + assert ( + self.azure_ds.metadata["instance-id"] + == "50109936-ef07-47fe-ac82-890c853f60d5" + ) + + # Verify IMDS metadata. + assert self.azure_ds.metadata["imds"] == self.imds_md + + # Verify reporting ready once. + assert self.mock_azure_get_metadata_from_fabric.mock_calls == [] + + # Verify netlink. + assert self.mock_netlink.mock_calls == [] + + # Verify no reported_ready marker written. + assert self.wrapped_util_write_file.mock_calls == [] + assert self.patched_reported_ready_marker_path.exists() is False + + # Verify reports via KVP. + assert len(self.mock_kvp_report_failure_to_host.mock_calls) == 1 + assert len(self.mock_azure_report_failure_to_fabric.mock_calls) == 1 + assert len(self.mock_kvp_report_success_to_host.mock_calls) == 0 + + # Verify dmesg reported via KVP. + assert len(self.mock_report_dmesg_to_kvp.mock_calls) == 1 + @pytest.mark.parametrize("pps_type", ["Savable", "Running"]) def test_stale_pps(self, pps_type): imds_md_source = copy.deepcopy(self.imds_md) @@ -4522,6 +4707,64 @@ def test_imds_failure_results_in_provisioning_failure(self): assert len(self.mock_kvp_report_success_to_host.mock_calls) == 0 +class TestCheckAzureProxyAgent: + @pytest.fixture(autouse=True) + def proxy_setup( + self, + azure_ds, + mock_subp_subp, + caplog, + mock_wrapping_report_failure, + mock_timestamp, + ): + self.azure_ds = azure_ds + self.mock_subp_subp = mock_subp_subp + self.caplog = caplog + self.mock_wrapping_report_failure = mock_wrapping_report_failure + self.mock_timestamp = mock_timestamp + + def test_check_azure_proxy_agent_status(self): + self.mock_subp_subp.side_effect = [ + subp.SubpResult("Guest Proxy Agent running", ""), + ] + self.azure_ds._check_azure_proxy_agent_status() + assert "Running azure-proxy-agent" in self.caplog.text + assert self.mock_wrapping_report_failure.mock_calls == [] + + def test_check_azure_proxy_agent_status_notfound(self): + exception = subp.ProcessExecutionError(reason=FileNotFoundError()) + self.mock_subp_subp.side_effect = [ + exception, + ] + self.azure_ds._check_azure_proxy_agent_status() + assert "azure-proxy-agent not found" in self.caplog.text + assert self.mock_wrapping_report_failure.mock_calls == [ + mock.call( + errors.ReportableErrorProxyAgentNotFound(), + ), + ] + + def test_check_azure_proxy_agent_status_failure(self): + exception = subp.ProcessExecutionError( + cmd=["failed", "azure-proxy-agent"], + stdout="test_stdout", + stderr="test_stderr", + exit_code=4, + ) + self.mock_subp_subp.side_effect = [ + exception, + ] + self.azure_ds._check_azure_proxy_agent_status() + assert "azure-proxy-agent status failure" in self.caplog.text + assert self.mock_wrapping_report_failure.mock_calls == [ + mock.call( + errors.ReportableErrorProxyAgentStatusFailure( + exception=exception + ), + ), + ] + + class TestGetMetadataFromImds: @pytest.mark.parametrize("route_configured_for_imds", [False, True]) @pytest.mark.parametrize("report_failure", [False, True]) From f93a6b5a6a7888f013f21a5311b2f5ca1010ef55 Mon Sep 17 00:00:00 2001 From: Alec Warren Date: Tue, 6 Aug 2024 13:26:29 -0400 Subject: [PATCH 069/131] doc: improve integration testing configuration instructions (#5556) --- doc/rtd/development/integration_tests.rst | 240 ++++++++++++++++++++-- 1 file changed, 227 insertions(+), 13 deletions(-) diff --git a/doc/rtd/development/integration_tests.rst b/doc/rtd/development/integration_tests.rst index 
5fe5845dd4b..aecb0224455 100644
--- a/doc/rtd/development/integration_tests.rst
+++ b/doc/rtd/development/integration_tests.rst
@@ -27,30 +27,169 @@ Test execution
 ==============
 
 Test execution happens via ``pytest``. A ``tox`` definition exists to run
-integration tests. To run all integration tests, you would run:
+integration tests. When using this, normal ``pytest`` arguments can be
+passed to the ``tox`` command by appending them after the ``--``. See the
+following commands for examples.
 
-.. code-block:: bash
+.. tab-set::
 
-    $ tox -e integration-tests
+    .. tab-item:: All integration tests
 
-``pytest`` arguments may also be passed. For example:
+        .. code-block:: bash
+
+            tox -e integration-tests
+
+    .. tab-item:: Tests inside file or directory
+
+        .. code-block:: bash
+
+            tox -e integration-tests tests/integration_tests/modules/test_combined.py
+
+    .. tab-item:: A specific test
+
+        .. code-block:: bash
+
+            tox -e integration-tests tests/integration_tests/modules/test_combined.py::test_bootcmd
 
-.. code-block:: bash
 
-    $ tox -e integration-tests tests/integration_tests/modules/test_combined.py
 
 Configuration
 =============
 
 All possible configuration values are defined in
-`tests/integration_tests/integration_settings.py`_. Defaults can be overridden
-by supplying values in :file:`tests/integration_tests/user_settings.py` or by
+`tests/integration_tests/integration_settings.py`_. Look in this file for
+the full list of variables that are available and for context on what each
+variable does and what the default values are.
+Defaults can be overridden by supplying values in
+:file:`tests/integration_tests/user_settings.py` or by
 providing an environment variable of the same name prepended with
 ``CLOUD_INIT_``. For example, to set the ``PLATFORM`` setting:
 
 .. code-block:: bash
 
-    CLOUD_INIT_PLATFORM='ec2' pytest tests/integration_tests/
+    CLOUD_INIT_PLATFORM='ec2' tox -e integration_tests -- tests/integration_tests/
+
+
+Common integration test run configurations
+==========================================
+
+
+Keep instance after test run
+-------------------------------
+
+By default, the test instance is torn down after the test run. To keep
+the instance running after the test run, set the ``KEEP_INSTANCE`` variable
+to ``True``.
+
+.. tab-set::
+
+    .. tab-item:: Inline environment variable
+
+        .. code-block:: bash
+
+            CLOUD_INIT_KEEP_INSTANCE=True tox -e integration_tests
+
+    .. tab-item:: user_settings.py file
+
+        .. code-block:: python
+
+            KEEP_INSTANCE = True
+
+
+Use in-place cloud-init source code
+-------------------------------------
+
+The simplest way to test an integration test using your current cloud-init
+changes is to set the ``CLOUD_INIT_SOURCE`` to ``IN_PLACE``. This works ONLY
+on LXD containers. This will mount the source code as-is directly into
+the container to override the pre-existing cloud-init code within the
+container. This won't work for non-local LXD remotes and won't run any
+installation code since the source code is mounted directly.
+
+.. tab-set::
+
+    .. tab-item:: Inline environment variable
+
+        .. code-block:: bash
+
+            CLOUD_INIT_CLOUD_INIT_SOURCE=IN_PLACE tox -e integration_tests
+
+    .. tab-item:: user_settings.py file
+
+        .. code-block:: python
+
+            CLOUD_INIT_SOURCE = 'IN_PLACE'
+
+
+Collecting logs after test run
+-------------------------------
+
+By default, logs are collected only when a test fails, by running ``cloud-init
+collect-logs`` on the instance. To collect logs after every test run, set the
+``COLLECT_LOGS`` variable to ``ALWAYS``.
+ +By default, the logs are collected to the ``/tmp/cloud_init_test_logs`` +directory. To change the directory, set the ``LOCAL_LOG_PATH`` variable to +the desired path. + +.. tab-set:: + + .. tab-item:: Inline environment variable + + .. code-block:: bash + + CLOUD_INIT_COLLECT_LOGS=ALWAYS CLOUD_INIT_LOCAL_LOG_PATH=/tmp/your-local-directory tox -e integration-tests + + .. tab-item:: user_settings.py file + + .. code-block:: python + + COLLECT_LOGS = "ALWAYS" + LOCAL_LOG_PATH = "/tmp/logs" + + +Advanced test reporting and profiling +------------------------------------- + +For advanced test reporting, set the ``INCLUDE_COVERAGE`` variable to ``True``. +This will generate a coverage report for the integration test run, and the +report will be stored in an ``html`` directory inside the directory specified +by ``LOCAL_LOG_PATH``. + +.. tab-set:: + + .. tab-item:: Inline environment variable + + .. code-block:: bash + + CLOUD_INIT_INCLUDE_COVERAGE=True tox -e integration-tests + + .. tab-item:: user_settings.py file + + .. code-block:: python + + INCLUDE_COVERAGE = True + + +Additionally, for profiling the integration tests, set the ``INCLUDE_PROFILE`` +variable to ``True``. This will generate a profile report for the integration +test run, and the report will be stored in the directory specified by +``LOCAL_LOG_PATH``. + +.. tab-set:: + + .. tab-item:: Inline environment variable + + .. code-block:: bash + + CLOUD_INIT_INCLUDE_PROFILE=True tox -e integration-tests + + .. tab-item:: user_settings.py file + + .. code-block:: python + + INCLUDE_PROFILE = True + Cloud interaction ================= @@ -65,6 +204,39 @@ For a minimal setup using LXD, write the following to [lxd] + +For more information on configuring pycloudlib, see the +`pycloudlib configuration documentation`_. + +To specify a cloud to test against, first ensure that your pycloudlib +configuration is set up correctly. Then, modify the ``PLATFORM`` variable to be +one of: + +- ``azure``: Microsoft Azure +- ``ec2``: Amazon EC2 +- ``gce``: Google Compute Engine +- ``ibm``: IBM Cloud +- ``lxd_container``: LXD container +- ``lxd_vm``: LXD VM +- ``oci``: Oracle Cloud Infrastructure +- ``openstack``: OpenStack +- ``qemu``: QEMU + +.. tab-set:: + + .. tab-item:: Inline environment variable + + .. code-block:: bash + + CLOUD_INIT_PLATFORM='lxd_container' tox -e integration-tests + + .. tab-item:: user_settings.py file + + .. code-block:: python + + PLATFORM = 'lxd_container' + + Image selection =============== @@ -87,14 +259,32 @@ tests against the image in question. If it's a RHEL8 image, then we would expect Ubuntu-specific tests to fail (and vice versa). To address this, a full image specification can be given. This is of -the form: ``<image_id>[::<os>[::<release>]]`` where ``image_id`` is a +the form: ``<image_id>[::<os>::<release>::<version>]`` where ``image_id`` is a cloud's image ID, ``os`` is the OS name, and ``release`` is the OS -release name. So, for example, Ubuntu 18.04 (Bionic Beaver) on LXD is -``ubuntu:bionic::ubuntu::bionic`` or RHEL8 on Amazon is +release name. So, for example, Ubuntu 24.04 LTS (Noble Numbat) on LXD is +``ubuntu:noble::ubuntu::noble::24.04`` or RHEL8 on Amazon is ``ami-justanexample::rhel::8``. When a full specification is given, only tests which are intended for use on that OS and release will be executed. +To run integration tests on a specific image, modify the ``OS_IMAGE`` +variable to be the desired image specification. + +.. tab-set:: + + .. tab-item:: Inline environment variable + + .. 
code-block:: bash + + CLOUD_INIT_OS_IMAGE='jammy' tox -e integration-tests + + .. tab-item:: user_settings.py file + + .. code-block:: python + + OS_IMAGE = 'jammy' + + Image setup =========== @@ -108,6 +298,29 @@ via fixture. Image setup roughly follows these steps: * Take a snapshot of the instance to be used as a new image from which new instances can be launched. + +Keep image after test run +-------------------------- + +By default, the image created during the test run is torn down after +the test run. If further debugging is needed, you can keep the image snapshot +for further use by setting the ``KEEP_IMAGE`` variable to ``True``. + +.. tab-set:: + + .. tab-item:: Inline environment variable + + .. code-block:: bash + + CLOUD_INIT_KEEP_IMAGE=True tox -e integration-tests + + .. tab-item:: user_settings.py file + + .. code-block:: python + + KEEP_IMAGE = True + + Test setup ========== @@ -155,7 +368,7 @@ The ``client`` fixture should be used for most test cases. It ensures: ``module_client`` and ``class_client`` fixtures also exist for the purpose of running multiple tests against a single launched instance. They provide the exact same functionality as ``client``, but are -scoped to the module or class respectively. +scoped to the module or class respectively. ``session_cloud`` ----------------- @@ -213,3 +426,4 @@ Customizing the launch arguments before launching an instance manually: .. _first be configured: https://pycloudlib.readthedocs.io/en/latest/configuration.html#configuration .. _Pytest marks: https://github.com/canonical/cloud-init/blob/af7eb1deab12c7208853c5d18b55228e0ba29c4d/tests/integration_tests/conftest.py#L220-L224 .. _IntegrationCloud: https://github.com/canonical/cloud-init/blob/af7eb1deab12c7208853c5d18b55228e0ba29c4d/tests/integration_tests/clouds.py#L102 +.. _pycloudlib configuration documentation: https://pycloudlib.readthedocs.io/en/latest/configuration.html From 171926a8c9d7e5dfc133d7c254ff2fd231f8d1bf Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 6 Aug 2024 09:06:14 -0500 Subject: [PATCH 070/131] refresh patches --- ...retain-file-argument-as-main-cmd-arg.patch | 21 ++++++------ ...ported-systemd-condition-environment.patch | 32 ++++--------------- ...560d-cloud-config-after-snap-seeding.patch | 2 +- 3 files changed, 17 insertions(+), 38 deletions(-) diff --git a/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch b/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch index 98a3f1cd6aa..62ae666bec4 100644 --- a/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch +++ b/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch @@ -9,7 +9,7 @@ Bug: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/2064300 Last-Update: 2024-04-30 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py -@@ -121,6 +121,11 @@ def extract_fns(args): +@@ -147,6 +147,11 @@ def extract_fns(args): # since it would of broke if it couldn't have # read that file already... fn_cfgs = [] @@ -21,7 +21,7 @@ Last-Update: 2024-04-30 Date: Tue, 6 Aug 2024 09:06:21 -0500 Subject: [PATCH 071/131] update changelog --- debian/changelog | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/debian/changelog b/debian/changelog index a82bb92dd8d..8c156bc0a97 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,6 +1,10 @@ cloud-init (24.2-0ubuntu1~20.04.2) UNRELEASED; urgency=medium * Upstream snapshot based on upstream/main at c0ffdd4d. 
+ * refresh patches: + - d/p/cli-retain-file-argument-as-main-cmd-arg.patch + - d/p/drop-unsupported-systemd-condition-environment.patch + - d/p/revert-551f560d-cloud-config-after-snap-seeding.patch -- James Falcon Tue, 06 Aug 2024 09:04:37 -0500 From 7532589f42e5bd2bb4b7f906ce289244efd7792c Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 5 Aug 2024 15:38:56 -0600 Subject: [PATCH 072/131] fix: Fix ftp failures (#5585) - fix exception handling when retr fails - test: Close connection on failure - test: Ensure server is running before it is queried --- cloudinit/url_helper.py | 54 ++++++++++++------- .../datasources/test_nocloud.py | 7 ++- 2 files changed, 40 insertions(+), 21 deletions(-) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index eb2442993b5..9cb3d4a0088 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -140,6 +140,8 @@ def read_ftps(url: str, timeout: float = 5.0, **kwargs: dict) -> "FtpResponse": user=user, passwd=url_parts.password or "", ) + LOG.debug("Creating a secure connection") + ftp_tls.prot_p() except ftplib.error_perm as e: LOG.warning( "Attempted to connect to an insecure ftp server but used " @@ -156,15 +158,27 @@ def read_ftps(url: str, timeout: float = 5.0, **kwargs: dict) -> "FtpResponse": headers=None, url=url, ) from e - LOG.debug("Creating a secure connection") - ftp_tls.prot_p() - LOG.debug("Reading file: %s", url_parts.path) - ftp_tls.retrbinary(f"RETR {url_parts.path}", callback=buffer.write) - - response = FtpResponse(buffer.getvalue(), url) - LOG.debug("Closing connection") - ftp_tls.close() - return response + try: + LOG.debug("Reading file: %s", url_parts.path) + ftp_tls.retrbinary( + f"RETR {url_parts.path}", callback=buffer.write + ) + + return FtpResponse(buffer.getvalue(), url) + except ftplib.all_errors as e: + code = ftp_get_return_code_from_exception(e) + raise UrlError( + cause=( + "Reading file from ftp server" + f" failed for url {url} [{code}]" + ), + code=code, + headers=None, + url=url, + ) from e + finally: + LOG.debug("Closing connection") + ftp_tls.close() else: try: ftp = ftplib.FTP() @@ -176,6 +190,14 @@ def read_ftps(url: str, timeout: float = 5.0, **kwargs: dict) -> "FtpResponse": port=port, timeout=timeout or 5.0, # uses float internally ) + LOG.debug("Attempting to login with user [%s]", user) + ftp.login( + user=user, + passwd=url_parts.password or "", + ) + LOG.debug("Reading file: %s", url_parts.path) + ftp.retrbinary(f"RETR {url_parts.path}", callback=buffer.write) + return FtpResponse(buffer.getvalue(), url) except ftplib.all_errors as e: code = ftp_get_return_code_from_exception(e) raise UrlError( @@ -187,17 +209,9 @@ def read_ftps(url: str, timeout: float = 5.0, **kwargs: dict) -> "FtpResponse": headers=None, url=url, ) from e - LOG.debug("Attempting to login with user [%s]", user) - ftp.login( - user=user, - passwd=url_parts.password or "", - ) - LOG.debug("Reading file: %s", url_parts.path) - ftp.retrbinary(f"RETR {url_parts.path}", callback=buffer.write) - response = FtpResponse(buffer.getvalue(), url) - LOG.debug("Closing connection") - ftp.close() - return response + finally: + LOG.debug("Closing connection") + ftp.close() def _read_file(path: str, **kwargs) -> "FileResponse": diff --git a/tests/integration_tests/datasources/test_nocloud.py b/tests/integration_tests/datasources/test_nocloud.py index c3462c433a3..f6659d45d17 100644 --- a/tests/integration_tests/datasources/test_nocloud.py +++ b/tests/integration_tests/datasources/test_nocloud.py @@ -267,6 +267,8 @@ def 
_boot_with_cmdline( #!/usr/bin/python3 import logging + from systemd.daemon import notify + from pyftpdlib.authorizers import DummyAuthorizer from pyftpdlib.handlers import FTPHandler, TLS_FTPHandler from pyftpdlib.servers import FTPServer @@ -298,6 +300,9 @@ def _boot_with_cmdline( handler.abstracted_fs = UnixFilesystem server = FTPServer(("localhost", 2121), handler) + # tell systemd to proceed + notify("READY=1") + # start the ftp server server.serve_forever() """ @@ -357,7 +362,7 @@ def _boot_with_cmdline( Before=cloud-init-network.service [Service] - Type=exec + Type=notify ExecStart=/server.py [Install] From acf04d6165db031d6385cf2f70b228b5b036ae69 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 5 Aug 2024 17:00:26 -0600 Subject: [PATCH 073/131] fix: Fix tests which have outdated strings (#5585) User output and service names recently changed. --- .../test_kernel_command_line_match.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/tests/integration_tests/test_kernel_command_line_match.py b/tests/integration_tests/test_kernel_command_line_match.py index 57abf513ecc..a6eb8533c56 100644 --- a/tests/integration_tests/test_kernel_command_line_match.py +++ b/tests/integration_tests/test_kernel_command_line_match.py @@ -103,10 +103,7 @@ def test_lxd_datasource_kernel_override_nocloud_net( == client.execute("cloud-init query platform").stdout.strip() ) assert url_val in client.execute("cloud-init query subplatform").stdout - assert ( - "Detected platform: DataSourceNoCloudNet. Checking for active" - "instance data" - ) in logs + assert "Detected DataSourceNoCloudNet" in logs @pytest.mark.skipif(PLATFORM != "lxd_vm", reason="Modifies grub config") @@ -116,7 +113,7 @@ def test_lxd_disable_cloud_init_cmdline(client: IntegrationInstance): override_kernel_command_line("cloud-init=disabled", client) assert "Active: inactive (dead)" in client.execute( - "systemctl status cloud-init" + "systemctl status cloud-init.target" ) @@ -128,7 +125,7 @@ def test_lxd_disable_cloud_init_file(client: IntegrationInstance): client.execute("cloud-init --clean") client.restart() assert "Active: inactive (dead)" in client.execute( - "systemctl status cloud-init" + "systemctl status cloud-init.target" ) @@ -142,5 +139,5 @@ def test_lxd_disable_cloud_init_env(client: IntegrationInstance): client.execute("cloud-init --clean") client.restart() assert "Active: inactive (dead)" in client.execute( - "systemctl status cloud-init" + "systemctl status cloud-init.target" ) From 0787d6299e6946f07610e6027ce9b80ee5efe8b0 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 7 Aug 2024 10:19:34 -0600 Subject: [PATCH 074/131] feat(snap): avoid refresh on package_upgrade: true and refresh.hold (#5426) When snap refresh.hold is set to forever, an admin is saying they do not want generic automated refreshes of snaps performed by default. This should be an indicator to cloud-init to avoid calling snap refresh on such systems due to a `package_upgrade: true` present in user-data. For network-limited environments with images which have the snap package manager but don't want to wait and timeout on snap refresh, the following user-data can be provided to still allow for package_upgrade: true, and avoid a 20-30 second wait on snaps being unable to access certain snap URLs. 
  #cloud-config
  package_upgrade: true
  snap:
    commands:
      00: snap refresh --hold=forever

cloud-init now interrogates the refresh.hold state value by calling

  snap get system -d

If snap refresh --hold was called in that environment to set 'forever', cloud-init will skip calling refresh and log the reason for skipping. We cannot honor short time-based refresh.holds because the snap services place a short hold in early boot anyway as systemd units start up. Fixes: GH-5290 --- cloudinit/distros/package_management/snap.py | 21 ++++- .../data.yaml | 11 ++- .../example2.yaml | 7 ++ .../test_package_update_upgrade_install.py | 34 ++++++++ .../test_cc_package_update_upgrade_install.py | 2 +- tests/unittests/distros/test_ubuntu.py | 84 ++++++++++++++++++- 6 files changed, 150 insertions(+), 9 deletions(-) create mode 100644 doc/module-docs/cc_package_update_upgrade_install/example2.yaml diff --git a/cloudinit/distros/package_management/snap.py b/cloudinit/distros/package_management/snap.py index baab9e3ca85..8732cbc43e0 100644 --- a/cloudinit/distros/package_management/snap.py +++ b/cloudinit/distros/package_management/snap.py @@ -35,4 +35,23 @@ def install_packages(self, pkglist: Iterable) -> UninstalledPackages: @staticmethod def upgrade_packages(): - subp.subp(["snap", "refresh"]) + command = ["snap", "get", "system", "-d"] + snap_hold = None + try: + result = subp.subp(command) + snap_hold = ( + util.load_json(result.stdout).get("refresh", {}).get("hold") + ) + except subp.ProcessExecutionError as e: + LOG.info( + "Continuing to snap refresh. Unable to run command: %s: %s", + command, + e, + ) + if snap_hold == "forever": + LOG.info( + "Skipping snap refresh because refresh.hold is set to '%s'", + snap_hold, + ) + else: + subp.subp(["snap", "refresh"]) diff --git a/doc/module-docs/cc_package_update_upgrade_install/data.yaml b/doc/module-docs/cc_package_update_upgrade_install/data.yaml index 121720ab1bd..9474857f560 100644 --- a/doc/module-docs/cc_package_update_upgrade_install/data.yaml +++ b/doc/module-docs/cc_package_update_upgrade_install/data.yaml @@ -1,13 +1,16 @@ cc_package_update_upgrade_install: description: | This module allows packages to be updated, upgraded or installed during - boot. If any packages are to be installed or an upgrade is to be performed - then the package cache will be updated first. If a package installation or - upgrade requires a reboot, then a reboot can be performed if - ``package_reboot_if_required`` is specified. + boot using any available package manager present on a system such as apt, + pkg, snap, yum or zypper. If any packages are to be installed or an upgrade + is to be performed then the package cache will be updated first. If a + package installation or upgrade requires a reboot, then a reboot can be + performed if ``package_reboot_if_required`` is specified. examples: - comment: | Example 1: file: cc_package_update_upgrade_install/example1.yaml + - comment: "By default, ``package_upgrade: true`` performs upgrades on any installed package manager. 
To avoid calling ``snap refresh`` in images with snap installed, setting snap refresh.hold to ``forever`` will prevent cloud-init's snap interaction during any boot" + file: cc_package_update_upgrade_install/example2.yaml name: Package Update Upgrade Install title: Update, upgrade, and install packages diff --git a/doc/module-docs/cc_package_update_upgrade_install/example2.yaml b/doc/module-docs/cc_package_update_upgrade_install/example2.yaml new file mode 100644 index 00000000000..754712ca384 --- /dev/null +++ b/doc/module-docs/cc_package_update_upgrade_install/example2.yaml @@ -0,0 +1,7 @@ +#cloud-config +package_update: true +package_upgrade: true +snap: + commands: + 00: snap refresh --hold=forever +package_reboot_if_required: true diff --git a/tests/integration_tests/modules/test_package_update_upgrade_install.py b/tests/integration_tests/modules/test_package_update_upgrade_install.py index b4c2d3dd102..7da54054263 100644 --- a/tests/integration_tests/modules/test_package_update_upgrade_install.py +++ b/tests/integration_tests/modules/test_package_update_upgrade_install.py @@ -86,6 +86,40 @@ def test_snap_packages_are_installed(self, class_client): assert "curl" in output assert "postman" in output + def test_snap_refresh_not_called_when_refresh_hold_forever( + self, class_client + ): + """Assert snap refresh is not called when snap refresh --hold is set. + + Certain network-limited or secure environments may opt to avoid + contacting snap API endpoints. In those scenarios, it is expected + that automated snap refresh is held for all snaps. Typically, this is + done with snap refresh --hold in those environments. + + Assert cloud-init does not attempt to call snap refresh when + refresh.hold is forever. + """ + assert class_client.execute( + [ + "grep", + r"Running command \['snap', 'refresh'", + "/var/log/cloud-init.log", + ] + ).ok + assert class_client.execute("snap refresh --hold").ok + class_client.instance.clean() + class_client.restart() + assert class_client.execute( + [ + "grep", + r"Running command \['snap', 'refresh']", + "/var/log/cloud-init.log", + ] + ).failed + assert class_client.execute( + "grep 'Skipping snap refresh' /var/log/cloud-init.log" + ).ok + HELLO_VERSIONS_BY_RELEASE = { "oracular": "2.10-3build2", diff --git a/tests/unittests/config/test_cc_package_update_upgrade_install.py b/tests/unittests/config/test_cc_package_update_upgrade_install.py index ad3651ad7b9..c1ede2bc574 100644 --- a/tests/unittests/config/test_cc_package_update_upgrade_install.py +++ b/tests/unittests/config/test_cc_package_update_upgrade_install.py @@ -122,7 +122,7 @@ def _isfile(filename: str): caplog.set_level(logging.WARNING) with mock.patch( - "cloudinit.subp.subp", return_value=("fakeout", "fakeerr") + "cloudinit.subp.subp", return_value=SubpResult("{}", "fakeerr") ) as m_subp: with mock.patch("os.path.isfile", side_effect=_isfile): with mock.patch(M_PATH + "time.sleep") as m_sleep: diff --git a/tests/unittests/distros/test_ubuntu.py b/tests/unittests/distros/test_ubuntu.py index 39be1b2efaf..2391447e2af 100644 --- a/tests/unittests/distros/test_ubuntu.py +++ b/tests/unittests/distros/test_ubuntu.py @@ -1,7 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+import logging + import pytest from cloudinit.distros import fetch +from cloudinit.subp import SubpResult class TestPackageCommand: @@ -14,7 +17,7 @@ def test_package_command_only_refresh_snap_when_available( "cloudinit.distros.ubuntu.Snap.available", return_value=snap_available, ) - m_snap_upgrade_packges = mocker.patch( + m_snap_upgrade_packages = mocker.patch( "cloudinit.distros.ubuntu.Snap.upgrade_packages", return_value=snap_available, ) @@ -27,6 +30,81 @@ def test_package_command_only_refresh_snap_when_available( m_apt_run_package_command.assert_called_once_with("upgrade") m_snap_available.assert_called_once() if snap_available: - m_snap_upgrade_packges.assert_called_once() + m_snap_upgrade_packages.assert_called_once() + else: + m_snap_upgrade_packages.assert_not_called() + + @pytest.mark.parametrize( + "subp_side_effect,expected_log", + ( + pytest.param( + [ + SubpResult( + stdout='{"refresh": {"hold": "forever"}}', stderr=None + ) + ], + "Skipping snap refresh because refresh.hold is set to" + " 'forever'", + id="skip_snap_refresh_due_to_global_hold_forever", + ), + pytest.param( + [ + SubpResult( + stdout=( + '{"refresh": {"hold":' + ' "2024-07-08T15:38:20-06:00"}}' + ), + stderr=None, + ), + SubpResult(stdout="All snaps up to date.", stderr=""), + ], + "", + id="perform_snap_refresh_due_to_temporary_global_hold", + ), + pytest.param( + [ + SubpResult( + stdout="{}", + stderr=( + 'error: snap "core" has no "refresh.hold" ' + "configuration option" + ), + ), + SubpResult(stdout="All snaps up to date.", stderr=""), + ], + "", + id="snap_refresh_performed_when_no_global_hold_is_set", + ), + ), + ) + def test_package_command_avoids_snap_refresh_when_refresh_hold_is_forever( + self, subp_side_effect, expected_log, caplog, mocker + ): + """Do not call snap refresh when snap refresh.hold is forever. + + This indicates an environment where snap refreshes are not preferred + for whatever reason. + """ + m_snap_available = mocker.patch( + "cloudinit.distros.ubuntu.Snap.available", + return_value=True, + ) + m_subp = mocker.patch( + "cloudinit.subp.subp", + side_effect=subp_side_effect, + ) + m_apt_run_package_command = mocker.patch( + "cloudinit.distros.package_management.apt.Apt.run_package_command", + ) + cls = fetch("ubuntu") + distro = cls("ubuntu", {}, None) + with caplog.at_level(logging.INFO): + distro.package_command("upgrade") + m_apt_run_package_command.assert_called_once_with("upgrade") + m_snap_available.assert_called_once() + expected_calls = [mocker.call(["snap", "get", "system", "-d"])] + if expected_log: + assert expected_log in caplog.text else: - m_snap_upgrade_packges.assert_not_called() + expected_calls.append(mocker.call(["snap", "refresh"])) + assert m_subp.call_args_list == expected_calls From 670cf09a3b5786a0a1a44996f068bd70de995827 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 5 Aug 2024 16:52:40 -0600 Subject: [PATCH 075/131] Add no-single-process.patch --- debian/patches/no-single-process.patch | 252 +++++++++++++++++++++++++ debian/patches/series | 1 + 2 files changed, 253 insertions(+) create mode 100644 debian/patches/no-single-process.patch diff --git a/debian/patches/no-single-process.patch b/debian/patches/no-single-process.patch new file mode 100644 index 00000000000..18a58cb4fae --- /dev/null +++ b/debian/patches/no-single-process.patch @@ -0,0 +1,252 @@ +Description: remove single process optimization +This optimization is a big change in behavior, patch it out. 
+ +Author: Brett Holman +Last-Update: 2024-08-02 + +--- a/systemd/cloud-config.service.tmpl ++++ b/systemd/cloud-config.service.tmpl +@@ -10,14 +10,7 @@ + + [Service] + Type=oneshot +-# This service is a shim which preserves systemd ordering while allowing a +-# single Python process to run cloud-init's logic. This works by communicating +-# with the cloud-init process over a unix socket to tell the process that this +-# stage can start, and then wait on a return socket until the cloud-init +-# process has completed this stage. The output from the return socket is piped +-# into a shell so that the process can send a completion message (defaults to +-# "done", otherwise includes an error message) and an exit code to systemd. +-ExecStart=sh -c 'echo "start" | nc.openbsd -Uu -W1 /run/cloud-init/share/config.sock -s /run/cloud-init/share/config-return.sock | sh' ++ExecStart=/usr/bin/cloud-init modules --mode=config + RemainAfterExit=yes + TimeoutSec=0 + +--- a/systemd/cloud-final.service.tmpl ++++ b/systemd/cloud-final.service.tmpl +@@ -15,16 +15,10 @@ + + [Service] + Type=oneshot +-# This service is a shim which preserves systemd ordering while allowing a +-# single Python process to run cloud-init's logic. This works by communicating +-# with the cloud-init process over a unix socket to tell the process that this +-# stage can start, and then wait on a return socket until the cloud-init +-# process has completed this stage. The output from the return socket is piped +-# into a shell so that the process can send a completion message (defaults to +-# "done", otherwise includes an error message) and an exit code to systemd. +-ExecStart=sh -c 'echo "start" | nc.openbsd -Uu -W1 /run/cloud-init/share/final.sock -s /run/cloud-init/share/final-return.sock | sh' ++ExecStart=/usr/bin/cloud-init modules --mode=final + RemainAfterExit=yes + TimeoutSec=0 ++KillMode=process + {% if variant in ["almalinux", "cloudlinux", "rhel"] %} + # Restart NetworkManager if it is present and running. + ExecStartPost=/bin/sh -c 'u=NetworkManager.service; \ +--- a/systemd/cloud-init-local.service.tmpl ++++ b/systemd/cloud-init-local.service.tmpl +@@ -7,6 +7,7 @@ + {% endif %} + Wants=network-pre.target + After=hv_kvp_daemon.service ++After=systemd-remount-fs.service + {% if variant in ["almalinux", "cloudlinux", "rhel"] %} + Requires=dbus.socket + After=dbus.socket +@@ -37,14 +38,7 @@ + ExecStartPre=/sbin/restorecon /run/cloud-init + ExecStartPre=/usr/bin/touch /run/cloud-init/enabled + {% endif %} +-# This service is a shim which preserves systemd ordering while allowing a +-# single Python process to run cloud-init's logic. This works by communicating +-# with the cloud-init process over a unix socket to tell the process that this +-# stage can start, and then wait on a return socket until the cloud-init +-# process has completed this stage. The output from the return socket is piped +-# into a shell so that the process can send a completion message (defaults to +-# "done", otherwise includes an error message) and an exit code to systemd. 
+-ExecStart=sh -c 'echo "start" | nc.openbsd -Uu -W1 /run/cloud-init/share/local.sock -s /run/cloud-init/share/local-return.sock | sh' ++ExecStart=/usr/bin/cloud-init init --local + RemainAfterExit=yes + TimeoutSec=0 + +--- a/systemd/cloud-init-main.service.tmpl ++++ /dev/null +@@ -1,52 +0,0 @@ +-## template:jinja +-# systemd ordering resources +-# ========================== +-# https://systemd.io/NETWORK_ONLINE/ +-# https://docs.cloud-init.io/en/latest/explanation/boot.html +-# https://www.freedesktop.org/wiki/Software/systemd/NetworkTarget/ +-# https://www.freedesktop.org/software/systemd/man/latest/systemd.special.html +-# https://www.freedesktop.org/software/systemd/man/latest/systemd-remount-fs.service.html +-[Unit] +-Description=Cloud-init: Single Process +-Wants=network-pre.target +-{% if variant in ["almalinux", "cloudlinux", "ubuntu", "unknown", "debian", "rhel"] %} +-DefaultDependencies=no +-{% endif %} +-{% if variant in ["almalinux", "cloudlinux", "rhel"] %} +-Requires=dbus.socket +-After=dbus.socket +-Before=network.service +-Before=firewalld.target +-Conflicts=shutdown.target +-{% endif %} +-{% if variant in ["ubuntu", "unknown", "debian"] %} +-Before=sysinit.target +-Conflicts=shutdown.target +-{% endif %} +- +-After=systemd-remount-fs.service +-Before=sysinit.target +-Before=cloud-init-local.service +-Conflicts=shutdown.target +-RequiresMountsFor=/var/lib/cloud +-ConditionPathExists=!/etc/cloud/cloud-init.disabled +-ConditionKernelCommandLine=!cloud-init=disabled +-ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled +- +-[Service] +-Type=notify +-ExecStart=/usr/bin/cloud-init --all-stages +-KillMode=process +-TasksMax=infinity +-TimeoutStartSec=infinity +-{% if variant in ["almalinux", "cloudlinux", "rhel"] %} +-ExecStartPre=/bin/mkdir -p /run/cloud-init +-ExecStartPre=/sbin/restorecon /run/cloud-init +-ExecStartPre=/usr/bin/touch /run/cloud-init/enabled +-{% endif %} +- +-# Output needs to appear in instance console output +-StandardOutput=journal+console +- +-[Install] +-WantedBy=cloud-init.target +--- a/systemd/cloud-init-network.service.tmpl ++++ /dev/null +@@ -1,64 +0,0 @@ +-## template:jinja +-[Unit] +-# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html +-Description=Cloud-init: Network Stage +-{% if variant not in ["almalinux", "cloudlinux", "photon", "rhel"] %} +-DefaultDependencies=no +-{% endif %} +-Wants=cloud-init-local.service +-Wants=sshd-keygen.service +-Wants=sshd.service +-After=cloud-init-local.service +-After=systemd-networkd-wait-online.service +-{% if variant in ["ubuntu", "unknown", "debian"] %} +-After=networking.service +-{% endif %} +-{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora", +- "miraclelinux", "openeuler", "OpenCloudOS", "openmandriva", "rhel", "rocky", +- "suse", "TencentOS", "virtuozzo"] %} +- +-After=network.service +-After=NetworkManager.service +-After=NetworkManager-wait-online.service +-{% endif %} +-{% if variant in ["suse"] %} +-After=wicked.service +-# setting hostname via hostnamectl depends on dbus, which otherwise +-# would not be guaranteed at this point. 
+-After=dbus.service +-{% endif %} +-Before=network-online.target +-Before=sshd-keygen.service +-Before=sshd.service +-Before=systemd-user-sessions.service +-{% if variant in ["ubuntu", "unknown", "debian"] %} +-Before=sysinit.target +-Before=shutdown.target +-Conflicts=shutdown.target +-{% endif %} +-{% if variant in ["suse"] %} +-Before=shutdown.target +-Conflicts=shutdown.target +-{% endif %} +-ConditionPathExists=!/etc/cloud/cloud-init.disabled +-ConditionKernelCommandLine=!cloud-init=disabled +-ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled +- +-[Service] +-Type=oneshot +-# This service is a shim which preserves systemd ordering while allowing a +-# single Python process to run cloud-init's logic. This works by communicating +-# with the cloud-init process over a unix socket to tell the process that this +-# stage can start, and then wait on a return socket until the cloud-init +-# process has completed this stage. The output from the return socket is piped +-# into a shell so that the process can send a completion message (defaults to +-# "done", otherwise includes an error message) and an exit code to systemd. +-ExecStart=sh -c 'echo "start" | nc.openbsd -Uu -W1 /run/cloud-init/share/network.sock -s /run/cloud-init/share/network-return.sock | sh' +-RemainAfterExit=yes +-TimeoutSec=0 +- +-# Output needs to appear in instance console output +-StandardOutput=journal+console +- +-[Install] +-WantedBy=cloud-init.target +--- /dev/null ++++ b/systemd/cloud-init.service.tmpl +@@ -0,0 +1,57 @@ ++## template:jinja ++[Unit] ++# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html ++Description=Cloud-init: Network Stage ++{% if variant not in ["almalinux", "cloudlinux", "photon", "rhel"] %} ++DefaultDependencies=no ++{% endif %} ++Wants=cloud-init-local.service ++Wants=sshd-keygen.service ++Wants=sshd.service ++After=cloud-init-local.service ++After=systemd-networkd-wait-online.service ++{% if variant in ["ubuntu", "unknown", "debian"] %} ++After=networking.service ++{% endif %} ++{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora", ++ "miraclelinux", "openeuler", "OpenCloudOS", "openmandriva", "rhel", "rocky", ++ "suse", "TencentOS", "virtuozzo"] %} ++ ++After=network.service ++After=NetworkManager.service ++After=NetworkManager-wait-online.service ++{% endif %} ++{% if variant in ["suse"] %} ++After=wicked.service ++# setting hostname via hostnamectl depends on dbus, which otherwise ++# would not be guaranteed at this point. 
++After=dbus.service ++{% endif %} ++Before=network-online.target ++Before=sshd-keygen.service ++Before=sshd.service ++Before=systemd-user-sessions.service ++{% if variant in ["ubuntu", "unknown", "debian"] %} ++Before=sysinit.target ++Before=shutdown.target ++Conflicts=shutdown.target ++{% endif %} ++{% if variant in ["suse"] %} ++Before=shutdown.target ++Conflicts=shutdown.target ++{% endif %} ++ConditionPathExists=!/etc/cloud/cloud-init.disabled ++ConditionKernelCommandLine=!cloud-init=disabled ++ ++[Service] ++Type=oneshot ++ExecStart=/usr/bin/cloud-init init ++RemainAfterExit=yes ++TimeoutSec=0 ++ ++# Output needs to appear in instance console output ++StandardOutput=journal+console ++ ++[Install] ++WantedBy=cloud-init.target diff --git a/debian/patches/series b/debian/patches/series index 7dffcbd012a..40dfd970132 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -12,3 +12,4 @@ retain-ec2-default-net-update-events.patch cli-retain-file-argument-as-main-cmd-arg.patch drop-unsupported-systemd-condition-environment.patch deprecation-version-boundary.patch +no-single-process.patch From 80fb9ec7d6dc7ff7e020c609724c97b8d4af03b4 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Wed, 7 Aug 2024 13:09:11 -0600 Subject: [PATCH 076/131] Update changelog --- debian/changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/debian/changelog b/debian/changelog index 8c156bc0a97..0c633d5c59b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,6 @@ cloud-init (24.2-0ubuntu1~20.04.2) UNRELEASED; urgency=medium + * d/p/no-single-process.patch: Remove single process optimization * Upstream snapshot based on upstream/main at c0ffdd4d. * refresh patches: - d/p/cli-retain-file-argument-as-main-cmd-arg.patch From 5f9a919c5e04125e79f0e9a58dd124e6d88ef342 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 5 Aug 2024 16:53:42 -0600 Subject: [PATCH 077/131] Add no-nocloud-network.patch --- debian/patches/no-nocloud-network.patch | 26 +++++++++++++++++++++++++ debian/patches/series | 1 + 2 files changed, 27 insertions(+) create mode 100644 debian/patches/no-nocloud-network.patch diff --git a/debian/patches/no-nocloud-network.patch b/debian/patches/no-nocloud-network.patch new file mode 100644 index 00000000000..5d3269a4ed4 --- /dev/null +++ b/debian/patches/no-nocloud-network.patch @@ -0,0 +1,26 @@ +Description: Don't allow network-config +This may add a new wait time for a file that doesn't exist on existing series +so patch it out. 
+ +Author: Brett Holman +Last-Update: 2024-08-02 + +--- a/cloudinit/sources/DataSourceNoCloud.py ++++ b/cloudinit/sources/DataSourceNoCloud.py +@@ -190,7 +190,7 @@ + + # This could throw errors, but the user told us to do it + # so if errors are raised, let them raise +- md_seed, ud, vd, network = util.read_seeded(seedfrom, timeout=None) ++ md_seed, ud, vd, _ = util.read_seeded(seedfrom, timeout=None) + LOG.debug("Using seeded cache data from %s", seedfrom) + + # Values in the command line override those from the seed +@@ -199,7 +199,6 @@ + ) + mydata["user-data"] = ud + mydata["vendor-data"] = vd +- mydata["network-config"] = network + found.append(seedfrom) + + # Now that we have exhausted any other places merge in the defaults diff --git a/debian/patches/series b/debian/patches/series index 40dfd970132..3e48619923a 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -13,3 +13,4 @@ cli-retain-file-argument-as-main-cmd-arg.patch drop-unsupported-systemd-condition-environment.patch deprecation-version-boundary.patch no-single-process.patch +no-nocloud-network.patch From 437843796a93ec8c4ebffe2dc5c86682b8976090 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Wed, 7 Aug 2024 13:10:01 -0600 Subject: [PATCH 078/131] Update changelog --- debian/changelog | 1 + 1 file changed, 1 insertion(+) diff --git a/debian/changelog b/debian/changelog index 0c633d5c59b..0d7a323ccf8 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,6 @@ cloud-init (24.2-0ubuntu1~20.04.2) UNRELEASED; urgency=medium + * d/p/no-nocloud-network.patch: Remove nocloud network feature * d/p/no-single-process.patch: Remove single process optimization * Upstream snapshot based on upstream/main at c0ffdd4d. * refresh patches: From edd92b712fcb6944e7d701febf2dd5aa01ee14dd Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 7 Aug 2024 16:21:50 -0600 Subject: [PATCH 079/131] fix: read_optional_seed to set network-config when present (#5593) Commit 5322dca2f added network-config support to nocloud's read_optional_seed function. It persisted meta-data as network-config. Add tests and fix to track network-config value. 
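As a minimal sketch of the corrected behavior (the seed path here is hypothetical; the function name and signature are those shown in the diff below):

  from cloudinit import util

  fill = {}
  # On success, fill["network-config"] now carries the parsed
  # network-config payload rather than a duplicate of meta-data.
  if util.read_optional_seed(fill, base="/var/lib/cloud/seed/nocloud/"):
      print(fill["network-config"])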
--- cloudinit/util.py | 2 +- tests/unittests/test_util.py | 60 ++++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 1 deletion(-) diff --git a/cloudinit/util.py b/cloudinit/util.py index 34d3623a7f7..31ba1c83574 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -973,7 +973,7 @@ def read_optional_seed(fill, base="", ext="", timeout=5): fill["user-data"] = ud fill["vendor-data"] = vd fill["meta-data"] = md - fill["network-config"] = md + fill["network-config"] = network return True except url_helper.UrlError as e: if e.code == url_helper.NOT_FOUND: diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index c856f97564f..190eca7610e 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -2456,6 +2456,66 @@ def test_unicode_not_messed_up(self): self.assertNotIn("\x00", roundtripped) +class TestReadOptionalSeed: + @pytest.mark.parametrize( + "seed_dir,expected_fill,retval", + ( + ({}, {}, False), + ({"meta-data": "md"}, {}, False), + ( + {"meta-data": "md: val", "user-data": "ud"}, + { + "meta-data": {"md": "val"}, + "user-data": b"ud", + "network-config": None, + "vendor-data": None, + }, + True, + ), + ( + { + "meta-data": "md: val", + "user-data": "ud", + "network-config": "net: cfg", + }, + { + "meta-data": {"md": "val"}, + "user-data": b"ud", + "network-config": {"net": "cfg"}, + "vendor-data": None, + }, + True, + ), + ( + { + "meta-data": "md: val", + "user-data": "ud", + "vendor-data": "vd", + }, + { + "meta-data": {"md": "val"}, + "user-data": b"ud", + "network-config": None, + "vendor-data": b"vd", + }, + True, + ), + ), + ) + def test_read_optional_seed_sets_fill_on_success( + self, seed_dir, expected_fill, retval, tmpdir + ): + """Set fill dict values based on seed files present.""" + if seed_dir is not None: + helpers.populate_dir(tmpdir.strpath, seed_dir) + fill = {} + assert ( + util.read_optional_seed(fill, tmpdir.strpath + os.path.sep) + is retval + ) + assert fill == expected_fill + + class TestReadSeeded: def test_unicode_not_messed_up(self, tmpdir): ud = b"userdatablob" From 65014b97420b41dcb6e7ea17c66bb2539f9b09fc Mon Sep 17 00:00:00 2001 From: PengpengSun <40026211+PengpengSun@users.noreply.github.com> Date: Sat, 10 Aug 2024 03:32:40 +0800 Subject: [PATCH 080/131] Revert "fix(vmware): Set IPv6 to dhcp when there is no IPv6 addr (#5471)" (#5596) This reverts commit 2b6fe6403db769de14f7c7b7e4aa65f5bea8f3e0. When IPv6 is not explicitly set to dhcp, the NetworkManager keyfile defaults to method=auto, may-fail=true. When IPv6 is explicitly set to dhcp, the NetworkManager keyfile will be set to method=auto, may-fail=false. The default settings are what we want, so revert the previous change to keep IPv6 not set explicitly. 
--- .../sources/helpers/vmware/imc/config_nic.py | 2 +- .../sources/vmware/test_vmware_config_file.py | 68 +++++-------------- 2 files changed, 18 insertions(+), 52 deletions(-) diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index 254518af9e3..b07214a228b 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -207,7 +207,7 @@ def gen_ipv6(self, name, nic): """ if not nic.staticIpv6: - return ([{"type": "dhcp6"}], []) + return ([], []) subnet_list = [] # Static Ipv6 diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py index c1415934141..fd4bb481e46 100644 --- a/tests/unittests/sources/vmware/test_vmware_config_file.py +++ b/tests/unittests/sources/vmware/test_vmware_config_file.py @@ -241,45 +241,27 @@ def test_get_nics_list_dhcp(self): elif cfg.get("name") == nic2.get("name"): nic2.update(cfg) - # Test NIC1 self.assertEqual("physical", nic1.get("type"), "type of NIC1") self.assertEqual("NIC1", nic1.get("name"), "name of NIC1") self.assertEqual( "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1" ) subnets = nic1.get("subnets") - self.assertEqual(2, len(subnets), "number of subnets for NIC1") - subnet_ipv4 = subnets[0] - self.assertEqual( - "dhcp", subnet_ipv4.get("type"), "Ipv4 DHCP type for NIC1" - ) - self.assertEqual( - "auto", subnet_ipv4.get("control"), "NIC1 Control type" - ) - subnet_ipv6 = subnets[1] - self.assertEqual( - "dhcp6", subnet_ipv6.get("type"), "Ipv6 DHCP type for NIC1" - ) + self.assertEqual(1, len(subnets), "number of subnets for NIC1") + subnet = subnets[0] + self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC1") + self.assertEqual("auto", subnet.get("control"), "NIC1 Control type") - # Test NIC2 self.assertEqual("physical", nic2.get("type"), "type of NIC2") self.assertEqual("NIC2", nic2.get("name"), "name of NIC2") self.assertEqual( "00:50:56:a6:5a:de", nic2.get("mac_address"), "mac address of NIC2" ) subnets = nic2.get("subnets") - self.assertEqual(2, len(subnets), "number of subnets for NIC2") - subnet_ipv4 = subnets[0] - self.assertEqual( - "dhcp", subnet_ipv4.get("type"), "Ipv4 DHCP type for NIC2" - ) - self.assertEqual( - "auto", subnet_ipv4.get("control"), "NIC2 Control type" - ) - subnet_ipv6 = subnets[1] - self.assertEqual( - "dhcp6", subnet_ipv6.get("type"), "Ipv6 DHCP type for NIC2" - ) + self.assertEqual(1, len(subnets), "number of subnets for NIC2") + subnet = subnets[0] + self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC2") + self.assertEqual("auto", subnet.get("control"), "NIC2 Control type") def test_get_nics_list_static(self): """Tests if NicConfigurator properly calculates network subnets @@ -304,7 +286,6 @@ def test_get_nics_list_static(self): elif cfg.get("name") == nic2.get("name"): nic2.update(cfg) - # Test NIC1 self.assertEqual("physical", nic1.get("type"), "type of NIC1") self.assertEqual("NIC1", nic1.get("name"), "name of NIC1") self.assertEqual( @@ -364,7 +345,6 @@ def test_get_nics_list_static(self): else: self.assertEqual(True, False, "invalid gateway %s" % (gateway)) - # Test NIC2 self.assertEqual("physical", nic2.get("type"), "type of NIC2") self.assertEqual("NIC2", nic2.get("name"), "name of NIC2") self.assertEqual( @@ -372,18 +352,16 @@ def test_get_nics_list_static(self): ) subnets = nic2.get("subnets") - self.assertEqual(2, len(subnets), "Number of subnets for NIC2") + 
self.assertEqual(1, len(subnets), "Number of subnets for NIC2") - subnet_ipv4 = subnets[0] - self.assertEqual("static", subnet_ipv4.get("type"), "Subnet type") + subnet = subnets[0] + self.assertEqual("static", subnet.get("type"), "Subnet type") self.assertEqual( - "192.168.6.102", subnet_ipv4.get("address"), "Subnet address" + "192.168.6.102", subnet.get("address"), "Subnet address" ) self.assertEqual( - "255.255.0.0", subnet_ipv4.get("netmask"), "Subnet netmask" + "255.255.0.0", subnet.get("netmask"), "Subnet netmask" ) - subnet_ipv6 = subnets[1] - self.assertEqual("dhcp6", subnet_ipv6.get("type"), "Subnet type") def test_custom_script(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") @@ -470,10 +448,7 @@ def test_non_primary_nic_without_gateway(self): "type": "static", "address": "10.20.87.154", "netmask": "255.255.252.0", - }, - { - "type": "dhcp6", - }, + } ], } ], @@ -524,10 +499,7 @@ def test_non_primary_nic_with_gateway(self): "metric": 10000, } ], - }, - { - "type": "dhcp6", - }, + } ], } ], @@ -587,10 +559,7 @@ def test_cust_non_primary_nic_with_gateway_(self): "metric": 10000, } ], - }, - { - "type": "dhcp6", - }, + } ], } ], @@ -635,10 +604,7 @@ def test_a_primary_nic_with_gateway(self): "address": "10.20.87.154", "netmask": "255.255.252.0", "gateway": "10.20.87.253", - }, - { - "type": "dhcp6", - }, + } ], } ], From e3db1adbb60482eee50c035311f9479740a05e28 Mon Sep 17 00:00:00 2001 From: Ani Sinha Date: Sat, 10 Aug 2024 01:08:39 +0530 Subject: [PATCH 081/131] chore: add comment explaining the NetworkManager may-fail setting (#5598) chore: add comment explaining the NetworkManager may-fail setting The value of may-fail in network manager keyfile is a source of confusion as the default value of it is True for Network Manager and False for network manager renderer implementation. Add a comment to explain why the renderer sets may-fail to False in its implementation. --- cloudinit/net/network_manager.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cloudinit/net/network_manager.py b/cloudinit/net/network_manager.py index 06305668fe4..b5b9697e5f0 100644 --- a/cloudinit/net/network_manager.py +++ b/cloudinit/net/network_manager.py @@ -171,6 +171,20 @@ def _set_ip_method(self, family, subnet_type): self._set_default("ipv4", "method", "disabled") self.config[family]["method"] = method + + # Network Manager sets the value of `may-fail` to `True` by default. + # Please see https://www.networkmanager.dev/docs/api/1.32.10/settings-ipv6.html. + # Therefore, when no configuration for ipv4 or ipv6 is specified, + # `may-fail = True` applies. When the user explicitly configures ipv4 + # or ipv6, `may-fail` is set to `False`. This is so because it is + # assumed that a network failure with the user provided configuration + # is unexpected. In other words, we think that the user knows what + # works in their target environment and what does not and they have + # correctly configured cloud-init network configuration such that + # it works in that environment. When no such configuration is + # specified, we do not know what would work and what would not in + # user's environment. Therefore, we are more conservative in assuming + # that failure with ipv4 or ipv6 can be expected or tolerated. 
self._set_default(family, "may-fail", "false") def _get_next_numbered_section(self, section, key_prefix) -> str: From bd6cd1fbee12ac81ff6c46cc5f979cfdf76e5e13 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Thu, 8 Aug 2024 10:25:07 -0600 Subject: [PATCH 082/131] chore: Deprecate old commands in help output (#5595) --- cloudinit/cmd/main.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 2de9826bb83..72f13fb2257 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -949,14 +949,17 @@ def main(sysv_args=None): "--debug", "-d", action="store_true", - help="Show additional pre-action logging (default: %(default)s).", + help=( + "DEPRECATED: Show additional pre-action " + "logging (default: %(default)s)." + ), default=False, ) parser.add_argument( "--force", action="store_true", help=( - "Force running even if no datasource is" + "DEPRECATED: Force running even if no datasource is" " found (use at your own risk)." ), dest="force", @@ -979,7 +982,10 @@ def main(sysv_args=None): # Each action and its sub-options (if any) parser_init = subparsers.add_parser( - "init", help="Initialize cloud-init and perform initial modules." + "init", + help=( + "DEPRECATED: Initialize cloud-init and perform initial modules." + ), ) parser_init.add_argument( "--local", @@ -1002,7 +1008,8 @@ def main(sysv_args=None): # These settings are used for the 'config' and 'final' stages parser_mod = subparsers.add_parser( - "modules", help="Activate modules using a given configuration key." + "modules", + help=("DEPRECATED: Activate modules using a given configuration key."), ) extra_help = lifecycle.deprecate( deprecated="`init`", @@ -1033,7 +1040,11 @@ def main(sysv_args=None): # This subcommand allows you to run a single module parser_single = subparsers.add_parser( - "single", help="Run a single module." + "single", + help=( + "Manually run a single module. Useful for " + "testing during development." + ), ) parser_single.add_argument( "--name", From 6ae8f68008f01a0cb894a3ad3678866f5b4a6474 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Thu, 8 Aug 2024 10:26:20 -0600 Subject: [PATCH 083/131] doc: Describe all stages in a single process (#5595) --- doc/man/cloud-init.1 | 48 ++++++++++++++------------ doc/rtd/explanation/boot.rst | 10 +++--- doc/rtd/howto/rerun_cloud_init.rst | 32 ++++++++--------- doc/rtd/reference/breaking_changes.rst | 42 ++++++++++++++++++++++ doc/rtd/reference/cli.rst | 18 +++++----- 5 files changed, 95 insertions(+), 55 deletions(-) diff --git a/doc/man/cloud-init.1 b/doc/man/cloud-init.1 index 8776099c02a..d69c5abae32 100644 --- a/doc/man/cloud-init.1 +++ b/doc/man/cloud-init.1 @@ -4,7 +4,7 @@ cloud-init \- Cloud instance initialization .SH SYNOPSIS -.BR "cloud-init" " [-h] [-d] [-f FILES] [--force] [-v] [SUBCOMMAND]" +.BR "cloud-init" " [-h] [-d] [--force] [-v] [SUBCOMMAND]" .SH DESCRIPTION Cloud-init provides a mechanism for cloud instance initialization. @@ -12,27 +12,19 @@ This is done by identifying the cloud platform that is in use, reading provided cloud metadata and optional vendor and user data, and then initializing the instance as requested. -Generally, this command is not normally meant to be run directly by -the user. However, some subcommands may useful for development or -debug of deployments. - .SH OPTIONS .TP .B "-h, --help" Show help message and exit. -.TP -.B "-d, --debug" -Show additional pre-action logging (default: False). 
- -.TP -.B "--force" -Force running even if no datasource is found (use at your own risk). - .TP .B "-v, --version" Show program's version number and exit. +.TP +.B "--all-stages" +INTERNAL: Run cloud-init's stages under a single process using a syncronization protocol. This is not intended for CLI usage. + .SH SUBCOMMANDS Please see the help output for each subcommand for additional details, flags, and subcommands. @@ -57,14 +49,6 @@ Run development tools. See help output for subcommand details. .B "features" List defined features. -.TP -.B "init" -Initialize cloud-init and execute initial modules. - -.TP -.B "modules" -Activate modules using a given configuration key. - .TP .B "query" Query standardized instance metadata from the command line. @@ -75,12 +59,30 @@ Validate cloud-config files using jsonschema. .TP .B "single" -Run a single module. +Manually run a single module. Useful for testing during development. .TP .B "status" Report cloud-init status or wait on completion. +.SH DEPRECATED + +.TP +.B "-d, --debug" +Show additional pre-action logging (default: False). + +.TP +.B "--force" +Force running even if no datasource is found (use at your own risk). + +.TP +.B "init" +Initialize cloud-init and execute initial modules. + +.TP +.B "modules" +Activate modules using a given configuration key. + .SH EXIT STATUS .IP @@ -95,4 +97,4 @@ Report cloud-init status or wait on completion. Copyright (C) 2020 Canonical Ltd. License GPL-3 or Apache-2.0 .SH SEE ALSO -Full documentation at: +Full documentation at: diff --git a/doc/rtd/explanation/boot.rst b/doc/rtd/explanation/boot.rst index ff3b65ebd28..7fe3a6e9bb4 100644 --- a/doc/rtd/explanation/boot.rst +++ b/doc/rtd/explanation/boot.rst @@ -48,8 +48,7 @@ Detect A platform identification tool called ``ds-identify`` runs in the first stage. This tool detects which platform the instance is running on. This tool is integrated into the init system to disable cloud-init when no platform is -found, and enable cloud-init when a valid platform is detected. This stage -might not be present for every installation of cloud-init. +found, and enable cloud-init when a valid platform is detected. .. _boot-Local: @@ -88,10 +87,9 @@ is rendered. This includes clearing of all previous (stale) configuration including persistent device naming with old MAC addresses. This stage must block network bring-up or any stale configuration that might -have already been applied. Otherwise, that could have negative effects such -as DHCP hooks or broadcast of an old hostname. It would also put the system -in an odd state to recover from, as it may then have to restart network -devices. +have already been applied. Otherwise, that could have negative effects such as +broadcast of an old hostname. It would also put the system in an odd state to +recover from, as it may then have to restart network devices. ``Cloud-init`` then exits and expects for the continued boot of the operating system to bring network configuration up as configured. diff --git a/doc/rtd/howto/rerun_cloud_init.rst b/doc/rtd/howto/rerun_cloud_init.rst index b7adb30ff30..9af4d19e3ce 100644 --- a/doc/rtd/howto/rerun_cloud_init.rst +++ b/doc/rtd/howto/rerun_cloud_init.rst @@ -64,33 +64,31 @@ a result. .. _partially_rerun_cloud_init: -How to partially re-run cloud-init -================================== - -If the behavior you are testing runs on every boot, there are a couple -of ways to test this behavior. 
- Manually run cloud-init stages ------------------------------ -Note that during normal boot of cloud-init, the init system runs these -stages at specific points during boot. This means that running the code -manually after booting the system may cause the code to interact with -the system in a different way than it does while it boots. +During normal boot of cloud-init, the init system runs the following +command: .. code-block:: shell-session - cloud-init init --local - cloud-init init - cloud-init modules --mode=config - cloud-init modules --mode=final + cloud-init --all-stages + +Keep in mind that running this manually may not behave the same as cloud-init +behaves when it is started by the init system. The first reason for this is +that cloud-init's stages are intended to run before and after specific events +in the boot order, so there are no guarantees that it will do the right thing +when running out of order. The second reason is that cloud-init will skip its +normal synchronization protocol when it detects that stdin is a tty, for the +purpose of debugging and development. + +This command cannot be expected to be stable when executed outside of the init +system due to its ordering requirements. Reboot the instance ------------------- -Rebooting the instance will take a little bit longer, however it will -make cloud-init stages run at the correct times during boot, so it will -behave more correctly. +Rebooting the instance will re-run any parts of cloud-init that run per-boot. .. code-block:: shell-session diff --git a/doc/rtd/reference/breaking_changes.rst b/doc/rtd/reference/breaking_changes.rst index 0df6fcfde58..0eba4431f0d 100644 --- a/doc/rtd/reference/breaking_changes.rst +++ b/doc/rtd/reference/breaking_changes.rst @@ -11,6 +11,45 @@ releases. many operating system vendors patch out breaking changes in cloud-init to ensure consistent behavior on their platform. +24.3 +==== + +Single Process Optimization +--------------------------- + +As a performance optimization, cloud-init no longer runs as four separate +Python processes. Instead, it launches a single process and then +communicates with the init system over a Unix socket to allow the init system +to tell it when it should start each stage and to tell the init system when +each stage has completed. Init system ordering is preserved. + +This should have no noticeable effect for end users, besides a faster boot time. +This is a breaking change for two reasons: + +1. a precaution to avoid unintentionally breaking users on stable distributions +2. this change included renaming a systemd service: + ``cloud-init.service`` -> ``cloud-init-network.service`` + +The now-deprecated command line arguments used to invoke each stage will still +be supported for a period of time to allow for adoption and stabilization. Any +systemd distribution that wants to revert this behavior may want to +`patch this change`_. + +Support has not yet been added for non-systemd distributions, however it is +possible to add support. + +Note that this change adds a dependency on the openbsd netcat implementation, +which is already on Ubuntu as part of ``ubuntu-minimal``. + +Addition of NoCloud network-config +---------------------------------- + +The NoCloud datasource now has support for providing network configuration +using network-config. Any installation that doesn't provide this configuration +file will experience a retry/timeout in boot. Adding an empty +``network-config`` file should provide backwards compatibility with previous +behavior. 
+
 24.1
 ====
 
@@ -96,3 +135,6 @@
 behavior as a result of this change. Workarounds include updating
 the kernel command line and optionally configuring a ``datasource_list`` in
 ``/etc/cloud/cloud.cfg.d/*.cfg``.
+
+
+.. _patch this change: https://github.com/canonical/cloud-init/blob/ubuntu/noble/debian/patches/no-single-process.patch
diff --git a/doc/rtd/reference/cli.rst b/doc/rtd/reference/cli.rst
index eb800b22a75..bdc59c2808a 100644
--- a/doc/rtd/reference/cli.rst
+++ b/doc/rtd/reference/cli.rst
@@ -15,20 +15,20 @@ Example output:
 
 .. code-block::
 
-   usage: cloud-init [-h] [--version] [--debug] [--force]
-   {init,modules,single,query,features,analyze,devel,collect-logs,clean,status,schema} ...
+   usage: cloud-init [-h] [--version] [--debug] [--force] [--all-stages] {init,modules,single,query,features,analyze,devel,collect-logs,clean,status,schema} ...
 
    options:
      -h, --help     show this help message and exit
      --version, -v  Show program's version number and exit.
      --debug, -d    Show additional pre-action logging (default: False).
      --force        Force running even if no datasource is found (use at your own risk).
+     --all-stages   Run cloud-init's stages under a single process using a synchronization protocol. This is not intended for CLI usage.
 
    Subcommands:
      {init,modules,single,query,features,analyze,devel,collect-logs,clean,status,schema}
-       init                Initialize cloud-init and perform initial modules.
-       modules             Activate modules using a given configuration key.
-       single              Run a single module.
+       init                DEPRECATED: Initialize cloud-init and perform initial modules.
+       modules             DEPRECATED: Activate modules using a given configuration key.
+       single              Manually run a single module. Useful for testing during development.
        query               Query standardized instance metadata from the command line.
        features            List defined features.
        analyze             Devel tool: Analyze cloud-init logs and data.
@@ -185,8 +185,8 @@ Example output:
 
 .. _cli_init:
 
-:command:`init`
-===============
+:command:`init` (deprecated)
+============================
 
 Generally run by OS init systems to execute ``cloud-init``'s stages: *init*
 and *init-local*. See :ref:`boot_stages` for more info.
@@ -200,8 +200,8 @@ generally gated to run only once due to semaphores in
 
 .. _cli_modules:
 
-:command:`modules`
-==================
+:command:`modules` (deprecated)
+===============================
 
 Generally run by OS init systems to execute ``modules:config`` and
 ``modules:final`` boot stages. This executes cloud config :ref:`modules`

From 00144670618bc509b3bb71653d03323f7af15e3d Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Thu, 8 Aug 2024 10:16:49 -0600
Subject: [PATCH 084/131] doc(boot): Make first boot a dedicated page (#5595)

Also shift the format page higher in the explanation page list, since
this is a high-traffic page.
---
 doc/rtd/explanation/boot.rst       | 92 +-----------------------
 doc/rtd/explanation/first_boot.rst | 91 +++++++++++++++++++++++++++++
 doc/rtd/explanation/index.rst      |  3 +-
 3 files changed, 95 insertions(+), 91 deletions(-)
 create mode 100644 doc/rtd/explanation/first_boot.rst

diff --git a/doc/rtd/explanation/boot.rst b/doc/rtd/explanation/boot.rst
index 7fe3a6e9bb4..ac1f6193125 100644
--- a/doc/rtd/explanation/boot.rst
+++ b/doc/rtd/explanation/boot.rst
@@ -187,95 +187,7 @@ finished, the :command:`cloud-init status --wait` subcommand can help block
 external scripts until ``cloud-init`` is done without having to write your
 own ``systemd`` unit dependency chains. See :ref:`cli_status` for more info.
 
-..
_boot-First_boot_determination: - -First boot determination -======================== - -``Cloud-init`` has to determine whether or not the current boot is the first -boot of a new instance, so that it applies the appropriate configuration. On -an instance's first boot, it should run all "per-instance" configuration, -whereas on a subsequent boot it should run only "per-boot" configuration. This -section describes how ``cloud-init`` performs this determination, as well as -why it is necessary. - -When it runs, ``cloud-init`` stores a cache of its internal state for use -across stages and boots. - -If this cache is present, then ``cloud-init`` has run on this system -before [#not-present]_. There are two cases where this could occur. Most -commonly, the instance has been rebooted, and this is a second/subsequent -boot. Alternatively, the filesystem has been attached to a *new* instance, -and this is the instance's first boot. The most obvious case where this -happens is when an instance is launched from an image captured from a -launched instance. - -By default, ``cloud-init`` attempts to determine which case it is running -in by checking the instance ID in the cache against the instance ID it -determines at runtime. If they do not match, then this is an instance's -first boot; otherwise, it's a subsequent boot. Internally, ``cloud-init`` -refers to this behaviour as ``check``. - -This behaviour is required for images captured from launched instances to -behave correctly, and so is the default that generic cloud images ship with. -However, there are cases where it can cause problems [#problems]_. For these -cases, ``cloud-init`` has support for modifying its behaviour to trust the -instance ID that is present in the system unconditionally. This means that -``cloud-init`` will never detect a new instance when the cache is present, -and it follows that the only way to cause ``cloud-init`` to detect a new -instance (and therefore its first boot) is to manually remove -``cloud-init``'s cache. Internally, this behaviour is referred to as -``trust``. - -To configure which of these behaviours to use, ``cloud-init`` exposes the -``manual_cache_clean`` configuration option. When ``false`` (the default), -``cloud-init`` will ``check`` and clean the cache if the instance IDs do -not match (this is the default, as discussed above). When ``true``, -``cloud-init`` will ``trust`` the existing cache (and therefore not clean it). - -Manual cache cleaning -===================== - -``Cloud-init`` ships a command for manually cleaning the cache: -:command:`cloud-init clean`. See :ref:`cli_clean`'s documentation for further -details. - -Reverting ``manual_cache_clean`` setting ----------------------------------------- - -Currently there is no support for switching an instance that is launched with -``manual_cache_clean: true`` from ``trust`` behaviour to ``check`` behaviour, -other than manually cleaning the cache. - -.. warning:: If you want to capture an instance that is currently in ``trust`` - mode as an image for launching other instances, you **must** manually clean - the cache. If you do not do so, then instances launched from the captured - image will all detect their first boot as a subsequent boot of the captured - instance, and will not apply any per-instance configuration. - - This is a functional issue, but also a potential security one: - ``cloud-init`` is responsible for rotating SSH host keys on first boot, - and this will not happen on these instances. - -.. 
[#not-present] It follows that if this cache is not present, - ``cloud-init`` has not run on this system before, so this is - unambiguously this instance's first boot. - -.. [#problems] A couple of ways in which this strict reliance on the presence - of a datasource has been observed to cause problems: - - - If a cloud's metadata service is flaky and ``cloud-init`` cannot - obtain the instance ID locally on that platform, ``cloud-init``'s - instance ID determination will sometimes fail to determine the current - instance ID, which makes it impossible to determine if this is an - instance's first or subsequent boot (`#1885527`_). - - If ``cloud-init`` is used to provision a physical appliance or device - and an attacker can present a datasource to the device with a different - instance ID, then ``cloud-init``'s default behaviour will detect this as - an instance's first boot and reset the device using the attacker's - configuration (this has been observed with the - :ref:`NoCloud datasource` in `#1879530`_). +See the :ref:`first boot documentation ` to learn how +cloud-init decides that a boot is the "first boot". .. _generator: https://www.freedesktop.org/software/systemd/man/systemd.generator.html -.. _#1885527: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1885527 -.. _#1879530: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1879530 diff --git a/doc/rtd/explanation/first_boot.rst b/doc/rtd/explanation/first_boot.rst new file mode 100644 index 00000000000..2348e6e2c0a --- /dev/null +++ b/doc/rtd/explanation/first_boot.rst @@ -0,0 +1,91 @@ +.. _First_boot_determination: + +First boot determination +======================== + +``Cloud-init`` has to determine whether or not the current boot is the first +boot of a new instance, so that it applies the appropriate configuration. On +an instance's first boot, it should run all "per-instance" configuration, +whereas on a subsequent boot it should run only "per-boot" configuration. This +section describes how ``cloud-init`` performs this determination, as well as +why it is necessary. + +When it runs, ``cloud-init`` stores a cache of its internal state for use +across stages and boots. + +If this cache is present, then ``cloud-init`` has run on this system +before [#not-present]_. There are two cases where this could occur. Most +commonly, the instance has been rebooted, and this is a second/subsequent +boot. Alternatively, the filesystem has been attached to a *new* instance, +and this is the instance's first boot. The most obvious case where this +happens is when an instance is launched from an image captured from a +launched instance. + +By default, ``cloud-init`` attempts to determine which case it is running +in by checking the instance ID in the cache against the instance ID it +determines at runtime. If they do not match, then this is an instance's +first boot; otherwise, it's a subsequent boot. Internally, ``cloud-init`` +refers to this behaviour as ``check``. + +This behaviour is required for images captured from launched instances to +behave correctly, and so is the default that generic cloud images ship with. +However, there are cases where it can cause problems [#problems]_. For these +cases, ``cloud-init`` has support for modifying its behaviour to trust the +instance ID that is present in the system unconditionally. 
This means that +``cloud-init`` will never detect a new instance when the cache is present, +and it follows that the only way to cause ``cloud-init`` to detect a new +instance (and therefore its first boot) is to manually remove +``cloud-init``'s cache. Internally, this behaviour is referred to as +``trust``. + +To configure which of these behaviours to use, ``cloud-init`` exposes the +``manual_cache_clean`` configuration option. When ``false`` (the default), +``cloud-init`` will ``check`` and clean the cache if the instance IDs do +not match (this is the default, as discussed above). When ``true``, +``cloud-init`` will ``trust`` the existing cache (and therefore not clean it). + +Manual cache cleaning +===================== + +``Cloud-init`` ships a command for manually cleaning the cache: +:command:`cloud-init clean`. See :ref:`cli_clean`'s documentation for further +details. + +Reverting ``manual_cache_clean`` setting +---------------------------------------- + +Currently there is no support for switching an instance that is launched with +``manual_cache_clean: true`` from ``trust`` behaviour to ``check`` behaviour, +other than manually cleaning the cache. + +.. warning:: If you want to capture an instance that is currently in ``trust`` + mode as an image for launching other instances, you **must** manually clean + the cache. If you do not do so, then instances launched from the captured + image will all detect their first boot as a subsequent boot of the captured + instance, and will not apply any per-instance configuration. + + This is a functional issue, but also a potential security one: + ``cloud-init`` is responsible for rotating SSH host keys on first boot, + and this will not happen on these instances. + +.. [#not-present] It follows that if this cache is not present, + ``cloud-init`` has not run on this system before, so this is + unambiguously this instance's first boot. + +.. [#problems] A couple of ways in which this strict reliance on the presence + of a datasource has been observed to cause problems: + + - If a cloud's metadata service is flaky and ``cloud-init`` cannot + obtain the instance ID locally on that platform, ``cloud-init``'s + instance ID determination will sometimes fail to determine the current + instance ID, which makes it impossible to determine if this is an + instance's first or subsequent boot (`#1885527`_). + - If ``cloud-init`` is used to provision a physical appliance or device + and an attacker can present a datasource to the device with a different + instance ID, then ``cloud-init``'s default behaviour will detect this as + an instance's first boot and reset the device using the attacker's + configuration (this has been observed with the + :ref:`NoCloud datasource` in `#1879530`_). + +.. _#1885527: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1885527 +.. _#1879530: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1879530 diff --git a/doc/rtd/explanation/index.rst b/doc/rtd/explanation/index.rst index 503c7098a00..8a1adc4639e 100644 --- a/doc/rtd/explanation/index.rst +++ b/doc/rtd/explanation/index.rst @@ -11,9 +11,10 @@ knowledge and become better at using and configuring ``cloud-init``. 
:maxdepth: 1 introduction.rst + format.rst configuration.rst boot.rst - format.rst + first_boot.rst events.rst instancedata.rst vendordata.rst From baeb35cc36acac92a7e55db945b6f77b5ca642cc Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Thu, 8 Aug 2024 14:20:23 -0600 Subject: [PATCH 085/131] doc: Add misc links, improve wording (#5595) --- doc/rtd/explanation/analyze.rst | 9 ++++++--- doc/rtd/explanation/events.rst | 7 ++++--- doc/rtd/explanation/format.rst | 9 ++++++++- doc/rtd/explanation/instancedata.rst | 4 ++-- doc/rtd/explanation/introduction.rst | 2 +- doc/rtd/explanation/kernel-command-line.rst | 14 ++++---------- doc/rtd/explanation/vendordata.rst | 7 ++++--- doc/rtd/reference/datasources/vmware.rst | 2 +- 8 files changed, 30 insertions(+), 24 deletions(-) diff --git a/doc/rtd/explanation/analyze.rst b/doc/rtd/explanation/analyze.rst index 3ab9f1b7fd2..04205aec704 100644 --- a/doc/rtd/explanation/analyze.rst +++ b/doc/rtd/explanation/analyze.rst @@ -3,15 +3,18 @@ Performance *********** -The :command:`analyze` subcommand was added to ``cloud-init`` to help analyze -``cloud-init`` boot time performance. It is loosely based on -``systemd-analyze``, where there are four subcommands: +The :command:`analyze` subcommand helps to analyze ``cloud-init`` boot time +performance. It is loosely based on ``systemd-analyze``, where there are four +subcommands: - :command:`blame` - :command:`show` - :command:`dump` - :command:`boot` +The analyze subcommand works by parsing the cloud-init log file for timestamps +associated with specific events. + Usage ===== diff --git a/doc/rtd/explanation/events.rst b/doc/rtd/explanation/events.rst index 38356d38eb0..4335ae2f2c8 100644 --- a/doc/rtd/explanation/events.rst +++ b/doc/rtd/explanation/events.rst @@ -66,9 +66,10 @@ Hotplug ======= When the ``hotplug`` event is supported by the datasource and configured in -user data, ``cloud-init`` will respond to the addition or removal of network -interfaces to the system. In addition to fetching and updating the system -metadata, ``cloud-init`` will also bring up/down the newly added interface. +:ref:`user data`, ``cloud-init`` will respond to the +addition or removal of network interfaces to the system. In addition to +fetching and updating the system metadata, ``cloud-init`` will also bring +up/down the newly added interface. .. warning:: Due to its use of ``systemd`` sockets, ``hotplug`` functionality is diff --git a/doc/rtd/explanation/format.rst b/doc/rtd/explanation/format.rst index bed2b61af11..7d8a4a2176c 100644 --- a/doc/rtd/explanation/format.rst +++ b/doc/rtd/explanation/format.rst @@ -5,7 +5,9 @@ User data formats User data is configuration data provided by a user of a cloud platform to an instance at launch. User data can be passed to cloud-init in any of many -formats documented here. +formats documented here. User data is combined with the other +:ref:`configuration sources` to create a combined configuration +which modifies an instance. Configuration types =================== @@ -385,6 +387,11 @@ as binary data and so may be processed automatically. |Part handler |#part-handler |text/part-handler | +--------------------+-----------------------------+-------------------------+ +Continued reading +================= + +See the :ref:`configuration sources` documentation for +information about other sources of configuration for cloud-init. .. _make-mime: https://github.com/canonical/cloud-init/blob/main/cloudinit/cmd/devel/make_mime.py .. 
_YAML: https://yaml.org/spec/1.1/current.html diff --git a/doc/rtd/explanation/instancedata.rst b/doc/rtd/explanation/instancedata.rst index d2aadc083ee..1196fcb3793 100644 --- a/doc/rtd/explanation/instancedata.rst +++ b/doc/rtd/explanation/instancedata.rst @@ -63,10 +63,10 @@ provided to this instance. Non-root users referencing ``userdata`` or Using ``instance-data`` ======================= -``instance-data`` can be used in: +``instance-data`` can be used in the following configuration types: * :ref:`User data scripts`. -* :ref:`Cloud-config data`. +* :ref:`Cloud-config`. * :ref:`Base configuration`. * Command line interface via :command:`cloud-init query` or :command:`cloud-init devel render`. diff --git a/doc/rtd/explanation/introduction.rst b/doc/rtd/explanation/introduction.rst index ce7f9da7706..d14fe19c518 100644 --- a/doc/rtd/explanation/introduction.rst +++ b/doc/rtd/explanation/introduction.rst @@ -113,6 +113,6 @@ and how it works, you will probably want to You can also read in more detail about what cloud-init does :ref:`during the different boot stages`, and the -:ref:`types of configuration` you can pass to cloud-init and +:ref:`types of configuration` you can pass to cloud-init and how they're used. diff --git a/doc/rtd/explanation/kernel-command-line.rst b/doc/rtd/explanation/kernel-command-line.rst index 501812b8c75..c7f861a69ed 100644 --- a/doc/rtd/explanation/kernel-command-line.rst +++ b/doc/rtd/explanation/kernel-command-line.rst @@ -2,18 +2,12 @@ Kernel command line ******************* Providing configuration data via the kernel command line is somewhat of a last -resort, since this method only supports -:ref:`cloud config` starting with -`#cloud-config`, and many datasources do not support injecting kernel -command line arguments without modifying the bootloader. - -Despite the limitations of using the kernel command line, cloud-init supports -some use-cases. +resort, since many datasources do not support injecting kernel command line +arguments without modifying the bootloader. Note that this page describes kernel command line behavior that applies -to all clouds. To provide a local configuration with an image using kernel -command line, see :ref:`datasource NoCloud` which provides -more configuration options. +to all clouds. The :ref:`NoCloud datasource` provides more +configuration options. .. _kernel_datasource_override: diff --git a/doc/rtd/explanation/vendordata.rst b/doc/rtd/explanation/vendordata.rst index a2340c2fab9..0e5e1881694 100644 --- a/doc/rtd/explanation/vendordata.rst +++ b/doc/rtd/explanation/vendordata.rst @@ -29,9 +29,10 @@ Input formats ============= ``Cloud-init`` will download and cache to filesystem any vendor data that it -finds. Vendor data is handled exactly like user data. This means that the -vendor can supply multi-part input and have those parts acted on in the same -way as with user data. +finds. Vendor data is handled exactly like +:ref:`user data`. This means that the vendor can supply +multi-part input and have those parts acted on in the same way as with user +data. The only differences are: diff --git a/doc/rtd/reference/datasources/vmware.rst b/doc/rtd/reference/datasources/vmware.rst index 1d4bbd7fd50..cea24a4a82f 100644 --- a/doc/rtd/reference/datasources/vmware.rst +++ b/doc/rtd/reference/datasources/vmware.rst @@ -389,7 +389,7 @@ this datasource using the GuestInfo keys transport: Otherwise ``cloud-init`` may not run in first-boot mode. 
 For more information on how the boot mode is determined, please see the
-   :ref:`First Boot Documentation <boot-First_boot_determination>`.
+   :ref:`first boot documentation <First_boot_determination>`.
 
 .. raw:: html

From ee3c340590b85171113d7c7381e50b34f551c5fd Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Mon, 12 Aug 2024 11:06:39 -0600
Subject: [PATCH 086/131] fix: invalid quilt patch no-single-process.patch

The patch diff was invalid: its header declared a 57-line diff for
cloud-init.service, but the actual diff context was only 56 lines. This
broke the ability to apply all quilt patches with quilt push -a,
resulting in a broken daily build recipe for focal with the following
error message:

patch: **** malformed patch at line 252:
Patch no-single-process.patch does not apply (enforce with -f)
---
 debian/patches/no-single-process.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/debian/patches/no-single-process.patch b/debian/patches/no-single-process.patch
index 18a58cb4fae..3c4470efb81 100644
--- a/debian/patches/no-single-process.patch
+++ b/debian/patches/no-single-process.patch
@@ -193,7 +193,7 @@ Last-Update: 2024-08-02
 -WantedBy=cloud-init.target
 --- /dev/null
 +++ b/systemd/cloud-init.service.tmpl
-@@ -0,0 +1,57 @@
+@@ -0,0 +1,56 @@
 +## template:jinja
 +[Unit]
 +# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html

From 4c96055e5c714f5cb2bc709f410dcd495bb9f330 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Mon, 12 Aug 2024 21:46:52 -0600
Subject: [PATCH 087/131] chore(actions): add doc label for any doc related
 subdir file matches (#5602)

We recently noticed that doc file changes in nested subdirs were not
triggering the documentation auto-label. An example of subdir matching is at
https://github.com/actions/labeler?tab=readme-ov-file#basic-examples
---
 .github/labeler.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/labeler.yml b/.github/labeler.yml
index eaf08134c34..b17d7a4a131 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -2,6 +2,6 @@ documentation:
 - all:
   - changed-files:
     - any-glob-to-any-file:
-      - 'doc/*'
-      - 'cloudinit/config/schemas/*'
+      - 'doc/**'
+      - 'cloudinit/config/schemas/**'
   - base-branch: ['main']

From 8b11d99d415d7e3bb96180329662d3cfb04476be Mon Sep 17 00:00:00 2001
From: Alberto Contreras
Date: Tue, 13 Aug 2024 17:15:25 +0200
Subject: [PATCH 088/131] chore: explain other use of oauth (#5616)

---
 requirements.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index eabd7a22cd8..3c6bf49e432 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,8 +3,8 @@
 # Used for untemplating any files or strings with parameters.
 jinja2
 
-# This one is currently only used by the MAAS datasource. If that
-# datasource is removed, this is no longer needed
+# This one is currently only used by the MAAS datasource and the Event
+# reporting feature when configured to use webhooks.
 oauthlib
 
 # This one is currently used only by the CloudSigma and SmartOS datasources.
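As background for the requirements.txt comment above: cloud-init's event
reporting can POST lifecycle events to a webhook, signing each request with
oauthlib when credentials are supplied. A sketch of such a reporting
configuration, with placeholder endpoint and credential values (the ``type``
and key names follow cloud-init's reporting documentation):

.. code-block:: yaml

   # e.g. /etc/cloud/cloud.cfg.d/05_reporting.cfg -- illustrative values only
   reporting:
     webhook_target:
       type: webhook
       endpoint: "http://example.com/cloud-init-events"
       consumer_key: "<consumer_key>"
       token_key: "<token_key>"
       token_secret: "<token_secret>"

The target name (``webhook_target`` here) is arbitrary, and multiple named
reporting targets can be configured side by side.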
From 14edf67c2b80f1ed5698ad2230d93eefb902d932 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Tue, 13 Aug 2024 09:02:30 -0600 Subject: [PATCH 089/131] docs: Drop Python 3.6 and 3.7 support (#5617) --- doc/rtd/development/contribute_code.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/rtd/development/contribute_code.rst b/doc/rtd/development/contribute_code.rst index c6bc399c898..41bc927450b 100644 --- a/doc/rtd/development/contribute_code.rst +++ b/doc/rtd/development/contribute_code.rst @@ -54,7 +54,7 @@ We generally adhere to `PEP 8`_, and this is enforced by our use of ``black``, Python support -------------- -Cloud-init upstream currently supports Python 3.6 and above. +Cloud-init upstream currently supports Python 3.8 and above. Cloud-init upstream will stay compatible with a particular Python version for 6 years after release. After 6 years, we will stop testing upstream changes @@ -70,6 +70,8 @@ version changed: * - Cloud-init version - Python version + * - 24.3 + - 3.8+ * - 22.1 - 3.6+ * - 20.3 From 07d0384bedde1c8a80d72dafa8e3963005488b31 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Tue, 13 Aug 2024 09:14:19 -0600 Subject: [PATCH 090/131] docs: Remove unnecessary section, add feature flag page (#5617) --- doc/rtd/development/contribute_code.rst | 55 +++++-------------------- doc/rtd/development/feature_flags.rst | 6 +++ 2 files changed, 17 insertions(+), 44 deletions(-) create mode 100644 doc/rtd/development/feature_flags.rst diff --git a/doc/rtd/development/contribute_code.rst b/doc/rtd/development/contribute_code.rst index 41bc927450b..46b6755c5df 100644 --- a/doc/rtd/development/contribute_code.rst +++ b/doc/rtd/development/contribute_code.rst @@ -1,6 +1,17 @@ Contribute to the code ********************** +.. toctree:: + :maxdepth: 1 + :hidden: + + testing.rst + integration_tests.rst + module_creation.rst + datasource_creation.rst + dir_layout.rst + feature_flags.rst + For a run-through of the entire process, the following pages will be your best starting point: @@ -17,34 +28,6 @@ Testing Submissions to cloud-init must include testing. Unit testing and integration testing are integral parts of contributing code. -.. toctree:: - :maxdepth: 1 - :hidden: - - testing.rst - integration_tests.rst - -* :doc:`Unit testing overview and design principles` -* :doc:`Integration testing` - -Popular contributions -===================== - -.. toctree:: - :maxdepth: 1 - :hidden: - - module_creation.rst - datasource_creation.rst - -The two most popular contributions we receive are new cloud config -:doc:`modules ` and new -:doc:`datasources `; these pages will provide instructions -on how to create them. - -Note that any new modules should use underscores in any new config options and -not hyphens (e.g. ``new_option`` and *not* ``new-option``). - Code style and design ===================== @@ -86,22 +69,6 @@ The cloud-init codebase uses Python's annotation support for storing type annotations in the style specified by `PEP-484`_ and `PEP-526`_. Their use in the codebase is encouraged. -Other resources -=============== - -.. toctree:: - :maxdepth: 1 - :hidden: - - dir_layout.rst - -* :doc:`Explanation of the directory structure` - -Feature flags -------------- - -.. automodule:: cloudinit.features - :members: .. LINKS: .. 
include:: ../links.txt diff --git a/doc/rtd/development/feature_flags.rst b/doc/rtd/development/feature_flags.rst new file mode 100644 index 00000000000..c87a7982aab --- /dev/null +++ b/doc/rtd/development/feature_flags.rst @@ -0,0 +1,6 @@ +Feature flags +************* + +.. automodule:: cloudinit.features + :members: + From dfcc2b86745ee27302ba83ec27fca6bece68f52b Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Tue, 13 Aug 2024 12:41:40 -0600 Subject: [PATCH 091/131] fix(btrfs): Version parsing (#5618) Fixes GH-5614 --- cloudinit/config/cc_resizefs.py | 5 +++- tests/unittests/config/test_cc_resizefs.py | 31 +++++++++++++++++----- 2 files changed, 28 insertions(+), 8 deletions(-) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index b90db58ff88..87d278710b8 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -58,7 +58,10 @@ def _resize_btrfs(mount_point, devpth): # the resize operation can be queued btrfs_with_queue = lifecycle.Version.from_str("5.10") system_btrfs_ver = lifecycle.Version.from_str( - subp.subp(["btrfs", "--version"])[0].split("v")[-1].strip() + subp.subp(["btrfs", "--version"]) + .stdout.split("\n")[0] + .split("v")[-1] + .strip() ) if system_btrfs_ver >= btrfs_with_queue: idx = cmd.index("resize") diff --git a/tests/unittests/config/test_cc_resizefs.py b/tests/unittests/config/test_cc_resizefs.py index 36daf9f8952..5a3057f40a2 100644 --- a/tests/unittests/config/test_cc_resizefs.py +++ b/tests/unittests/config/test_cc_resizefs.py @@ -22,7 +22,7 @@ get_schema, validate_cloudconfig_schema, ) -from cloudinit.subp import ProcessExecutionError +from cloudinit.subp import ProcessExecutionError, SubpResult from tests.unittests.helpers import ( CiTestCase, mock, @@ -62,7 +62,7 @@ def test_cannot_skip_ufs_resize(self, m_subp): fs_type = "ufs" resize_what = "/" devpth = "/dev/da0p2" - m_subp.return_value = ( + m_subp.return_value = SubpResult( "stdout: super-block backups (for fsck_ffs -b #) at:\n\n", "growfs: no room to allocate last cylinder group; " "leaving 364KB unused\n", @@ -457,7 +457,7 @@ def test_resize_btrfs_mount_is_ro(self, m_subp, m_is_dir, m_is_rw): """Do not resize / directly if it is read-only. (LP: #1734787).""" m_is_rw.return_value = False m_is_dir.return_value = True - m_subp.return_value = ("btrfs-progs v4.19 \n", "") + m_subp.return_value = SubpResult("btrfs-progs v4.19 \n", "") self.assertEqual( ("btrfs", "filesystem", "resize", "max", "//.snapshots"), _resize_btrfs("/", "/dev/sda1"), @@ -470,7 +470,7 @@ def test_resize_btrfs_mount_is_rw(self, m_subp, m_is_dir, m_is_rw): """Do not resize / directly if it is read-only. 
(LP: #1734787)."""
         m_is_rw.return_value = True
         m_is_dir.return_value = True
-        m_subp.return_value = ("btrfs-progs v4.19 \n", "")
+        m_subp.return_value = SubpResult("btrfs-progs v4.19 \n", "")
         self.assertEqual(
             ("btrfs", "filesystem", "resize", "max", "/"),
             _resize_btrfs("/", "/dev/sda1"),
@@ -485,7 +485,24 @@ def test_resize_btrfs_mount_is_rw_has_queue(
         """Queue the resize request if btrfs >= 5.10"""
         m_is_rw.return_value = True
         m_is_dir.return_value = True
-        m_subp.return_value = ("btrfs-progs v5.10 \n", "")
+        m_subp.return_value = SubpResult("btrfs-progs v5.10 \n", "")
+        self.assertEqual(
+            ("btrfs", "filesystem", "resize", "--enqueue", "max", "/"),
+            _resize_btrfs("/", "/dev/sda1"),
+        )
+
+    @mock.patch("cloudinit.util.mount_is_read_write")
+    @mock.patch("cloudinit.config.cc_resizefs.os.path.isdir")
+    @mock.patch("cloudinit.subp.subp")
+    def test_resize_btrfs_version(self, m_subp, m_is_dir, m_is_rw):
+        """Queue the resize request if btrfs >= 6.10"""
+        m_is_rw.return_value = True
+        m_is_dir.return_value = True
+        m_subp.return_value = SubpResult(
+            "btrfs-progs v6.10 \n\n-EXPERIMENTAL -INJECT -STATIC +LZO +ZSTD "
+            "+UDEV +FSVERITY +ZONED CRYPTO=libgcrypt",
+            "",
+        )
         self.assertEqual(
             ("btrfs", "filesystem", "resize", "--enqueue", "max", "/"),
             _resize_btrfs("/", "/dev/sda1"),
@@ -555,12 +572,12 @@ def test_get_device_info_from_zpool_handles_no_zpool(self, m_sub, m_os):
 
     @mock.patch(M_PATH + "os")
     @mock.patch("cloudinit.subp.subp")
-    def test_get_device_info_from_zpool_on_error(self, zpool_output, m_os):
+    def test_get_device_info_from_zpool_on_error(self, m_subp, m_os):
         # mock /dev/zfs exists
         m_os.path.exists.return_value = True
 
         # mock subp command from get_mount_info_fs_on_zpool
-        zpool_output.return_value = (
+        m_subp.return_value = SubpResult(
             readResource("zpool_status_simple.txt"),
             "error",
         )

From ac94539ab400eed21219c6b9ac629e2dc62efad4 Mon Sep 17 00:00:00 2001
From: Noah Meyerhans
Date: Tue, 13 Aug 2024 11:46:15 -0700
Subject: [PATCH 092/131] fix(systemd): Correct location of installed drop-in
 files (#5615)

As noted in the systemd documentation, /etc is reserved for "System
units created by the administrator" while the lib directory should be
used by "System units installed by the distribution package manager".

Fixes GH-5613
---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 3e33d0062bd..9ca4a8a2a40 100644
--- a/setup.py
+++ b/setup.py
@@ -312,7 +312,7 @@ def finalize_options(self):
     [
         (RULES_PATH + "/rules.d", [f for f in glob("udev/*.rules")]),
         (
-            ETC + "/systemd/system/sshd-keygen@.service.d/",
+            INITSYS_ROOTS["systemd"] + "/sshd-keygen@.service.d/",
             ["systemd/disable-sshd-keygen-if-cloud-init-active.conf"],
         ),
     ]

From 19a9cca474a1b5a30090bb9af86ab8bf892a3319 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Wed, 7 Aug 2024 13:46:08 -0600
Subject: [PATCH 093/131] chore(mypy): Lint log module (#5607)

---
 cloudinit/log.py | 4 ++--
 pyproject.toml   | 1 -
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/cloudinit/log.py b/cloudinit/log.py
index 983b426b7ce..fd83c994c88 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -176,7 +176,7 @@ def setup_backup_logging():
     which may ease debugging.
     """
     fallback_handler = logging.StreamHandler(sys.stderr)
-    fallback_handler.handleError = lambda record: None
+    setattr(fallback_handler, "handleError", lambda record: None)
     fallback_handler.setFormatter(
         logging.Formatter(
             "FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: %(message)s"
@@ -189,7 +189,7 @@ def handleError(self, record):
             fallback_handler.handle(record)
             fallback_handler.flush()
 
-    logging.Handler.handleError = handleError
+    setattr(logging.Handler, "handleError", handleError)
 
 
 class CloudInitLogRecord(logging.LogRecord):
diff --git a/pyproject.toml b/pyproject.toml
index df969290451..dbf31f33e55 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -69,7 +69,6 @@ module = [
     "cloudinit.distros.ubuntu",
     "cloudinit.distros.ug_util",
     "cloudinit.helpers",
-    "cloudinit.log",
     "cloudinit.mergers",
     "cloudinit.net",
     "cloudinit.net.cmdline",

From 56dc23cbecf4d9e708221ad91c2e57faf1a3f1c9 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Mon, 12 Aug 2024 13:25:23 -0600
Subject: [PATCH 094/131] chore(tox.ini): Simplify configuration, fix minor
 bugs (#5607)

Referencing a command from another environment causes errors when that
environment already exists. Fix this by avoiding indirection in
environment command definitions.

Additionally, simplify environment dependency management by defining two
lists of dependencies: a default one with pinned versions for all
environments, and an unpinned one for "tip" environments.

Several dependencies were missing from the mypy environments, so this
should make things easier by standardizing environment dependencies to
be consistent across environments.
---
 pyproject.toml |   1 -
 tox.ini        | 251 ++++++++++++++++++------------------------------
 2 files changed, 91 insertions(+), 161 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index dbf31f33e55..da98e92b39c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,6 @@ build-backend = "setuptools.build_meta"
 line-length = 79
 include = '(brpm|bddeb|\.py)$'
 
-
 [tool.isort]
 profile = "black"
 line_length = 79
diff --git a/tox.ini b/tox.ini
index d6982cbe382..3c95c91001b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -15,28 +15,47 @@ package = skip
 basepython = python3
 setenv =
     LC_ALL = en_US.utf-8
-passenv=
+passenv =
    PYTEST_ADDOPTS
    HYPOTHESIS_PROFILE
+deps =
+    -r{toxinidir}/requirements.txt
+    -r{toxinidir}/test-requirements.txt
 
-[format_deps]
-black==22.3.0
-hypothesis==6.31.6
-hypothesis_jsonschema==0.20.1
-isort==5.10.1
-mypy==0.950
-pylint==3.2.0
-pytest==7.0.1
-ruff==0.4.3
-types-jsonschema==4.4.2
-types-Jinja2==2.11.9
-types-oauthlib==3.1.6
-types-passlib==1.7.7.12
-types-PyYAML==6.0.4
-types-requests==2.27.8
-types-setuptools==57.4.9
-typing-extensions==4.1.1
+[types]
+deps =
+    # each release of type stubs relates to a specific version of a library
+    # so leave these unpinned
+    types-jsonschema
+    types-Jinja2
+    types-oauthlib
+    types-passlib
+    types-PyYAML
+    types-requests
+    types-setuptools
+    typing-extensions
 
+[pinned_versions]
+deps =
+    {[types]deps}
+    black==24.8.0
+    hypothesis==6.111.0
+    hypothesis_jsonschema==0.23.1
+    isort==5.13.2
+    mypy==1.11.1
+    pylint==3.2.6
+    ruff==0.5.7
+
+[latest_versions]
+deps =
+    {[types]deps}
+    black
+    hypothesis
+    hypothesis_jsonschema
+    isort
+    mypy
+    pylint
+    ruff
 
 [files]
 schema = cloudinit/config/schemas/schema-cloud-config-v1.json
@@ -45,100 +64,54 @@
 network_v1 = cloudinit/config/schemas/schema-network-config-v1.json
 network_v2 = cloudinit/config/schemas/schema-network-config-v2.json
 
 [testenv:ruff]
-deps =
-    ruff=={[format_deps]ruff}
+deps =
{[pinned_versions]deps} commands = {envpython} -m ruff check {posargs:.} [testenv:pylint] -deps = - pylint=={[format_deps]pylint} - -r{toxinidir}/test-requirements.txt - -r{toxinidir}/integration-requirements.txt +deps = {[pinned_versions]deps} commands = {envpython} -m pylint {posargs:.} [testenv:black] -deps = - black=={[format_deps]black} +deps = {[pinned_versions]deps} commands = {envpython} -m black --check {posargs:.} [testenv:isort] -deps = - isort=={[format_deps]isort} +deps = {[pinned_versions]deps} commands = {envpython} -m isort --check-only --diff {posargs:.} [testenv:mypy] deps = - -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt - -r{toxinidir}/doc-requirements.txt - hypothesis=={[format_deps]hypothesis} - hypothesis_jsonschema=={[format_deps]hypothesis_jsonschema} - mypy=={[format_deps]mypy} - types-jsonschema=={[format_deps]types-jsonschema} - types-Jinja2=={[format_deps]types-Jinja2} - types-passlib=={[format_deps]types-passlib} - types-pyyaml=={[format_deps]types-PyYAML} - types-oauthlib=={[format_deps]types-oauthlib} - types-requests=={[format_deps]types-requests} - types-setuptools=={[format_deps]types-setuptools} - typing-extensions=={[format_deps]typing-extensions} + {[testenv]deps} + {[pinned_versions]deps} commands = {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} [testenv:check_format] deps = - black=={[format_deps]black} - ruff=={[format_deps]ruff} - hypothesis=={[format_deps]hypothesis} - hypothesis_jsonschema=={[format_deps]hypothesis_jsonschema} - isort=={[format_deps]isort} - mypy=={[format_deps]mypy} - pylint=={[format_deps]pylint} - types-jsonschema=={[format_deps]types-jsonschema} - types-Jinja2=={[format_deps]types-Jinja2} - types-oauthlib=={[format_deps]types-oauthlib} - types-passlib=={[format_deps]types-passlib} - types-pyyaml=={[format_deps]types-PyYAML} - types-oauthlib=={[format_deps]types-oauthlib} - types-requests=={[format_deps]types-requests} - types-setuptools=={[format_deps]types-setuptools} - typing-extensions=={[format_deps]typing-extensions} - -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt - -r{toxinidir}/doc-requirements.txt + {[testenv]deps} + {[pinned_versions]deps} commands = - {[testenv:black]commands} - {[testenv:ruff]commands} - {[testenv:isort]commands} - {[testenv:mypy]commands} - {[testenv:pylint]commands} + {envpython} -m ruff check {posargs:.} + {envpython} -m pylint {posargs:.} + {envpython} -m black --check {posargs:.} + {envpython} -m isort --check-only --diff {posargs:.} + {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} [testenv:check_format_tip] deps = - black - ruff - hypothesis - hypothesis_jsonschema - isort - mypy - pylint - types-jsonschema - types-Jinja2 - types-oauthlib - types-passlib - types-pyyaml - types-oauthlib - types-requests - types-setuptools - -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt - -r{toxinidir}/doc-requirements.txt + {[testenv]deps} + {[latest_versions]deps} commands = - {[testenv:check_format]commands} + {envpython} -m ruff check {posargs:.} + {envpython} -m pylint {posargs:.} + {envpython} -m black --check {posargs:.} + {envpython} -m isort --check-only --diff {posargs:.} + {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} [testenv:do_format] -deps = - black=={[format_deps]black} - isort=={[format_deps]isort} +deps = {[pinned_versions]deps} commands = {envpython} -m isort . {envpython} -m black . 
@@ -148,35 +121,26 @@ commands = {envpython} -m json.tool --indent 2 {[files]network_v2} {[files]network_v2} [testenv:do_format_tip] -deps = - black - isort +deps = {[latest_versions]deps} commands = - {[testenv:do_format]commands} + {envpython} -m isort . + {envpython} -m black . + {envpython} -m json.tool --indent 2 {[files]schema} {[files]schema} + {envpython} -m json.tool --indent 2 {[files]version} {[files]version} + {envpython} -m json.tool --indent 2 {[files]network_v1} {[files]network_v1} + {envpython} -m json.tool --indent 2 {[files]network_v2} {[files]network_v2} [testenv:py3] -deps = - -r{toxinidir}/test-requirements.txt -commands = {envpython} -m pytest \ - -vvvv --showlocals \ - --durations 10 \ - -m "not hypothesis_slow" \ - --cov=cloudinit --cov-branch \ - {posargs:tests/unittests} +commands = {envpython} -m pytest -m "not hypothesis_slow" --cov=cloud-init --cov-branch {posargs:tests/unittests} -# experimental [testenv:py3-fast] deps = - -r{toxinidir}/test-requirements.txt + {[testenv]deps} pytest-xdist -commands = {envpython} -m pytest -n auto -m "not hypothesis_slow" -m "not serial"\ - {posargs:tests/unittests} +commands = {envpython} -m pytest -n auto -m "not hypothesis_slow" -m "not serial" {posargs:tests/unittests} [testenv:hypothesis-slow] -deps = - hypothesis==6.31.6 - hypothesis_jsonschema==0.20.1 - -r{toxinidir}/test-requirements.txt +deps = {[pinned_versions]deps} commands = {envpython} -m pytest \ -m hypothesis_slow \ --hypothesis-show-statistics \ @@ -184,11 +148,7 @@ commands = {envpython} -m pytest \ #commands = {envpython} -X tracemalloc=40 -Werror::ResourceWarning:cloudinit -m pytest \ [testenv:py3-leak] -deps = {[testenv:py3]deps} -commands = {envpython} -X tracemalloc=40 -Wall -m pytest \ - --durations 10 \ - --cov=cloudinit --cov-branch \ - {posargs:tests/unittests} +commands = {envpython} -X tracemalloc=40 -Wall -m pytest {posargs:tests/unittests} [testenv:lowest-supported] @@ -217,20 +177,17 @@ deps = attrs==17.4.0 responses==0.5.1 passlib -commands = {[testenv:py3]commands} +commands = {envpython} -m pytest -m "not hypothesis_slow" --cov=cloud-init --cov-branch {posargs:tests/unittests} [testenv:doc] -deps = - -r{toxinidir}/doc-requirements.txt +deps = -r{toxinidir}/doc-requirements.txt commands = {envpython} -m sphinx {posargs:-W doc/rtd doc/rtd_html} - doc8 doc/rtd + {envpython} -m doc8 doc/rtd [testenv:doc-spelling] -deps = - -r{toxinidir}/doc-requirements.txt -commands = - {envpython} -m sphinx -b spelling {posargs:-W doc/rtd doc/rtd_html} +deps = -r{toxinidir}/doc-requirements.txt +commands = {envpython} -m sphinx -b spelling {posargs:-W doc/rtd doc/rtd_html} # linkcheck shows false positives and has noisy output. # Despite these limitations, it is better than a manual search of the docs. 
@@ -240,61 +197,36 @@ commands = # # followed by manual verification of the links reported [testenv:linkcheck] -deps = - -r{toxinidir}/doc-requirements.txt +deps = -r{toxinidir}/doc-requirements.txt commands = {envpython} -m sphinx {posargs:-b linkcheck doc/rtd doc/rtd_html} [testenv:tip-ruff] -deps = ruff -commands = {[testenv:ruff]commands} +deps = {[latest_versions]deps} +commands = {envpython} -m ruff check {posargs:.} [testenv:tip-mypy] deps = - -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt - -r{toxinidir}/doc-requirements.txt - hypothesis - hypothesis_jsonschema - mypy - pytest - types-Jinja2 - types-jsonschema - types-oauthlib - types-PyYAML - types-passlib - types-pyyaml - types-oauthlib - types-requests - types-setuptools - typing-extensions + {[testenv]deps} + {[latest_versions]deps} commands = {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} [testenv:tip-pylint] -deps = - # requirements - pylint - # test-requirements - -r{toxinidir}/test-requirements.txt - -r{toxinidir}/integration-requirements.txt +deps = {[latest_versions]deps} commands = {envpython} -m pylint {posargs:.} - [testenv:tip-black] -deps = black -commands = {[testenv:black]commands} +deps = {[latest_versions]deps} +commands = {envpython} -m black --check {posargs:.} [testenv:tip-isort] -deps = isort -commands = {[testenv:isort]commands} +deps = {[latest_versions]deps} +commands = {envpython} -m isort --check-only --diff {posargs:.} [testenv:integration-tests] -commands = {envpython} -m pytest -vv \ - --log-cli-level=INFO \ - --durations 10 \ - {posargs:tests/integration_tests} -deps = - -r{toxinidir}/integration-requirements.txt +deps = -r{toxinidir}/integration-requirements.txt +commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests} passenv = CLOUD_INIT_* PYCLOUDLIB_* @@ -302,22 +234,21 @@ passenv = OS_* [testenv:integration-tests-ci] -commands = {[testenv:integration-tests]commands} -deps = {[testenv:integration-tests]deps} +deps = -r{toxinidir}/integration-requirements.txt +commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests} passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_* - TRAVIS setenv = PYTEST_ADDOPTS="-m ci and not adhoc" [testenv:integration-tests-jenkins] # Pytest's RC=1 means "Tests were collected and run but some of the tests failed". # Do not fail in this case, but let Jenkins handle it using the junit report. +deps = -r{toxinidir}/integration-requirements.txt allowlist_externals = sh -commands = sh -c "{envpython} -m pytest --log-cli-level=INFO -vv {posargs:tests/integration_tests/none} || [ $? -eq 1 ]" -deps = {[testenv:integration-tests]deps} +commands = sh -c "{envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests/none} || [ $? 
-eq 1 ]" passenv = *_proxy CLOUD_INIT_* From 2bb49b4f8277d2c15cf8e867ee972e2b6385a56b Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 12 Aug 2024 16:35:00 -0600 Subject: [PATCH 095/131] chore(mypy): Fix failures on newer versions of mypy (#5607) --- cloudinit/config/schema.py | 4 +--- cloudinit/distros/aosc.py | 2 +- cloudinit/net/__init__.py | 12 +++++------- cloudinit/net/netops/__init__.py | 5 +++++ cloudinit/net/openbsd.py | 2 +- cloudinit/safeyaml.py | 4 ++-- cloudinit/url_helper.py | 3 +-- pyproject.toml | 1 + tests/unittests/config/test_cc_write_files.py | 5 +++-- tests/unittests/helpers.py | 7 ++++++- tests/unittests/test_net.py | 10 ++++------ 11 files changed, 30 insertions(+), 25 deletions(-) diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index a2fceecabcb..f34db025457 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -46,9 +46,7 @@ ) try: - from jsonschema import ValidationError as _ValidationError - - ValidationError = _ValidationError + from jsonschema import ValidationError except ImportError: ValidationError = Exception # type: ignore diff --git a/cloudinit/distros/aosc.py b/cloudinit/distros/aosc.py index 0460c740d5c..5f126136a02 100644 --- a/cloudinit/distros/aosc.py +++ b/cloudinit/distros/aosc.py @@ -107,7 +107,7 @@ def package_command(self, command, args=None, pkgs=None): def install_packages(self, pkglist: PackageList): self.package_command("install", pkgs=pkglist) - def update_package_sources(self): + def update_package_sources(self, *, force=False): self._runner.run( "update-sources", self.package_command, diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index e38b6779dd4..6a213f78865 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -555,10 +555,8 @@ def find_fallback_nic_on_linux() -> Optional[str]: return None -def generate_fallback_config(config_driver=None): +def generate_fallback_config(config_driver=None) -> Optional[dict]: """Generate network cfg v2 for dhcp on the NIC most likely connected.""" - if not config_driver: - config_driver = False target_name = find_fallback_nic() if not target_name: @@ -572,16 +570,16 @@ def generate_fallback_config(config_driver=None): match = { "macaddress": read_sys_net_safe(target_name, "address").lower() } + if config_driver: + driver = device_driver(target_name) + if driver: + match["driver"] = driver cfg = { "dhcp4": True, "dhcp6": True, "set-name": target_name, "match": match, } - if config_driver: - driver = device_driver(target_name) - if driver: - cfg["match"]["driver"] = driver nconf = {"ethernets": {target_name: cfg}, "version": 2} return nconf diff --git a/cloudinit/net/netops/__init__.py b/cloudinit/net/netops/__init__.py index 7b95917874b..8c5e2bd13c6 100644 --- a/cloudinit/net/netops/__init__.py +++ b/cloudinit/net/netops/__init__.py @@ -1,3 +1,4 @@ +from abc import abstractmethod from typing import Optional from cloudinit.subp import SubpResult @@ -5,14 +6,17 @@ class NetOps: @staticmethod + @abstractmethod def link_up(interface: str) -> SubpResult: pass @staticmethod + @abstractmethod def link_down(interface: str) -> SubpResult: pass @staticmethod + @abstractmethod def link_rename(current_name: str, new_name: str): pass @@ -41,6 +45,7 @@ def del_route( pass @staticmethod + @abstractmethod def get_default_route() -> str: pass diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py index 83b33e0380c..6dea579b141 100644 --- a/cloudinit/net/openbsd.py +++ b/cloudinit/net/openbsd.py @@ -10,7 +10,7 @@ class 
Renderer(cloudinit.net.bsd.BSDRenderer): - def write_config(self): + def write_config(self, target=None): for device_name, v in self.interface_configurations.items(): if_file = "etc/hostname.{}".format(device_name) fn = subp.target_path(self.target, if_file) diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py index 2d4612e9647..5ee626bda06 100644 --- a/cloudinit/safeyaml.py +++ b/cloudinit/safeyaml.py @@ -162,8 +162,8 @@ def _get_nested_path_prefix(self, node): return f"{mark.path}." return "" - def construct_mapping(self, node): - mapping = super().construct_mapping(node) + def construct_mapping(self, node, deep=False): + mapping = super().construct_mapping(node, deep=deep) nested_path_prefix = self._get_nested_path_prefix(node) for key_node, value_node in node.value: node_key_path = f"{nested_path_prefix}{key_node.value}" diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 9cb3d4a0088..f2c6eb2e634 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -293,8 +293,7 @@ def __init__(self, response: requests.Response): @property def contents(self) -> bytes: if self._response.content is None: - # typeshed bug: https://github.com/python/typeshed/pull/12180 - return b"" # type: ignore + return b"" return self._response.content @property diff --git a/pyproject.toml b/pyproject.toml index da98e92b39c..2adba376194 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -31,6 +31,7 @@ module = [ "paramiko.*", "pip.*", "pycloudlib.*", + "responses", "serial", "tests.integration_tests.user_settings", "uaclient.*", diff --git a/tests/unittests/config/test_cc_write_files.py b/tests/unittests/config/test_cc_write_files.py index ec0024971ad..7f7f1740112 100644 --- a/tests/unittests/config/test_cc_write_files.py +++ b/tests/unittests/config/test_cc_write_files.py @@ -138,13 +138,14 @@ def test_all_decodings(self): b64 = (base64.b64encode(data), b64_aliases) for content, aliases in (gz, gz_b64, b64): for enc in aliases: + path = "/tmp/file-%s-%s" % (name, enc) cur = { "content": content, - "path": "/tmp/file-%s-%s" % (name, enc), + "path": path, "encoding": enc, } files.append(cur) - expected.append((cur["path"], data)) + expected.append((path, data)) write_files("test_decoding", files, self.owner) diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index ab97973e02a..b214170c3ef 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -599,11 +599,16 @@ def skipIfAptPkg(): try: + import importlib.metadata + import jsonschema assert jsonschema # avoid pyflakes error F401: import unused _jsonschema_version = tuple( - int(part) for part in jsonschema.__version__.split(".") # type: ignore + int(part) + for part in importlib.metadata.metadata("jsonschema") + .get("Version", "") + .split(".") ) _missing_jsonschema_dep = False except ImportError: diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index dbae4f20267..08bf5aa64de 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3551,14 +3551,12 @@ def test_render( ) if network_cfg is None: - network_cfg = net.generate_fallback_config() + parsed_cfg = net.generate_fallback_config() else: - network_cfg = yaml.safe_load(network_cfg) - assert isinstance(network_cfg, dict) + parsed_cfg = yaml.safe_load(network_cfg) + assert isinstance(parsed_cfg, dict) - ns = network_state.parse_net_config_data( - network_cfg, skip_broken=False - ) + ns = network_state.parse_net_config_data(parsed_cfg, skip_broken=False) render_dir = os.path.join(tmp_dir, "render") 
os.makedirs(render_dir) From 56658ec5f037748a11f107e77fb758fbe7fa8753 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 12 Aug 2024 16:47:39 -0600 Subject: [PATCH 096/131] chore(black): Bump version (#5607) --- cloudinit/cmd/devel/parser.py | 2 +- cloudinit/config/cc_ansible.py | 1 + cloudinit/config/cc_growpart.py | 6 +-- cloudinit/config/cc_phone_home.py | 4 +- cloudinit/config/cc_resizefs.py | 2 +- cloudinit/config/cc_ssh.py | 2 +- .../config/cc_ssh_authkey_fingerprints.py | 2 +- cloudinit/config/cc_ssh_import_id.py | 2 +- cloudinit/config/cc_users_groups.py | 4 +- cloudinit/config/cc_write_files.py | 2 +- cloudinit/config/cc_yum_add_repo.py | 8 ++-- cloudinit/config/cc_zypper_add_repo.py | 4 +- cloudinit/config/schema.py | 6 +-- cloudinit/distros/aosc.py | 2 +- .../package_management/package_manager.py | 3 +- cloudinit/distros/parsers/hostname.py | 6 +-- cloudinit/distros/parsers/hosts.py | 6 +-- cloudinit/distros/parsers/resolv_conf.py | 6 +-- cloudinit/distros/rhel_util.py | 2 +- cloudinit/helpers.py | 12 +++--- cloudinit/mergers/__init__.py | 4 +- cloudinit/mergers/m_dict.py | 2 +- cloudinit/net/__init__.py | 2 +- cloudinit/net/ephemeral.py | 8 ++-- cloudinit/net/sysconfig.py | 2 +- cloudinit/netinfo.py | 6 +-- cloudinit/safeyaml.py | 6 +-- cloudinit/sources/DataSourceAliYun.py | 1 + cloudinit/sources/DataSourceConfigDrive.py | 4 +- cloudinit/sources/DataSourceEc2.py | 8 ++-- cloudinit/sources/DataSourceGCE.py | 2 +- cloudinit/sources/DataSourceHetzner.py | 2 +- cloudinit/sources/DataSourceLXD.py | 17 +++++---- cloudinit/sources/DataSourceMAAS.py | 2 +- cloudinit/sources/DataSourceScaleway.py | 2 +- cloudinit/sources/DataSourceVultr.py | 6 +-- cloudinit/sources/__init__.py | 6 +-- cloudinit/sources/helpers/ec2.py | 2 +- cloudinit/sources/helpers/openstack.py | 8 ++-- .../sources/helpers/vmware/imc/config_file.py | 2 +- cloudinit/ssh_util.py | 2 +- cloudinit/stages.py | 12 +++--- cloudinit/url_helper.py | 2 +- conftest.py | 1 + tests/integration_tests/bugs/test_gh626.py | 1 + tests/integration_tests/bugs/test_gh632.py | 1 + tests/integration_tests/bugs/test_gh868.py | 1 + .../integration_tests/bugs/test_lp1835584.py | 1 + .../integration_tests/bugs/test_lp1886531.py | 1 + .../integration_tests/bugs/test_lp1898997.py | 1 + .../integration_tests/bugs/test_lp1901011.py | 1 + .../integration_tests/bugs/test_lp1910835.py | 1 + .../integration_tests/bugs/test_lp1912844.py | 1 + tests/integration_tests/cmd/test_schema.py | 1 + tests/integration_tests/cmd/test_status.py | 1 + .../datasources/test_none.py | 1 + .../modules/test_ca_certs.py | 1 + .../modules/test_command_output.py | 1 + .../modules/test_keys_to_console.py | 1 + tests/integration_tests/modules/test_lxd.py | 1 + .../modules/test_ntp_servers.py | 1 + .../integration_tests/modules/test_puppet.py | 1 + .../modules/test_ssh_auth_key_fingerprints.py | 1 + .../modules/test_users_groups.py | 1 + .../modules/test_wireguard.py | 1 + tests/unittests/config/test_cc_seed_random.py | 2 +- tests/unittests/config/test_cc_ubuntu_pro.py | 38 ++++++++++--------- tests/unittests/conftest.py | 2 +- tests/unittests/distros/test_netconfig.py | 6 +-- .../distros/test_user_data_normalize.py | 6 +-- tests/unittests/filters/test_launch_index.py | 2 +- tests/unittests/helpers.py | 10 ++--- tests/unittests/net/test_net_rendering.py | 1 + tests/unittests/sources/test_akamai.py | 6 +-- tests/unittests/sources/test_nocloud.py | 2 +- tests/unittests/sources/test_openstack.py | 2 +- tests/unittests/sources/test_oracle.py | 6 +-- 
tests/unittests/test_data.py | 6 +-- tests/unittests/test_merging.py | 6 +-- tools/mock-meta.py | 2 +- 80 files changed, 168 insertions(+), 141 deletions(-) diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py index 7ddb8fc74e3..a5dffc010c3 100644 --- a/cloudinit/cmd/devel/parser.py +++ b/cloudinit/cmd/devel/parser.py @@ -39,7 +39,7 @@ def get_parser(parser=None): make_mime.handle_args, ), ] - for (subcmd, helpmsg, get_parser, handler) in subcmds: + for subcmd, helpmsg, get_parser, handler in subcmds: parser = subparsers.add_parser(subcmd, help=helpmsg) get_parser(parser) parser.set_defaults(action=(subcmd, handler)) diff --git a/cloudinit/config/cc_ansible.py b/cloudinit/config/cc_ansible.py index 3b9e931a58d..b14781adf97 100644 --- a/cloudinit/config/cc_ansible.py +++ b/cloudinit/config/cc_ansible.py @@ -1,4 +1,5 @@ """ansible enables running on first boot either ansible-pull""" + import abc import logging import os diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 459f0a3cded..8f2978b0579 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -65,12 +65,10 @@ def __init__(self, distro: Distro): self._distro = distro @abstractmethod - def available(self, devices: list) -> bool: - ... + def available(self, devices: list) -> bool: ... @abstractmethod - def resize(self, diskdev, partnum, partdev, fs): - ... + def resize(self, diskdev, partnum, partdev, fs): ... class ResizeGrowPart(Resizer): diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index 50cecc03bd2..b9dc22a4cfb 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -95,7 +95,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: "pub_key_ed25519": "/etc/ssh/ssh_host_ed25519_key.pub", } - for (n, path) in pubkeys.items(): + for n, path in pubkeys.items(): try: all_keys[n] = util.load_text_file(path) except Exception: @@ -117,7 +117,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: # Get them read to be posted real_submit_keys = {} - for (k, v) in submit_keys.items(): + for k, v in submit_keys.items(): if v is None: real_submit_keys[k] = "N/A" else: diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 87d278710b8..70bd4e17f3b 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -293,7 +293,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: return fstype_lc = fs_type.lower() - for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS: + for pfix, root_cmd in RESIZE_FS_PREFIXES_CMDS: if fstype_lc.startswith(pfix): resizer = root_cmd break diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 947469b5b6d..4c4f0c33e18 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -111,7 +111,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: if "ssh_keys" in cfg: # if there are keys and/or certificates in cloud-config, use them cert_config = [] - for (key, val) in cfg["ssh_keys"].items(): + for key, val in cfg["ssh_keys"].items(): if key not in CONFIG_KEY_TO_FILE: if pattern_unsupported_config_keys.match(key): reason = "unsupported" diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 106b3cbd0c3..8b02789b739 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -112,7 +112,7 @@ 
def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "sha256") (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) - for (user_name, _cfg) in users.items(): + for user_name, _cfg in users.items(): if _cfg.get("no_create_home") or _cfg.get("system"): LOG.debug( "Skipping printing of ssh fingerprints for user '%s' because " diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py index 8abf3914fc6..7c1422dee3d 100644 --- a/cloudinit/config/cc_ssh_import_id.py +++ b/cloudinit/config/cc_ssh_import_id.py @@ -59,7 +59,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: # import for cloudinit created users (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) elist = [] - for (user, user_cfg) in users.items(): + for user, user_cfg in users.items(): import_ids = [] if user_cfg["default"]: import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", []) diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index ace17733c3a..0f3b2121cf4 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -37,10 +37,10 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: (default_user, _user_config) = ug_util.extract_default(users) cloud_keys = cloud.get_public_ssh_keys() or [] - for (name, members) in groups.items(): + for name, members in groups.items(): cloud.distro.create_group(name, members) - for (user, config) in users.items(): + for user, config in users.items(): no_home = [key for key in NO_HOME if config.get(key)] need_home = [key for key in NEED_HOME if config.get(key)] diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index 004ede438d9..c05f227a40b 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -78,7 +78,7 @@ def write_files(name, files, owner: str, ssl_details: Optional[dict] = None): if not files: return - for (i, f_info) in enumerate(files): + for i, f_info in enumerate(files): path = f_info.get("path") if not path: LOG.warning( diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 548c83bab6d..d857d89d30d 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -89,7 +89,7 @@ def _format_repository_config(repo_id, repo_config): to_be = ConfigParser() to_be.add_section(repo_id) # Do basic translation of the items -> values - for (k, v) in repo_config.items(): + for k, v in repo_config.items(): # For now assume that people using this know # the format of yum and don't verify keys/values further to_be.set(repo_id, k, _format_repo_value(v)) @@ -114,7 +114,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: ) repo_locations = {} repo_configs = {} - for (repo_id, repo_config) in repos.items(): + for repo_id, repo_config in repos.items(): canon_repo_id = _canonicalize_id(repo_id) repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id)) if os.path.exists(repo_fn_pth): @@ -135,7 +135,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: repo_config = {} # Do some basic sanity checks/cleaning n_repo_config = {} - for (k, v) in repo_config.items(): + for k, v in repo_config.items(): k = k.lower().strip().replace("-", "_") if k: n_repo_config[k] = v @@ -157,7 +157,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: 
repo_configs[canon_repo_id] = repo_config repo_locations[canon_repo_id] = repo_fn_pth - for (c_repo_id, path) in repo_locations.items(): + for c_repo_id, path in repo_locations.items(): repo_blob = _format_repository_config( c_repo_id, repo_configs.get(c_repo_id) ) diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py index 02b372dcc70..fa55e09b07c 100644 --- a/cloudinit/config/cc_zypper_add_repo.py +++ b/cloudinit/config/cc_zypper_add_repo.py @@ -54,7 +54,7 @@ def _format_repository_config(repo_id, repo_config): to_be = configobj.ConfigObj() to_be[repo_id] = {} # Do basic translation of the items -> values - for (k, v) in repo_config.items(): + for k, v in repo_config.items(): # For now assume that people using this know the format # of zypper repos and don't verify keys/values further to_be[repo_id][k] = _format_repo_value(v) @@ -115,7 +115,7 @@ def _write_repos(repos, repo_base_path): valid_repos[repo_id] = (repo_fn_pth, repo_config) - for (repo_id, repo_data) in valid_repos.items(): + for repo_id, repo_data in valid_repos.items(): repo_blob = _format_repository_config(repo_id, repo_data[-1]) util.write_file(repo_data[0], repo_blob) diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index f34db025457..de9547b987c 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -1557,9 +1557,9 @@ def get_meta_doc(meta: MetaSchema, schema: Optional[dict] = None) -> str: LOG.warning("Unable to render property_doc due to invalid schema") meta_copy["property_doc"] = "" if not meta_copy.get("property_doc", ""): - meta_copy[ - "property_doc" - ] = " No schema definitions for this module" + meta_copy["property_doc"] = ( + " No schema definitions for this module" + ) meta_copy["examples"] = textwrap.indent(_get_examples(meta), " ") if not meta_copy["examples"]: meta_copy["examples"] = " No examples for this module" diff --git a/cloudinit/distros/aosc.py b/cloudinit/distros/aosc.py index 5f126136a02..96fa48b8b6e 100644 --- a/cloudinit/distros/aosc.py +++ b/cloudinit/distros/aosc.py @@ -131,7 +131,7 @@ def update_locale_conf(sys_path, locale_cfg): return (exists, contents) = read_locale_conf(sys_path) updated_am = 0 - for (k, v) in locale_cfg.items(): + for k, v in locale_cfg.items(): if v is None: continue v = str(v) diff --git a/cloudinit/distros/package_management/package_manager.py b/cloudinit/distros/package_management/package_manager.py index 32c4cac246c..9b45bb870b4 100644 --- a/cloudinit/distros/package_management/package_manager.py +++ b/cloudinit/distros/package_management/package_manager.py @@ -22,8 +22,7 @@ def available(self) -> bool: """Return if package manager is installed on system.""" @abstractmethod - def update_package_sources(self, *, force=False): - ... + def update_package_sources(self, *, force=False): ... 
@abstractmethod def install_packages(self, pkglist: Iterable) -> UninstalledPackages: diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py index 7e498a5fab1..7250b6a8eb2 100644 --- a/cloudinit/distros/parsers/hostname.py +++ b/cloudinit/distros/parsers/hostname.py @@ -22,7 +22,7 @@ def parse(self): def __str__(self): self.parse() contents = StringIO() - for (line_type, components) in self._contents: + for line_type, components in self._contents: if line_type == "blank": contents.write("%s\n" % (components[0])) elif line_type == "all_comment": @@ -39,7 +39,7 @@ def __str__(self): @property def hostname(self): self.parse() - for (line_type, components) in self._contents: + for line_type, components in self._contents: if line_type == "hostname": return components[0] return None @@ -50,7 +50,7 @@ def set_hostname(self, your_hostname): return self.parse() replaced = False - for (line_type, components) in self._contents: + for line_type, components in self._contents: if line_type == "hostname": components[0] = str(your_hostname) replaced = True diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py index d907e8b67f8..8d2f73ac91f 100644 --- a/cloudinit/distros/parsers/hosts.py +++ b/cloudinit/distros/parsers/hosts.py @@ -24,7 +24,7 @@ def parse(self): def get_entry(self, ip): self.parse() options = [] - for (line_type, components) in self._contents: + for line_type, components in self._contents: if line_type == "option": (pieces, _tail) = components if len(pieces) and pieces[0] == ip: @@ -34,7 +34,7 @@ def get_entry(self, ip): def del_entries(self, ip): self.parse() n_entries = [] - for (line_type, components) in self._contents: + for line_type, components in self._contents: if line_type != "option": n_entries.append((line_type, components)) continue @@ -68,7 +68,7 @@ def _parse(self, contents): def __str__(self): self.parse() contents = StringIO() - for (line_type, components) in self._contents: + for line_type, components in self._contents: if line_type == "blank": contents.write("%s\n" % (components[0])) elif line_type == "all_comment": diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index 000d0b577ab..6884c740989 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -57,7 +57,7 @@ def search_domains(self): def __str__(self): self.parse() contents = StringIO() - for (line_type, components) in self._contents: + for line_type, components in self._contents: if line_type == "blank": contents.write("\n") elif line_type == "all_comment": @@ -72,7 +72,7 @@ def __str__(self): def _retr_option(self, opt_name): found = [] - for (line_type, components) in self._contents: + for line_type, components in self._contents: if line_type == "option": (cfg_opt, cfg_value, _comment_tail) = components if cfg_opt == opt_name: @@ -134,7 +134,7 @@ def add_search_domain(self, search_domain): def _parse(self, contents): entries = [] - for (i, line) in enumerate(contents.splitlines()): + for i, line in enumerate(contents.splitlines()): sline = line.strip() if not sline: entries.append(("blank", [line])) diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py index 426335f9b96..6a1b28163fd 100644 --- a/cloudinit/distros/rhel_util.py +++ b/cloudinit/distros/rhel_util.py @@ -22,7 +22,7 @@ def update_sysconfig_file(fn, adjustments, allow_empty=False): return (exists, contents) = read_sysconfig_file(fn) updated_am = 0 - for (k, v) in 
adjustments.items(): + for k, v in adjustments.items(): if v is None: continue v = str(v) diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index d12944258b1..470a5b2013f 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -369,13 +369,13 @@ def _unpickle(self, ci_pkl_version: int) -> None: if "instance_data" not in self.lookups: self.lookups["instance_data"] = "instance-data.json" if "instance_data_sensitive" not in self.lookups: - self.lookups[ - "instance_data_sensitive" - ] = "instance-data-sensitive.json" + self.lookups["instance_data_sensitive"] = ( + "instance-data-sensitive.json" + ) if "combined_cloud_config" not in self.lookups: - self.lookups[ - "combined_cloud_config" - ] = "combined-cloud-config.json" + self.lookups["combined_cloud_config"] = ( + "combined-cloud-config.json" + ) if "hotplug.enabled" not in self.lookups: self.lookups["hotplug.enabled"] = "hotplug.enabled" diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index dcbd5c82e60..ce1d8ebce5f 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -127,7 +127,7 @@ def default_mergers(): def construct(parsed_mergers): mergers_to_be = [] - for (m_name, m_ops) in parsed_mergers: + for m_name, m_ops in parsed_mergers: if not m_name.startswith(MERGER_PREFIX): m_name = MERGER_PREFIX + str(m_name) merger_locs, looked_locs = importer.find_module( @@ -147,6 +147,6 @@ def construct(parsed_mergers): # Now form them... mergers = [] root = LookupMerger(mergers) - for (attr, opts) in mergers_to_be: + for attr, opts in mergers_to_be: mergers.append(attr(root, opts)) return root diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py index 9c51bc982b2..20d2716ffa4 100644 --- a/cloudinit/mergers/m_dict.py +++ b/cloudinit/mergers/m_dict.py @@ -61,7 +61,7 @@ def merge_same_key(old_v, new_v): # Otherwise leave it be... return old_v - for (k, v) in merge_with.items(): + for k, v in merge_with.items(): if k in value: if v is None and self._allow_delete: value.pop(k) diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 6a213f78865..28ac814b6e9 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -668,7 +668,7 @@ def _get_current_rename_info(check_downable=True): }} """ cur_info = {} - for (name, mac, driver, device_id) in get_interfaces(): + for name, mac, driver, device_id in get_interfaces(): cur_info[name] = { "downable": None, "device_id": device_id, diff --git a/cloudinit/net/ephemeral.py b/cloudinit/net/ephemeral.py index c8730fb1e8a..39dd8ba3c8e 100644 --- a/cloudinit/net/ephemeral.py +++ b/cloudinit/net/ephemeral.py @@ -356,10 +356,10 @@ def obtain_lease(self): kwargs["prefix_or_mask"], kwargs["ip"] ) if kwargs["static_routes"]: - kwargs[ - "static_routes" - ] = self.distro.dhcp_client.parse_static_routes( - kwargs["static_routes"] + kwargs["static_routes"] = ( + self.distro.dhcp_client.parse_static_routes( + kwargs["static_routes"] + ) ) ephipv4 = EphemeralIPv4Network( self.distro, diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 015c4494928..0684116c0b9 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -675,7 +675,7 @@ def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets, flavor): @classmethod def _render_bonding_opts(cls, iface_cfg, iface, flavor): bond_opts = [] - for (bond_key, value_tpl) in cls.bond_tpl_opts: + for bond_key, value_tpl in cls.bond_tpl_opts: # Seems like either dash or underscore is possible? 
bond_keys = [bond_key, bond_key.replace("_", "-")] for bond_key in bond_keys: diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 6aee531638d..be455b889c7 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -579,7 +579,7 @@ def netdev_pformat(): return "\n" fields = ["Device", "Up", "Address", "Mask", "Scope", "Hw-Address"] tbl = SimpleTable(fields) - for (dev, data) in sorted(netdev.items()): + for dev, data in sorted(netdev.items()): for addr in data.get("ipv4"): tbl.add_row( ( @@ -635,7 +635,7 @@ def route_pformat(): "Flags", ] tbl_v4 = SimpleTable(fields_v4) - for (n, r) in enumerate(routes.get("ipv4")): + for n, r in enumerate(routes.get("ipv4")): route_id = str(n) try: tbl_v4.add_row( @@ -663,7 +663,7 @@ def route_pformat(): "Flags", ] tbl_v6 = SimpleTable(fields_v6) - for (n, r) in enumerate(routes.get("ipv6")): + for n, r in enumerate(routes.get("ipv6")): route_id = str(n) if r["iface"] == "lo": continue diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py index 5ee626bda06..a4328068f66 100644 --- a/cloudinit/safeyaml.py +++ b/cloudinit/safeyaml.py @@ -137,9 +137,9 @@ class _CustomSafeLoaderWithMarks(yaml.SafeLoader): def __init__(self, stream): super().__init__(stream) - self.schemamarks_by_line: Dict[ - int, List[SchemaPathMarks] - ] = defaultdict(list) + self.schemamarks_by_line: Dict[int, List[SchemaPathMarks]] = ( + defaultdict(list) + ) def _get_nested_path_prefix(self, node): if node.start_mark.line in self.schemamarks_by_line: diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 727477df462..d674e1fc081 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -94,6 +94,7 @@ class DataSourceAliYunLocal(DataSourceAliYun): (DataSourceAliYun, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] + # Return a list of data sources that match this set of dependencies def get_datasource_list(depends): return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 5ca6c27d176..6ddfff429dd 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -208,7 +208,7 @@ def read_config_drive(source_dir): (reader.read_v1, [], {}), ] excps = [] - for (functor, args, kwargs) in finders: + for functor, args, kwargs in finders: try: return functor(*args, **kwargs) except openstack.NonReadable as e: @@ -244,7 +244,7 @@ def on_first_boot(data, distro=None, network=True): def write_injected_files(files): if files: LOG.debug("Writing %s injected files", len(files)) - for (filename, content) in files.items(): + for filename, content in files.items(): if not filename.startswith(os.sep): filename = os.sep + filename try: diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index e1ab1c5fe03..526520be2c0 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -943,9 +943,11 @@ def _get_key_as_int_or(dikt, key, alt_value): _get_key_as_int_or( mmd[1], "device-number", float("infinity") ), - mmd[2] - if fallback_nic_order == NicOrder.NIC_NAME - else mmd[0], + ( + mmd[2] + if fallback_nic_order == NicOrder.NIC_NAME + else mmd[0] + ), ), ) ) diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 1e2a422cd03..f3bcd5315a2 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -292,7 +292,7 @@ def read_md(address=None, 
url_params=None, platform_check=True): ) md = {} # Iterate over url_map keys to get metadata items. - for (mkey, paths, required, is_text, is_recursive) in url_map: + for mkey, paths, required, is_text, is_recursive in url_map: value = None for path in paths: new_value = metadata_fetcher.get_value(path, is_text, is_recursive) diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py index 7ef5a5c31fd..6529e2ff1c6 100644 --- a/cloudinit/sources/DataSourceHetzner.py +++ b/cloudinit/sources/DataSourceHetzner.py @@ -76,7 +76,7 @@ def _get_data(self): sec_between=self.wait_retry, retries=self.retries, ) - except (NoDHCPLeaseError) as e: + except NoDHCPLeaseError as e: LOG.error("Bailing, DHCP Exception: %s", e) raise diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py index 43be28e0a15..cb3f5ece009 100644 --- a/cloudinit/sources/DataSourceLXD.py +++ b/cloudinit/sources/DataSourceLXD.py @@ -176,14 +176,15 @@ class DataSourceLXD(sources.DataSource): _network_config: Union[Dict, str] = sources.UNSET _crawled_metadata: Optional[Union[Dict, str]] = sources.UNSET - sensitive_metadata_keys: Tuple[ - str, ... - ] = sources.DataSource.sensitive_metadata_keys + ( - "user.meta-data", - "user.vendor-data", - "user.user-data", - "cloud-init.user-data", - "cloud-init.vendor-data", + sensitive_metadata_keys: Tuple[str, ...] = ( + sources.DataSource.sensitive_metadata_keys + + ( + "user.meta-data", + "user.vendor-data", + "user.user-data", + "cloud-init.user-data", + "cloud-init.vendor-data", + ) ) skip_hotplug_detect = True diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 136cea82495..933d95c924b 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -372,7 +372,7 @@ def main(): ) subcmds = parser.add_subparsers(title="subcommands", dest="subcmd") - for (name, help) in ( + for name, help in ( ("crawl", "crawl the datasource"), ("get", "do a single GET of provided url"), ("check-seed", "read and verify seed at url"), diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 05b50b98ee2..1ee7bf25287 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -337,7 +337,7 @@ def _get_data(self): func=self._crawl_metadata, ) self.metadata["net_in_use"] = "ipv6" - except (ConnectionError): + except ConnectionError: return False return True diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py index 2d7f1f31a1e..d62fbe72ea5 100644 --- a/cloudinit/sources/DataSourceVultr.py +++ b/cloudinit/sources/DataSourceVultr.py @@ -30,9 +30,9 @@ class DataSourceVultr(sources.DataSource): dsname = "Vultr" - sensitive_metadata_keys: Tuple[ - str, ... - ] = sources.DataSource.sensitive_metadata_keys + ("startup-script",) + sensitive_metadata_keys: Tuple[str, ...] 
= ( + sources.DataSource.sensitive_metadata_keys + ("startup-script",) + ) def __init__(self, sys_cfg, distro, paths): super(DataSourceVultr, self).__init__(sys_cfg, distro, paths) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index a3958d9b918..d8182086fa0 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -764,7 +764,7 @@ def _remap_device(self, short_name): # we want to return the correct value for what will actually # exist in this instance mappings = {"sd": ("vd", "xvd", "vtb")} - for (nfrom, tlist) in mappings.items(): + for nfrom, tlist in mappings.items(): if not short_name.startswith(nfrom): continue for nto in tlist: @@ -1014,7 +1014,7 @@ def normalize_pubkey_data(pubkey_data): return list(pubkey_data) if isinstance(pubkey_data, (dict)): - for (_keyname, klist) in pubkey_data.items(): + for _keyname, klist in pubkey_data.items(): # lp:506332 uec metadata service responds with # data that makes boto populate a string for 'klist' rather # than a list. @@ -1170,7 +1170,7 @@ class BrokenMetadata(IOError): def list_from_depends(depends, ds_list): ret_list = [] depset = set(depends) - for (cls, deps) in ds_list: + for cls, deps in ds_list: if depset == set(deps): ret_list.append(cls) return ret_list diff --git a/cloudinit/sources/helpers/ec2.py b/cloudinit/sources/helpers/ec2.py index ffb41dbfd7a..a3590a6e4b2 100644 --- a/cloudinit/sources/helpers/ec2.py +++ b/cloudinit/sources/helpers/ec2.py @@ -121,7 +121,7 @@ def _materialize(self, blob, base_url): child_blob = self._caller(child_url) child_contents[c] = self._materialize(child_blob, child_url) leaf_contents = {} - for (field, resource) in leaves.items(): + for field, resource in leaves.items(): leaf_url = url_helper.combine_url(base_url, resource) leaf_blob = self._caller(leaf_url) leaf_contents[field] = self._leaf_decoder(field, leaf_blob) diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 9b46a22c37d..97ec18faf98 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -87,7 +87,7 @@ def _ec2_name_to_device(self, name): if not self.ec2_metadata: return None bdm = self.ec2_metadata.get("block-device-mapping", {}) - for (ent_name, device) in bdm.items(): + for ent_name, device in bdm.items(): if name == ent_name: return device return None @@ -266,7 +266,7 @@ def datafiles(version): "version": 2, } data = datafiles(self._find_working_version()) - for (name, (path, required, translator)) in data.items(): + for name, (path, required, translator) in data.items(): path = self._path_join(self.base_path, path) data = None found = False @@ -346,7 +346,7 @@ def datafiles(version): results["ec2-metadata"] = self._read_ec2_metadata() # Perform some misc. metadata key renames... 
- for (target_key, source_key, is_required) in KEY_COPIES: + for target_key, source_key, is_required in KEY_COPIES: if is_required and source_key not in metadata: raise BrokenMetadata("No '%s' entry in metadata" % source_key) if source_key in metadata: @@ -412,7 +412,7 @@ def read_v1(self): raise NonReadable("%s: no files found" % (self.base_path)) md = {} - for (name, (key, translator, default)) in FILES_V1.items(): + for name, (key, translator, default) in FILES_V1.items(): if name in found: path = found[name] try: diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py index 9f86838964f..ec17cbc736c 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_file.py +++ b/cloudinit/sources/helpers/vmware/imc/config_file.py @@ -66,7 +66,7 @@ def _loadConfigFile(self, filename): for category in config.sections(): logger.debug("FOUND CATEGORY = '%s'", category) - for (key, value) in config.items(category): + for key, value in config.items(category): self._insertKey(category + "|" + key, value) def get_count_with_prefix(self, prefix): diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 70002086738..f7661929e49 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -593,7 +593,7 @@ def update_ssh_config_lines(lines, updates): # Keywords are case-insensitive and arguments are case-sensitive casemap = dict([(k.lower(), k) for k in updates.keys()]) - for (i, line) in enumerate(lines, start=1): + for i, line in enumerate(lines, start=1): if not line.key: continue if line.key in casemap: diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 1d911aaf3ac..854e318e992 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -81,9 +81,9 @@ def update_event_enabled( case, we only have the data source's `default_update_events`, so an event that should be enabled in userdata may be denied. """ - default_events: Dict[ - EventScope, Set[EventType] - ] = datasource.default_update_events + default_events: Dict[EventScope, Set[EventType]] = ( + datasource.default_update_events + ) user_events: Dict[EventScope, Set[EventType]] = userdata_to_events( cfg.get("updates", {}) ) @@ -985,9 +985,9 @@ def _find_networking_config( } if self.datasource and hasattr(self.datasource, "network_config"): - available_cfgs[ - NetworkConfigSource.DS - ] = self.datasource.network_config + available_cfgs[NetworkConfigSource.DS] = ( + self.datasource.network_config + ) if self.datasource: order = self.datasource.network_config_sources diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index f2c6eb2e634..f404d1130bd 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -462,7 +462,7 @@ def readurl( req_args["headers"] = headers filtered_req_args = {} - for (k, v) in req_args.items(): + for k, v in req_args.items(): if k == "data": continue if k == "headers" and headers_redact: diff --git a/conftest.py b/conftest.py index 11aeae8facd..77111a81cab 100644 --- a/conftest.py +++ b/conftest.py @@ -7,6 +7,7 @@ any of these tests run: that is to say, they must be listed in ``integration-requirements.txt`` and in ``test-requirements.txt``. """ + # If we don't import this early, lru_cache may get applied before we have the # chance to patch. This is also too early for the pytest-antilru plugin # to work. 
diff --git a/tests/integration_tests/bugs/test_gh626.py b/tests/integration_tests/bugs/test_gh626.py index 204161e2c2e..a1f3b97e13c 100644 --- a/tests/integration_tests/bugs/test_gh626.py +++ b/tests/integration_tests/bugs/test_gh626.py @@ -3,6 +3,7 @@ Ensure if wakeonlan is specified in the network config that it is rendered in the /etc/network/interfaces or netplan config. """ + import pytest import yaml diff --git a/tests/integration_tests/bugs/test_gh632.py b/tests/integration_tests/bugs/test_gh632.py index 9e67fe593a4..bd26e6b39d2 100644 --- a/tests/integration_tests/bugs/test_gh632.py +++ b/tests/integration_tests/bugs/test_gh632.py @@ -3,6 +3,7 @@ Verify that if cloud-init is using DataSourceRbxCloud, there is no traceback if the metadata disk cannot be found. """ + import pytest from tests.integration_tests.instances import IntegrationInstance diff --git a/tests/integration_tests/bugs/test_gh868.py b/tests/integration_tests/bugs/test_gh868.py index 67ac9b3a6af..7c0af6b2ff8 100644 --- a/tests/integration_tests/bugs/test_gh868.py +++ b/tests/integration_tests/bugs/test_gh868.py @@ -1,4 +1,5 @@ """Ensure no Traceback when 'chef_license' is set""" + import pytest from tests.integration_tests.instances import IntegrationInstance diff --git a/tests/integration_tests/bugs/test_lp1835584.py b/tests/integration_tests/bugs/test_lp1835584.py index b2d1b1bd866..f44edca80e3 100644 --- a/tests/integration_tests/bugs/test_lp1835584.py +++ b/tests/integration_tests/bugs/test_lp1835584.py @@ -25,6 +25,7 @@ https://bugs.launchpad.net/cloud-init/+bug/1835584 """ + import re import pytest diff --git a/tests/integration_tests/bugs/test_lp1886531.py b/tests/integration_tests/bugs/test_lp1886531.py index d56ca320e2b..d170a133d35 100644 --- a/tests/integration_tests/bugs/test_lp1886531.py +++ b/tests/integration_tests/bugs/test_lp1886531.py @@ -9,6 +9,7 @@ https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1886531 """ + import pytest from tests.integration_tests.util import verify_clean_log diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py index 631285955d7..d183223b9ac 100644 --- a/tests/integration_tests/bugs/test_lp1898997.py +++ b/tests/integration_tests/bugs/test_lp1898997.py @@ -9,6 +9,7 @@ network configuration, and confirms that the bridge can be used to ping the default gateway. """ + import pytest from tests.integration_tests import random_mac_address diff --git a/tests/integration_tests/bugs/test_lp1901011.py b/tests/integration_tests/bugs/test_lp1901011.py index e94caf9b520..4a25c602c14 100644 --- a/tests/integration_tests/bugs/test_lp1901011.py +++ b/tests/integration_tests/bugs/test_lp1901011.py @@ -4,6 +4,7 @@ See https://github.com/canonical/cloud-init/pull/800 """ + import pytest from tests.integration_tests.clouds import IntegrationCloud diff --git a/tests/integration_tests/bugs/test_lp1910835.py b/tests/integration_tests/bugs/test_lp1910835.py index aa0fb75c27b..ff8390f7e0a 100644 --- a/tests/integration_tests/bugs/test_lp1910835.py +++ b/tests/integration_tests/bugs/test_lp1910835.py @@ -17,6 +17,7 @@ material: if the Azure datasource has removed the CRLFs correctly, then they will match. 
""" + import pytest from tests.integration_tests.integration_settings import PLATFORM diff --git a/tests/integration_tests/bugs/test_lp1912844.py b/tests/integration_tests/bugs/test_lp1912844.py index b5aafa76797..15fcf81635f 100644 --- a/tests/integration_tests/bugs/test_lp1912844.py +++ b/tests/integration_tests/bugs/test_lp1912844.py @@ -14,6 +14,7 @@ the traceback that they cause. We work around this by calling ``get_interfaces_by_mac` directly in the test code. """ + import pytest from tests.integration_tests import random_mac_address diff --git a/tests/integration_tests/cmd/test_schema.py b/tests/integration_tests/cmd/test_schema.py index c954484012a..b019e4c2f4f 100644 --- a/tests/integration_tests/cmd/test_schema.py +++ b/tests/integration_tests/cmd/test_schema.py @@ -1,4 +1,5 @@ """Tests for `cloud-init status`""" + from textwrap import dedent import pytest diff --git a/tests/integration_tests/cmd/test_status.py b/tests/integration_tests/cmd/test_status.py index 50396be709c..fe9946b06a0 100644 --- a/tests/integration_tests/cmd/test_status.py +++ b/tests/integration_tests/cmd/test_status.py @@ -1,4 +1,5 @@ """Tests for `cloud-init status`""" + import json import pytest diff --git a/tests/integration_tests/datasources/test_none.py b/tests/integration_tests/datasources/test_none.py index 6d7216e3dca..d79c30404d8 100644 --- a/tests/integration_tests/datasources/test_none.py +++ b/tests/integration_tests/datasources/test_none.py @@ -1,4 +1,5 @@ """DataSourceNone integration tests on LXD.""" + import json from tests.integration_tests.instances import IntegrationInstance diff --git a/tests/integration_tests/modules/test_ca_certs.py b/tests/integration_tests/modules/test_ca_certs.py index 03b362302f0..352dad164ce 100644 --- a/tests/integration_tests/modules/test_ca_certs.py +++ b/tests/integration_tests/modules/test_ca_certs.py @@ -6,6 +6,7 @@ * Mark this as running on Debian and Alpine (once we have marks for that) * Implement testing for the RHEL-specific paths """ + import os.path import pytest diff --git a/tests/integration_tests/modules/test_command_output.py b/tests/integration_tests/modules/test_command_output.py index 96525cac0c4..5179b11150d 100644 --- a/tests/integration_tests/modules/test_command_output.py +++ b/tests/integration_tests/modules/test_command_output.py @@ -4,6 +4,7 @@ (This is ported from ``tests/cloud_tests/testcases/main/command_output_simple.yaml``.)""" + import pytest from tests.integration_tests.instances import IntegrationInstance diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py index a6d644396f4..70f5c1c7007 100644 --- a/tests/integration_tests/modules/test_keys_to_console.py +++ b/tests/integration_tests/modules/test_keys_to_console.py @@ -2,6 +2,7 @@ (This is ported from ``tests/cloud_tests/testcases/modules/keys_to_console.yaml``.)""" + import pytest from tests.integration_tests.decorators import retry diff --git a/tests/integration_tests/modules/test_lxd.py b/tests/integration_tests/modules/test_lxd.py index 308ef185357..a4ff5906a23 100644 --- a/tests/integration_tests/modules/test_lxd.py +++ b/tests/integration_tests/modules/test_lxd.py @@ -3,6 +3,7 @@ (This is ported from ``tests/cloud_tests/testcases/modules/lxd_bridge.yaml``.) 
""" + import warnings import pytest diff --git a/tests/integration_tests/modules/test_ntp_servers.py b/tests/integration_tests/modules/test_ntp_servers.py index fc62e63b346..217140c5268 100644 --- a/tests/integration_tests/modules/test_ntp_servers.py +++ b/tests/integration_tests/modules/test_ntp_servers.py @@ -7,6 +7,7 @@ ``tests/cloud_tests/testcases/modules/ntp_pools.yaml``, and ``tests/cloud_tests/testcases/modules/ntp_chrony.yaml``) """ + import re import pytest diff --git a/tests/integration_tests/modules/test_puppet.py b/tests/integration_tests/modules/test_puppet.py index 796f316a711..9598b8ec971 100644 --- a/tests/integration_tests/modules/test_puppet.py +++ b/tests/integration_tests/modules/test_puppet.py @@ -1,4 +1,5 @@ """Test installation configuration of puppet module.""" + import pytest from tests.integration_tests.instances import IntegrationInstance diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py index 158eb880e09..f13672a6c47 100644 --- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py +++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py @@ -8,6 +8,7 @@ ``tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml``, ``tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml``. )""" + import re import pytest diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py index f1352f860cf..a904cd9f6f2 100644 --- a/tests/integration_tests/modules/test_users_groups.py +++ b/tests/integration_tests/modules/test_users_groups.py @@ -4,6 +4,7 @@ * This module assumes that the "ubuntu" user will be created when "default" is specified; this will need modification to run on other OSes. """ + import re import pytest diff --git a/tests/integration_tests/modules/test_wireguard.py b/tests/integration_tests/modules/test_wireguard.py index e685a269cbe..9ff1a21b2dc 100644 --- a/tests/integration_tests/modules/test_wireguard.py +++ b/tests/integration_tests/modules/test_wireguard.py @@ -1,4 +1,5 @@ """Integration test for the wireguard module.""" + import pytest from pycloudlib.lxd.instance import LXDInstance diff --git a/tests/unittests/config/test_cc_seed_random.py b/tests/unittests/config/test_cc_seed_random.py index 76b9b796a65..15c59523466 100644 --- a/tests/unittests/config/test_cc_seed_random.py +++ b/tests/unittests/config/test_cc_seed_random.py @@ -225,7 +225,7 @@ def test_file_in_environment_for_command(self): def apply_patches(patches): ret = [] - for (ref, name, replace) in patches: + for ref, name, replace in patches: if replace is None: continue orig = getattr(ref, name) diff --git a/tests/unittests/config/test_cc_ubuntu_pro.py b/tests/unittests/config/test_cc_ubuntu_pro.py index 40f8035b30d..07ba8c69bc8 100644 --- a/tests/unittests/config/test_cc_ubuntu_pro.py +++ b/tests/unittests/config/test_cc_ubuntu_pro.py @@ -450,12 +450,14 @@ class TestUbuntuProSchema: # If __version__ no longer exists on jsonschema, that means # we're using a high enough version of jsonschema to not need # to skip this test. 
- JSONSCHEMA_SKIP_REASON - if lifecycle.Version.from_str( - getattr(jsonschema, "__version__", "999") - ) - < lifecycle.Version(4) - else "", + ( + JSONSCHEMA_SKIP_REASON + if lifecycle.Version.from_str( + getattr(jsonschema, "__version__", "999") + ) + < lifecycle.Version(4) + else "" + ), id="deprecation_of_ubuntu_advantage_skip_old_json", ), # Strict keys @@ -1121,9 +1123,9 @@ def test_should_auto_attach_error(self, caplog, fake_uaclient): m_should_auto_attach.should_auto_attach.side_effect = ( FakeUserFacingError("Some error") # noqa: E501 ) - sys.modules[ - "uaclient.api.u.pro.attach.auto.should_auto_attach.v1" - ] = m_should_auto_attach + sys.modules["uaclient.api.u.pro.attach.auto.should_auto_attach.v1"] = ( + m_should_auto_attach + ) assert not _should_auto_attach({}) assert "Error during `should_auto_attach`: Some error" in caplog.text assert ( @@ -1145,9 +1147,9 @@ def test_happy_path( self, ua_section, expected_result, caplog, fake_uaclient ): m_should_auto_attach = mock.Mock() - sys.modules[ - "uaclient.api.u.pro.attach.auto.should_auto_attach.v1" - ] = m_should_auto_attach + sys.modules["uaclient.api.u.pro.attach.auto.should_auto_attach.v1"] = ( + m_should_auto_attach + ) should_auto_attach_value = object() m_should_auto_attach.should_auto_attach.return_value.should_auto_attach = ( # noqa: E501 should_auto_attach_value @@ -1174,9 +1176,9 @@ def test_full_auto_attach_error(self, caplog, mocker, fake_uaclient): m_full_auto_attach.full_auto_attach.side_effect = FakeUserFacingError( "Some error" ) - sys.modules[ - "uaclient.api.u.pro.attach.auto.full_auto_attach.v1" - ] = m_full_auto_attach + sys.modules["uaclient.api.u.pro.attach.auto.full_auto_attach.v1"] = ( + m_full_auto_attach + ) expected_msg = "Error during `full_auto_attach`: Some error" with pytest.raises(RuntimeError, match=re.escape(expected_msg)): _auto_attach(self.ua_section) @@ -1185,9 +1187,9 @@ def test_full_auto_attach_error(self, caplog, mocker, fake_uaclient): def test_happy_path(self, caplog, mocker, fake_uaclient): mocker.patch.dict("sys.modules") sys.modules["uaclient.config"] = mock.Mock() - sys.modules[ - "uaclient.api.u.pro.attach.auto.full_auto_attach.v1" - ] = mock.Mock() + sys.modules["uaclient.api.u.pro.attach.auto.full_auto_attach.v1"] = ( + mock.Mock() + ) _auto_attach(self.ua_section) assert "Attaching to Ubuntu Pro took" in caplog.text diff --git a/tests/unittests/conftest.py b/tests/unittests/conftest.py index 9401f2235ef..375e1d14840 100644 --- a/tests/unittests/conftest.py +++ b/tests/unittests/conftest.py @@ -85,7 +85,7 @@ def fake_filesystem(mocker, tmpdir): # exists, but then it fails because of the retargeting that happens here. 
tmpdir.mkdir("tmp") - for (mod, funcs) in FS_FUNCS.items(): + for mod, funcs in FS_FUNCS.items(): for f, nargs in funcs: func = getattr(mod, f) trap_func = retarget_many_wrapper(str(tmpdir), nargs, func) diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py index d0c64a24a39..b447757bef6 100644 --- a/tests/unittests/distros/test_netconfig.py +++ b/tests/unittests/distros/test_netconfig.py @@ -313,11 +313,11 @@ def assertCfgEquals(self, blob1, blob2): b1 = dict(SysConf(blob1.strip().splitlines())) b2 = dict(SysConf(blob2.strip().splitlines())) self.assertEqual(b1, b2) - for (k, v) in b1.items(): + for k, v in b1.items(): self.assertIn(k, b2) - for (k, v) in b2.items(): + for k, v in b2.items(): self.assertIn(k, b1) - for (k, v) in b1.items(): + for k, v in b1.items(): self.assertEqual(v, b2[k]) diff --git a/tests/unittests/distros/test_user_data_normalize.py b/tests/unittests/distros/test_user_data_normalize.py index 8fd187b4754..3ff2c49fb14 100644 --- a/tests/unittests/distros/test_user_data_normalize.py +++ b/tests/unittests/distros/test_user_data_normalize.py @@ -302,7 +302,7 @@ def test_create_snap_user(self, mock_subp): ], } users, _groups = self._norm(ug_cfg, distro) - for (user, config) in users.items(): + for user, config in users.items(): print("user=%s config=%s" % (user, config)) username = distro.create_user(user, **config) @@ -322,7 +322,7 @@ def test_create_snap_user_known(self, mock_subp): ], } users, _groups = self._norm(ug_cfg, distro) - for (user, config) in users.items(): + for user, config in users.items(): print("user=%s config=%s" % (user, config)) username = distro.create_user(user, **config) @@ -353,7 +353,7 @@ def test_add_user_on_snappy_system( ], } users, _groups = self._norm(ug_cfg, distro) - for (user, config) in users.items(): + for user, config in users.items(): print("user=%s config=%s" % (user, config)) distro.add_user(user, **config) diff --git a/tests/unittests/filters/test_launch_index.py b/tests/unittests/filters/test_launch_index.py index 071cc66f563..1b2ebfb2e20 100644 --- a/tests/unittests/filters/test_launch_index.py +++ b/tests/unittests/filters/test_launch_index.py @@ -21,7 +21,7 @@ def count_messages(root): class TestLaunchFilter(helpers.ResourceUsingTestCase): def assertCounts(self, message, expected_counts): orig_message = copy.deepcopy(message) - for (index, count) in expected_counts.items(): + for index, count in expected_counts.items(): index = util.safe_int(index) filtered_message = launch_index.Filter(index).apply(message) self.assertEqual(count_messages(filtered_message), count) diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index b214170c3ef..dfd9a508c25 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -313,7 +313,7 @@ def tearDown(self): def replicateTestRoot(self, example_root, target_root): real_root = resourceLocation() real_root = os.path.join(real_root, "roots", example_root) - for (dir_path, _dirnames, filenames) in os.walk(real_root): + for dir_path, _dirnames, filenames in os.walk(real_root): real_path = dir_path make_path = rebase_path(real_path[len(real_root) :], target_root) util.ensure_dir(make_path) @@ -340,8 +340,8 @@ def patchUtils(self, new_root): ("write_json", 1), ], } - for (mod, funcs) in patch_funcs.items(): - for (f, am) in funcs: + for mod, funcs in patch_funcs.items(): + for f, am in funcs: func = getattr(mod, f) trap_func = retarget_many_wrapper(new_root, am, func) self.patched_funcs.enter_context( @@ -388,7 +388,7 @@ 
def patchOS(self, new_root): # py27 does not have scandir patch_funcs[os].append(("scandir", 1)) - for (mod, funcs) in patch_funcs.items(): + for mod, funcs in patch_funcs.items(): for f, nargs in funcs: func = getattr(mod, f) trap_func = retarget_many_wrapper(new_root, nargs, func) @@ -511,7 +511,7 @@ def populate_dir(path, files): if not os.path.exists(path): os.makedirs(path) ret = [] - for (name, content) in files.items(): + for name, content in files.items(): p = os.path.sep.join([path, name]) util.ensure_dir(os.path.dirname(p)) with open(p, "wb") as fp: diff --git a/tests/unittests/net/test_net_rendering.py b/tests/unittests/net/test_net_rendering.py index 3e1490b2713..0f3c766fdeb 100644 --- a/tests/unittests/net/test_net_rendering.py +++ b/tests/unittests/net/test_net_rendering.py @@ -24,6 +24,7 @@ in `unittests/test_net.py`. While that file contains similar tests, it has become too large to be maintainable. """ + import glob from enum import Flag, auto from pathlib import Path diff --git a/tests/unittests/sources/test_akamai.py b/tests/unittests/sources/test_akamai.py index 2480269f6e6..e0472139037 100644 --- a/tests/unittests/sources/test_akamai.py +++ b/tests/unittests/sources/test_akamai.py @@ -38,9 +38,9 @@ def _get_datasource( return_value="", ): if local: - ds: Union[ - DataSourceAkamai, DataSourceAkamaiLocal - ] = DataSourceAkamaiLocal(sys_cfg, None, None) + ds: Union[DataSourceAkamai, DataSourceAkamaiLocal] = ( + DataSourceAkamaiLocal(sys_cfg, None, None) + ) else: ds = DataSourceAkamai(sys_cfg, None, None) diff --git a/tests/unittests/sources/test_nocloud.py b/tests/unittests/sources/test_nocloud.py index b98ff73c9ac..8701b9bd131 100644 --- a/tests/unittests/sources/test_nocloud.py +++ b/tests/unittests/sources/test_nocloud.py @@ -384,7 +384,7 @@ def test_parse_cmdline_data_valid(self): ), ) - for (fmt, expected) in pairs: + for fmt, expected in pairs: fill = {} cmdline = fmt % {"ds_id": ds_id} ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py index 380fe340890..154a7620759 100644 --- a/tests/unittests/sources/test_openstack.py +++ b/tests/unittests/sources/test_openstack.py @@ -99,7 +99,7 @@ def match_ec2_url(uri, headers): return (200, headers, ec2_files.get(path)) if path == "latest/meta-data/": buf = StringIO() - for (k, v) in ec2_meta.items(): + for k, v in ec2_meta.items(): if isinstance(v, (list, tuple)): buf.write("%s/" % (k)) else: diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py index f1625fbf9f2..2372ca5ecfc 100644 --- a/tests/unittests/sources/test_oracle.py +++ b/tests/unittests/sources/test_oracle.py @@ -1076,9 +1076,9 @@ def test_secondary_nic_addition( """ if configure_secondary_nics is not None: - oracle_ds.ds_cfg[ - "configure_secondary_nics" - ] = configure_secondary_nics + oracle_ds.ds_cfg["configure_secondary_nics"] = ( + configure_secondary_nics + ) oracle_ds._vnics_data = "DummyData" with mock.patch.object( diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 14be6fa48e3..7621c5f6c80 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -362,9 +362,9 @@ def test_merging_cloud_config(self, tmpdir): - morestuff """ message2 = MIMEBase("text", "cloud-config") - message2[ - "X-Merge-Type" - ] = "dict(recurse_array,recurse_str)+list(append)+str(append)" + message2["X-Merge-Type"] = ( + "dict(recurse_array,recurse_str)+list(append)+str(append)" + ) 
message2.set_payload(blob2) blob3 = """ diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index 81c878d2ee2..efb71618ce3 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -31,7 +31,7 @@ def _old_mergedict(src, cand): Nested dictionaries are merged recursively. """ if isinstance(src, dict) and isinstance(cand, dict): - for (k, v) in cand.items(): + for k, v in cand.items(): if k not in src: src[k] = v else: @@ -145,10 +145,10 @@ def test_merge_cc_samples(self): paths = c_helpers.Paths({}) cc_handler = cloud_config.CloudConfigPartHandler(paths) cc_handler.cloud_fn = None - for (payloads, (expected_merge, expected_fn)) in tests: + for payloads, (expected_merge, expected_fn) in tests: cc_handler.handle_part(None, CONTENT_START, None, None, None, None) merging_fns = [] - for (fn, contents) in payloads: + for fn, contents in payloads: cc_handler.handle_part( None, None, "%s.yaml" % (fn), contents, None, {} ) diff --git a/tools/mock-meta.py b/tools/mock-meta.py index a52536165f9..f8fcd776284 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -280,7 +280,7 @@ def get_data(self, params, who, **kwargs): return result else: contents = [] - for (i, key_id) in enumerate(key_ids): + for i, key_id in enumerate(key_ids): contents.append("%s=%s" % (i, key_id)) return "\n".join(contents) elif action == "placement": From e1845be5dccaffe4cd3970542e709f1c591560bd Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 12 Aug 2024 17:04:25 -0600 Subject: [PATCH 097/131] ci: Drop Python 3.6 and 3.7 (#5607) Bump Ubuntu version for better pip dependency resolution. --- .github/workflows/check_format.yml | 2 +- .github/workflows/cla.yml | 2 +- .github/workflows/labeler.yaml | 2 +- .github/workflows/unit.yml | 10 ++++++---- tox.ini | 26 +++++++++++++------------- 5 files changed, 22 insertions(+), 20 deletions(-) diff --git a/.github/workflows/check_format.yml b/.github/workflows/check_format.yml index ba44e665811..c2f5808a53e 100644 --- a/.github/workflows/check_format.yml +++ b/.github/workflows/check_format.yml @@ -19,7 +19,7 @@ jobs: matrix: env: [ruff, mypy, pylint, black, isort] name: Check ${{ matrix.env }} - runs-on: ubuntu-20.04 + runs-on: ubuntu-24.04 steps: - name: "Checkout #1" uses: actions/checkout@v3.0.0 diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index fd438b2106e..b15578864b6 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -1,4 +1,4 @@ -name: Verify Contributor License Agreement +name: CLA Check on: [pull_request] diff --git a/.github/workflows/labeler.yaml b/.github/workflows/labeler.yaml index 71171438900..a47c5a338e2 100644 --- a/.github/workflows/labeler.yaml +++ b/.github/workflows/labeler.yaml @@ -1,4 +1,4 @@ -name: "Pull Request Labeler" +name: PR Labeler on: - pull_request_target diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 5eda03c8e63..d704ae64d7b 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -14,19 +14,21 @@ jobs: unittests: strategy: matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] toxenv: [py3] + slug: [""] experimental: [false] check-latest: [false] continue-on-error: [false] include: - - python-version: "3.6" + - python-version: "3.8" toxenv: lowest-supported + slug: (lowest-supported) continue-on-error: false check-latest: false experimental: false - name: unittest / ${{ matrix.toxenv }} / python ${{matrix.python-version}} - 
runs-on: ubuntu-20.04 + name: Python ${{matrix.python-version}} ${{ matrix.slug }} + runs-on: ubuntu-22.04 continue-on-error: ${{ matrix.experimental }} steps: - name: "Checkout" diff --git a/tox.ini b/tox.ini index 3c95c91001b..be5e1d647d2 100644 --- a/tox.ini +++ b/tox.ini @@ -160,23 +160,23 @@ commands = {envpython} -X tracemalloc=40 -Wall -m pytest {posargs:tests/unittest # To obtain these versions, check the versions of these libraries # in the oldest support Ubuntu distro. Theses versions are from bionic. deps = - jinja2==2.10 - oauthlib==2.0.6 + jinja2==2.10.1 + oauthlib==3.1.0 pyserial==3.4 configobj==5.0.6 - pyyaml==3.12 - requests==2.18.4 - jsonpatch==1.16 - jsonschema==2.6.0 + pyyaml==5.3.1 + requests==2.22.0 + jsonpatch==1.23 + jsonschema==3.2.0 # test-requirements - pytest==3.3.2 - pytest-cov==2.5.1 - pytest-mock==1.7.1 - setuptools==44.0.0 - # Needed by pytest and default causes failures - attrs==17.4.0 - responses==0.5.1 + pytest==4.6.9 + pytest-cov==2.8.1 + pytest-mock==1.10.4 + setuptools==45.2.0 + responses==0.9.0 passlib + # required for this version of jinja2 + markupsafe==2.0.1 commands = {envpython} -m pytest -m "not hypothesis_slow" --cov=cloud-init --cov-branch {posargs:tests/unittests} [testenv:doc] From 90a319046362cdb68d203d1ee7bf59219afb9296 Mon Sep 17 00:00:00 2001 From: Curt Moore Date: Wed, 14 Aug 2024 03:29:42 -0500 Subject: [PATCH 098/131] Update behavior of base bond interface with NetworkManager (#5385) When using NetworkManager, if the base bond interface does not have subnet information configured, ensure it is disabled with respect to ipv4 and ipv6. Otherwise, the base bond interface defaults to 'auto' and will try to configure itself via DHCP. This is problematic when using a tagged VLAN interface on top of the bond as the base interface will try to configure itself via DHCP on the untagged VLAN. --- cloudinit/net/network_manager.py | 19 ++++++++++++++++--- tests/unittests/net/test_network_manager.py | 16 ++++++++++++++++ 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/cloudinit/net/network_manager.py b/cloudinit/net/network_manager.py index b5b9697e5f0..b643d40d858 100644 --- a/cloudinit/net/network_manager.py +++ b/cloudinit/net/network_manager.py @@ -145,11 +145,13 @@ def _set_ip_method(self, family, subnet_type): "dhcp": "auto", } - # Ensure we got an [ipvX] section - self._set_default(family, "method", "disabled") + # Ensure we have an [ipvX] section, default to disabled + method = "disabled" + self._set_default(family, "method", method) try: - method = method_map[subnet_type] + if subnet_type: + method = method_map[subnet_type] except KeyError: # What else can we do method = "auto" @@ -360,6 +362,17 @@ def render_interface(self, iface, network_state, renderer): found_dns_search = [] # Deal with Layer 3 configuration + if if_type == "bond" and not iface["subnets"]: + # If there is no L3 subnet config for a given connection, + # ensure it is disabled. Without this, the interface + # defaults to 'auto' which implies DHCP. This is problematic + # for certain configurations such as bonds where the root + # device itself may not have a subnet config and should be + # disabled while a separate VLAN interface on the bond holds + # the subnet information. 
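+ # For example (hypothetical device names): a bond0 with no + # subnets that only carries a bond0.100 VLAN holding the + # static addressing should render with method=disabled for + # both [ipv4] and [ipv6] on bond0, while bond0.100 keeps + # the method derived from its own subnets.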
+ for family in ["ipv4", "ipv6"]: + self._set_ip_method(family, None) + for subnet in iface["subnets"]: family = "ipv6" if subnet_is_ipv6(subnet) else "ipv4" diff --git a/tests/unittests/net/test_network_manager.py b/tests/unittests/net/test_network_manager.py index 2aa476d7d15..d9afb78d936 100644 --- a/tests/unittests/net/test_network_manager.py +++ b/tests/unittests/net/test_network_manager.py @@ -130,6 +130,14 @@ def test_bond_dns_baseline(self, tmpdir): [bond] mode=802.3ad + [ipv4] + method=disabled + may-fail=false + + [ipv6] + method=disabled + may-fail=false + [ethernet] mtu=9000 @@ -279,6 +287,14 @@ def test_bond_dns_redacted_with_method_disabled(self, tmpdir): [bond] mode=802.3ad + [ipv4] + method=disabled + may-fail=false + + [ipv6] + method=disabled + may-fail=false + [ethernet] mtu=9000 From d79050d1b9c73a1dd577159000752e0080fd75c1 Mon Sep 17 00:00:00 2001 From: Curt Moore Date: Wed, 14 Aug 2024 04:25:16 -0500 Subject: [PATCH 099/131] fix(nm): Ensure bond property name formatting matches schema definition (#5383) The cloud-init network config version 1 schema defines the bond properties with underscores, prepended with 'bond-'. This change ensures consistency with the schema for the bond property names. GH-5366 --- cloudinit/net/network_manager.py | 12 +++-- tests/unittests/net/network_configs.py | 53 ++++++++------------- tests/unittests/net/test_network_manager.py | 4 ++ tests/unittests/test_net.py | 6 +-- 4 files changed, 34 insertions(+), 41 deletions(-) diff --git a/cloudinit/net/network_manager.py b/cloudinit/net/network_manager.py index b643d40d858..30a56ebffdd 100644 --- a/cloudinit/net/network_manager.py +++ b/cloudinit/net/network_manager.py @@ -333,16 +333,18 @@ def render_interface(self, iface, network_state, renderer): # These are the interface properties that map nicely # to NetworkManager properties + # NOTE: Please ensure these items are formatted so as + # to match the schema in schema-network-config-v1.json _prop_map = { "bond": { "mode": "bond-mode", - "miimon": "bond_miimon", - "xmit_hash_policy": "bond-xmit-hash-policy", - "num_grat_arp": "bond-num-grat-arp", + "miimon": "bond-miimon", + "xmit_hash_policy": "bond-xmit_hash_policy", + "num_grat_arp": "bond-num_grat_arp", "downdelay": "bond-downdelay", "updelay": "bond-updelay", - "fail_over_mac": "bond-fail-over-mac", - "primary_reselect": "bond-primary-reselect", + "fail_over_mac": "bond-fail_over_mac", + "primary_reselect": "bond-primary_reselect", "primary": "bond-primary", }, "bridge": { diff --git a/tests/unittests/net/network_configs.py b/tests/unittests/net/network_configs.py index 2b55bbf421a..0779c5809a9 100644 --- a/tests/unittests/net/network_configs.py +++ b/tests/unittests/net/network_configs.py @@ -2160,8 +2160,6 @@ [bond] mode=active-backup - miimon=100 - xmit_hash_policy=layer3+4 [ipv6] method=auto @@ -3055,13 +3053,13 @@ params: bond-mode: active-backup bond_miimon: 100 - bond-xmit-hash-policy: "layer3+4" - bond-num-grat-arp: 5 + bond-xmit_hash_policy: "layer3+4" + bond-num_grat_arp: 5 bond-downdelay: 10 bond-updelay: 20 - bond-fail-over-mac: active + bond-fail_over_mac: active bond-primary: bond0s0 - bond-primary-reselect: always + bond-primary_reselect: always subnets: - type: static address: 192.168.0.2/24 @@ -3138,27 +3136,27 @@ auto bond0s0 iface bond0s0 inet manual bond-downdelay 10 - bond-fail-over-mac active + bond-fail_over_mac active bond-master bond0 bond-mode active-backup - bond-num-grat-arp 5 + bond-num_grat_arp 5 bond-primary bond0s0 - bond-primary-reselect always + 
bond-primary_reselect always bond-updelay 20 - bond-xmit-hash-policy layer3+4 + bond-xmit_hash_policy layer3+4 bond_miimon 100 auto bond0s1 iface bond0s1 inet manual bond-downdelay 10 - bond-fail-over-mac active + bond-fail_over_mac active bond-master bond0 bond-mode active-backup - bond-num-grat-arp 5 + bond-num_grat_arp 5 bond-primary bond0s0 - bond-primary-reselect always + bond-primary_reselect always bond-updelay 20 - bond-xmit-hash-policy layer3+4 + bond-xmit_hash_policy layer3+4 bond_miimon 100 auto bond0 @@ -3166,14 +3164,14 @@ address 192.168.0.2/24 gateway 192.168.0.1 bond-downdelay 10 - bond-fail-over-mac active + bond-fail_over_mac active bond-mode active-backup - bond-num-grat-arp 5 + bond-num_grat_arp 5 bond-primary bond0s0 - bond-primary-reselect always + bond-primary_reselect always bond-slaves none bond-updelay 20 - bond-xmit-hash-policy layer3+4 + bond-xmit_hash_policy layer3+4 bond_miimon 100 hwaddress aa:bb:cc:dd:e8:ff mtu 9000 @@ -3199,12 +3197,8 @@ "ifcfg-bond0": textwrap.dedent( """\ BONDING_MASTER=yes - BONDING_MODULE_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ - """miimon=100 num_grat_arp=5 """ - """downdelay=10 updelay=20 """ - """fail_over_mac=active """ - """primary=bond0s0 """ - """primary_reselect=always" + BONDING_MODULE_OPTS="mode=active-backup miimon=100 """ + """downdelay=10 updelay=20 primary=bond0s0" BONDING_SLAVE_0=bond0s0 BONDING_SLAVE_1=bond0s1 BOOTPROTO=static @@ -3237,12 +3231,8 @@ "ifcfg-bond0": textwrap.dedent( """\ BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ - """miimon=100 num_grat_arp=5 """ - """downdelay=10 updelay=20 """ - """fail_over_mac=active """ - """primary=bond0s0 """ - """primary_reselect=always" + BONDING_OPTS="mode=active-backup miimon=100 """ + """downdelay=10 updelay=20 primary=bond0s0" BONDING_SLAVE0=bond0s0 BONDING_SLAVE1=bond0s1 BOOTPROTO=none @@ -3361,7 +3351,6 @@ [bond] mode=active-backup - miimon=100 xmit_hash_policy=layer3+4 num_grat_arp=5 downdelay=10 @@ -3719,8 +3708,6 @@ [bond] mode=active-backup - miimon=100 - xmit_hash_policy=layer3+4 num_grat_arp=5 downdelay=10 updelay=20 diff --git a/tests/unittests/net/test_network_manager.py b/tests/unittests/net/test_network_manager.py index d9afb78d936..4551698daba 100644 --- a/tests/unittests/net/test_network_manager.py +++ b/tests/unittests/net/test_network_manager.py @@ -129,6 +129,8 @@ def test_bond_dns_baseline(self, tmpdir): [bond] mode=802.3ad + miimon=100 + xmit_hash_policy=layer3+4 [ipv4] method=disabled @@ -286,6 +288,8 @@ def test_bond_dns_redacted_with_method_disabled(self, tmpdir): [bond] mode=802.3ad + miimon=100 + xmit_hash_policy=layer3+4 [ipv4] method=disabled diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 08bf5aa64de..68e44fa8021 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -200,7 +200,7 @@ bond-miimon: 100 bond-mode: 802.3ad bond-updelay: 0 - bond-xmit-hash-policy: layer3+4 + bond-xmit_hash_policy: layer3+4 subnets: - address: 10.101.10.47/23 gateway: 10.101.11.254 @@ -254,7 +254,7 @@ bond-miimon: 100 bond-mode: 802.3ad bond-updelay: 0 - bond-xmit-hash-policy: layer3+4 + bond-xmit_hash_policy: layer3+4 subnets: - type: manual type: bond @@ -296,7 +296,7 @@ bond-miimon: 100 bond-mode: 802.3ad bond-updelay: 0 - bond-xmit-hash-policy: layer3+4 + bond-xmit_hash_policy: layer3+4 subnets: - address: 10.101.8.65/26 routes: From 2e4c39b75082a8664db50da436d512b123011319 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 14 Aug 2024 22:16:12 -0600 Subject: 
[PATCH 100/131] tests: fix test_ca_certs.py for gcp (#5621)

Avoid the exclusive expectation that cloud-init is the only agent
registering certificates in a system to
/etc/ssl/certs/ca-certificates.crt.

On Google Cloud Platform, Google Guest Agent does set up root certs,
which makes performing a checksum of ca-certificates.crt incorrect due
to the extra certs present in ca-certificates.crt.

Adapt the test to assert that cloud-init's cert is contained in
ca-certificates.crt but is not the exclusive content of the file.

Fixes GH-5609
---
 .../modules/test_ca_certs.py | 86 +++++++++----------
 1 file changed, 43 insertions(+), 43 deletions(-)

diff --git a/tests/integration_tests/modules/test_ca_certs.py b/tests/integration_tests/modules/test_ca_certs.py
index 352dad164ce..9e740dda228 100644
--- a/tests/integration_tests/modules/test_ca_certs.py
+++ b/tests/integration_tests/modules/test_ca_certs.py
@@ -8,6 +8,7 @@
 """
 import os.path
+from textwrap import indent
 
 import pytest
 
@@ -15,47 +16,51 @@
 from tests.integration_tests.releases import IS_UBUNTU
 from tests.integration_tests.util import get_inactive_modules, verify_clean_log
 
-USER_DATA = """\
+CERT_CONTENT = """\
+-----BEGIN CERTIFICATE-----
+MIIGJzCCBA+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBsjELMAkGA1UEBhMCRlIx
+DzANBgNVBAgMBkFsc2FjZTETMBEGA1UEBwwKU3RyYXNib3VyZzEYMBYGA1UECgwP
+d3d3LmZyZWVsYW4ub3JnMRAwDgYDVQQLDAdmcmVlbGFuMS0wKwYDVQQDDCRGcmVl
+bGFuIFNhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxIjAgBgkqhkiG9w0BCQEW
+E2NvbnRhY3RAZnJlZWxhbi5vcmcwHhcNMTIwNDI3MTAzMTE4WhcNMjIwNDI1MTAz
+MTE4WjB+MQswCQYDVQQGEwJGUjEPMA0GA1UECAwGQWxzYWNlMRgwFgYDVQQKDA93
+d3cuZnJlZWxhbi5vcmcxEDAOBgNVBAsMB2ZyZWVsYW4xDjAMBgNVBAMMBWFsaWNl
+MSIwIAYJKoZIhvcNAQkBFhNjb250YWN0QGZyZWVsYW4ub3JnMIICIjANBgkqhkiG
+9w0BAQEFAAOCAg8AMIICCgKCAgEA3W29+ID6194bH6ejLrIC4hb2Ugo8v6ZC+Mrc
+k2dNYMNPjcOKABvxxEtBamnSaeU/IY7FC/giN622LEtV/3oDcrua0+yWuVafyxmZ
+yTKUb4/GUgafRQPf/eiX9urWurtIK7XgNGFNUjYPq4dSJQPPhwCHE/LKAykWnZBX
+RrX0Dq4XyApNku0IpjIjEXH+8ixE12wH8wt7DEvdO7T3N3CfUbaITl1qBX+Nm2Z6
+q4Ag/u5rl8NJfXg71ZmXA3XOj7zFvpyapRIZcPmkvZYn7SMCp8dXyXHPdpSiIWL2
+uB3KiO4JrUYvt2GzLBUThp+lNSZaZ/Q3yOaAAUkOx+1h08285Pi+P8lO+H2Xic4S
+vMq1xtLg2bNoPC5KnbRfuFPuUD2/3dSiiragJ6uYDLOyWJDivKGt/72OVTEPAL9o
+6T2pGZrwbQuiFGrGTMZOvWMSpQtNl+tCCXlT4mWqJDRwuMGrI4DnnGzt3IKqNwS4
+Qyo9KqjMIPwnXZAmWPm3FOKe4sFwc5fpawKO01JZewDsYTDxVj+cwXwFxbE2yBiF
+z2FAHwfopwaH35p3C6lkcgP2k/zgAlnBluzACUI+MKJ/G0gv/uAhj1OHJQ3L6kn1
+SpvQ41/ueBjlunExqQSYD7GtZ1Kg8uOcq2r+WISE3Qc9MpQFFkUVllmgWGwYDuN3
+Zsez95kCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNT
+TCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFFlfyRO6G8y5qEFKikl5
+ajb2fT7XMB8GA1UdIwQYMBaAFCNsLT0+KV14uGw+quK7Lh5sh/JTMA0GCSqGSIb3
+DQEBBQUAA4ICAQAT5wJFPqervbja5+90iKxi1d0QVtVGB+z6aoAMuWK+qgi0vgvr
+mu9ot2lvTSCSnRhjeiP0SIdqFMORmBtOCFk/kYDp9M/91b+vS+S9eAlxrNCB5VOf
+PqxEPp/wv1rBcE4GBO/c6HcFon3F+oBYCsUQbZDKSSZxhDm3mj7pb67FNbZbJIzJ
+70HDsRe2O04oiTx+h6g6pW3cOQMgIAvFgKN5Ex727K4230B0NIdGkzuj4KSML0NM
+slSAcXZ41OoSKNjy44BVEZv0ZdxTDrRM4EwJtNyggFzmtTuV02nkUj1bYYYC5f0L
+ADr6s0XMyaNk8twlWYlYDZ5uKDpVRVBfiGcq0uJIzIvemhuTrofh8pBQQNkPRDFT
+Rq1iTo1Ihhl3/Fl1kXk1WR3jTjNb4jHX7lIoXwpwp767HAPKGhjQ9cFbnHMEtkro
+RlJYdtRq5mccDtwT0GFyoJLLBZdHHMHJz0F9H7FNk2tTQQMhK5MVYwg+LIaee586
+CQVqfbscp7evlgjLW98H+5zylRHAgoH2G79aHljNKMp9BOuq6SnEglEsiWGVtu2l
+hnx8SB3sVJZHeer8f/UQQwqbAO+Kdy70NmbSaqaVtp8jOxLiidWkwSyRTsuU6D8i
+DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ==
+-----END CERTIFICATE-----
+"""
+
+USER_DATA = f"""\
 #cloud-config
 ca_certs:
   remove_defaults: true
   trusted:
-    - |
-     -----BEGIN CERTIFICATE-----
-     
MIIGJzCCBA+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBsjELMAkGA1UEBhMCRlIx - DzANBgNVBAgMBkFsc2FjZTETMBEGA1UEBwwKU3RyYXNib3VyZzEYMBYGA1UECgwP - d3d3LmZyZWVsYW4ub3JnMRAwDgYDVQQLDAdmcmVlbGFuMS0wKwYDVQQDDCRGcmVl - bGFuIFNhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxIjAgBgkqhkiG9w0BCQEW - E2NvbnRhY3RAZnJlZWxhbi5vcmcwHhcNMTIwNDI3MTAzMTE4WhcNMjIwNDI1MTAz - MTE4WjB+MQswCQYDVQQGEwJGUjEPMA0GA1UECAwGQWxzYWNlMRgwFgYDVQQKDA93 - d3cuZnJlZWxhbi5vcmcxEDAOBgNVBAsMB2ZyZWVsYW4xDjAMBgNVBAMMBWFsaWNl - MSIwIAYJKoZIhvcNAQkBFhNjb250YWN0QGZyZWVsYW4ub3JnMIICIjANBgkqhkiG - 9w0BAQEFAAOCAg8AMIICCgKCAgEA3W29+ID6194bH6ejLrIC4hb2Ugo8v6ZC+Mrc - k2dNYMNPjcOKABvxxEtBamnSaeU/IY7FC/giN622LEtV/3oDcrua0+yWuVafyxmZ - yTKUb4/GUgafRQPf/eiX9urWurtIK7XgNGFNUjYPq4dSJQPPhwCHE/LKAykWnZBX - RrX0Dq4XyApNku0IpjIjEXH+8ixE12wH8wt7DEvdO7T3N3CfUbaITl1qBX+Nm2Z6 - q4Ag/u5rl8NJfXg71ZmXA3XOj7zFvpyapRIZcPmkvZYn7SMCp8dXyXHPdpSiIWL2 - uB3KiO4JrUYvt2GzLBUThp+lNSZaZ/Q3yOaAAUkOx+1h08285Pi+P8lO+H2Xic4S - vMq1xtLg2bNoPC5KnbRfuFPuUD2/3dSiiragJ6uYDLOyWJDivKGt/72OVTEPAL9o - 6T2pGZrwbQuiFGrGTMZOvWMSpQtNl+tCCXlT4mWqJDRwuMGrI4DnnGzt3IKqNwS4 - Qyo9KqjMIPwnXZAmWPm3FOKe4sFwc5fpawKO01JZewDsYTDxVj+cwXwFxbE2yBiF - z2FAHwfopwaH35p3C6lkcgP2k/zgAlnBluzACUI+MKJ/G0gv/uAhj1OHJQ3L6kn1 - SpvQ41/ueBjlunExqQSYD7GtZ1Kg8uOcq2r+WISE3Qc9MpQFFkUVllmgWGwYDuN3 - Zsez95kCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNT - TCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFFlfyRO6G8y5qEFKikl5 - ajb2fT7XMB8GA1UdIwQYMBaAFCNsLT0+KV14uGw+quK7Lh5sh/JTMA0GCSqGSIb3 - DQEBBQUAA4ICAQAT5wJFPqervbja5+90iKxi1d0QVtVGB+z6aoAMuWK+qgi0vgvr - mu9ot2lvTSCSnRhjeiP0SIdqFMORmBtOCFk/kYDp9M/91b+vS+S9eAlxrNCB5VOf - PqxEPp/wv1rBcE4GBO/c6HcFon3F+oBYCsUQbZDKSSZxhDm3mj7pb67FNbZbJIzJ - 70HDsRe2O04oiTx+h6g6pW3cOQMgIAvFgKN5Ex727K4230B0NIdGkzuj4KSML0NM - slSAcXZ41OoSKNjy44BVEZv0ZdxTDrRM4EwJtNyggFzmtTuV02nkUj1bYYYC5f0L - ADr6s0XMyaNk8twlWYlYDZ5uKDpVRVBfiGcq0uJIzIvemhuTrofh8pBQQNkPRDFT - Rq1iTo1Ihhl3/Fl1kXk1WR3jTjNb4jHX7lIoXwpwp767HAPKGhjQ9cFbnHMEtkro - RlJYdtRq5mccDtwT0GFyoJLLBZdHHMHJz0F9H7FNk2tTQQMhK5MVYwg+LIaee586 - CQVqfbscp7evlgjLW98H+5zylRHAgoH2G79aHljNKMp9BOuq6SnEglEsiWGVtu2l - hnx8SB3sVJZHeer8f/UQQwqbAO+Kdy70NmbSaqaVtp8jOxLiidWkwSyRTsuU6D8i - DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ== - -----END CERTIFICATE----- +{indent(CERT_CONTENT, " ")} """ @@ -88,13 +93,8 @@ def test_certs_updated(self, class_client: IntegrationInstance): def test_cert_installed(self, class_client: IntegrationInstance): """Test that our specified cert has been installed""" - checksum = class_client.execute( - "sha256sum /etc/ssl/certs/ca-certificates.crt" - ) - assert ( - "78e875f18c73c1aab9167ae0bd323391e52222cc2dbcda42d129537219300062" - in checksum - ) + certs = class_client.execute("cat /etc/ssl/certs/ca-certificates.crt") + assert CERT_CONTENT in certs def test_clean_log(self, class_client: IntegrationInstance): """Verify no errors, no deprecations and correct inactive modules in From c28092fa615961c7ae2f56738a19ea28331b84a7 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 15 Aug 2024 12:48:13 -0400 Subject: [PATCH 101/131] feat: collect-logs improvements (#5619) * Collect sensitive data by default since we ask for it more often than not * Output warning that we're collecting sensitive data * Glob most of /run/cloud-init, /etc/cloud, and /var/lib/cloud * Stop creating empty directories in the tarball * Require running as root given that the logs are root read-only * Update apport accordingly Fixes GH-5297 --- cloudinit/apport.py | 113 ++----- cloudinit/cmd/devel/logs.py | 362 +++++++++++++-------- cloudinit/stages.py | 4 +- 
cloudinit/util.py | 65 +++- tests/unittests/cmd/devel/test_logs.py | 427 ++++++++++++------------- tests/unittests/conftest.py | 18 -- tests/unittests/test_apport.py | 135 ++++---- tests/unittests/test_cli.py | 5 +- 8 files changed, 590 insertions(+), 539 deletions(-) diff --git a/cloudinit/apport.py b/cloudinit/apport.py index d52b79ab579..8d16db9cf78 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -9,7 +9,6 @@ import os from typing import Dict -from cloudinit.cmd.devel import read_cfg_paths from cloudinit.cmd.devel.logs import ( INSTALLER_APPORT_FILES, INSTALLER_APPORT_SENSITIVE_FILES, @@ -69,12 +68,7 @@ ] -def _get_user_data_file() -> str: - paths = read_cfg_paths() - return paths.get_ipath_cur("userdata_raw") - - -def attach_cloud_init_logs(report, ui=None): +def attach_cloud_init_logs(report, ui=None, include_sensitive=False): """Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.""" attach_root_command_outputs( # pyright: ignore report, @@ -82,15 +76,18 @@ def attach_cloud_init_logs(report, ui=None): "cloud-init-log-warnings": ( 'egrep -i "warn|error" /var/log/cloud-init.log' ), - "cloud-init-output.log.txt": "cat /var/log/cloud-init-output.log", }, ) - root_command_output( # pyright: ignore - ["cloud-init", "collect-logs", "-t", "/tmp/cloud-init-logs.tgz"] - ) - attach_file( # pyright: ignore - report, "/tmp/cloud-init-logs.tgz", "logs.tgz" - ) + command = [ + "cloud-init", + "collect-logs", + "-t", + "/tmp/cloud-init-logs.tgz", + ] + if not include_sensitive: + command.append("--redact") + root_command_output(command) + attach_file(report, "/tmp/cloud-init-logs.tgz", "logs.tgz") def attach_hwinfo(report, ui=None): @@ -104,47 +101,7 @@ def attach_hwinfo(report, ui=None): attach_root_command_outputs(report, {"lshw.txt": "lshw"}) -def attach_cloud_info(report, ui=None): - """Prompt for cloud details if instance-data unavailable. - - When we have valid _get_instance_data, apport/generic-hooks/cloud_init.py - provides CloudName, CloudID, CloudPlatform and CloudSubPlatform. - - Apport/generic-hooks are delivered by cloud-init's downstream branches - ubuntu/(devel|kinetic|jammy|focal|bionic) so they will not be represented - in upstream main. - - In absence of viable instance-data.json format, prompt for the cloud below. - """ - - if ui: - paths = read_cfg_paths() - try: - with open(paths.get_runpath("instance_data")) as file: - instance_data = json.load(file) - assert instance_data.get("v1", {}).get("cloud_name") - return # Valid instance-data means generic-hooks will report - except (IOError, json.decoder.JSONDecodeError, AssertionError): - pass - - # No valid /run/cloud/instance-data.json on system. Prompt for cloud. - prompt = "Is this machine running in a cloud environment?" - response = ui.yesno(prompt) - if response is None: - raise StopIteration # User cancelled - if response: - prompt = ( - "Please select the cloud vendor or environment in which" - " this instance is running" - ) - response = ui.choice(prompt, KNOWN_CLOUD_NAMES) - if response: - report["CloudName"] = KNOWN_CLOUD_NAMES[response[0]] - else: - report["CloudName"] = "None" - - -def attach_installer_files(report, ui=None): +def attach_installer_files(report, ui=None, include_sensitive=False): """Attach any subiquity installer logs config. 
To support decoupling apport integration from installer config/logs, @@ -155,6 +112,10 @@ def attach_installer_files(report, ui=None): for apport_file in INSTALLER_APPORT_FILES: realpath = os.path.realpath(apport_file.path) attach_file_if_exists(report, realpath, apport_file.label) + if include_sensitive: + for apport_file in INSTALLER_APPORT_SENSITIVE_FILES: + realpath = os.path.realpath(apport_file.path) + attach_file_if_exists(report, realpath, apport_file.label) def attach_ubuntu_pro_info(report, ui=None): @@ -168,27 +129,22 @@ def attach_ubuntu_pro_info(report, ui=None): report["Tags"] += "ubuntu-pro" -def attach_user_data(report, ui=None): +def can_attach_sensitive(report, ui=None) -> bool: """Optionally provide user-data if desired.""" - if ui: - user_data_file = _get_user_data_file() - prompt = ( - "Your user-data, cloud-config or autoinstall files can optionally " - " be provided from {0} and could be useful to developers when" - " addressing this bug. Do you wish to attach user-data to this" - " bug?".format(user_data_file) - ) - response = ui.yesno(prompt) - if response is None: - raise StopIteration # User cancelled - if response: - realpath = os.path.realpath(user_data_file) - attach_file(report, realpath, "user_data.txt") # pyright: ignore - for apport_file in INSTALLER_APPORT_SENSITIVE_FILES: - realpath = os.path.realpath(apport_file.path) - attach_file_if_exists( # pyright: ignore - report, realpath, apport_file.label - ) + if not ui: + return False + prompt = ( + "Your user data, cloud-config, network config, or autoinstall " + "files can optionally be provided and could be useful to " + "developers when addressing this bug. However, this data should " + "not be included if it contains any sensitive data such as " + "passwords and secrets. Gathering it requires admin privileges. " + "Would you like to include this info?" + ) + response = ui.yesno(prompt) + if response is None: + raise StopIteration # User cancelled + return response def add_bug_tags(report): @@ -222,11 +178,10 @@ def add_info(report, ui): raise RuntimeError( "No apport imports discovered. 
Apport functionality disabled" ) - attach_cloud_init_logs(report, ui) + include_sensitive = can_attach_sensitive(report, ui) + attach_cloud_init_logs(report, ui, include_sensitive) attach_hwinfo(report, ui) - attach_cloud_info(report, ui) - attach_user_data(report, ui) - attach_installer_files(report, ui) + attach_installer_files(report, ui, include_sensitive) attach_ubuntu_pro_info(report, ui) add_bug_tags(report) return True diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py index f5ae53ce26a..f18bfbed6ab 100755 --- a/cloudinit/cmd/devel/logs.py +++ b/cloudinit/cmd/devel/logs.py @@ -7,49 +7,25 @@ """Define 'collect-logs' utility and handler to include in cloud-init cmd.""" import argparse +import itertools import logging import os import pathlib -import shutil +import stat import subprocess import sys from datetime import datetime, timezone -from typing import List, NamedTuple, Optional, cast +from typing import Any, Dict, Iterator, List, NamedTuple, Optional, cast from cloudinit import log -from cloudinit.cmd.devel import read_cfg_paths from cloudinit.stages import Init from cloudinit.subp import ProcessExecutionError, subp from cloudinit.temp_utils import tempdir -from cloudinit.util import ( - chdir, - copy, - ensure_dir, - get_config_logfiles, - write_file, -) +from cloudinit.util import copy, get_config_logfiles, write_file LOG = cast(log.CustomLoggerType, logging.getLogger(__name__)) -class LogPaths(NamedTuple): - userdata_raw: str - cloud_data: str - run_dir: str - instance_data_sensitive: str - - -def get_log_paths(init: Optional[Init] = None) -> LogPaths: - """Return a Paths object based on the system configuration on disk.""" - paths = init.paths if init else read_cfg_paths() - return LogPaths( - userdata_raw=paths.get_ipath_cur("userdata_raw"), - cloud_data=paths.get_cpath("data"), - run_dir=paths.run_dir, - instance_data_sensitive=paths.lookups["instance_data_sensitive"], - ) - - class ApportFile(NamedTuple): path: str label: str @@ -140,24 +116,23 @@ def get_parser( action="store_true", dest="userdata", help=( - "Optionally include user-data from {0} which could contain" - " sensitive information.".format(get_log_paths().userdata_raw) + "DEPRECATED: This is default behavior and this flag does nothing" + ), + ) + parser.add_argument( + "--redact-sensitive", + "-r", + default=False, + action="store_true", + help=( + "Redact potentially sensitive data from logs. Sensitive data " + "may include passwords or keys in user data and " + "root read-only files." 
), ) return parser -def _get_copytree_ignore_files(paths: LogPaths) -> List[str]: - """Return a list of files to ignore for /run/cloud-init directory""" - ignored_files = [ - "hook-hotplug-cmd", # named pipe for hotplug - ] - if os.getuid() != 0: - # Ignore root-permissioned files - ignored_files.append(paths.instance_data_sensitive) - return ignored_files - - def _write_command_output_to_file( cmd: List[str], file_path: pathlib.Path, @@ -196,131 +171,213 @@ def _stream_command_output_to_file( LOG.debug("collected %s to file '%s'", msg, file_path.stem) -def _collect_file(path: str, out_dir: str) -> None: - if os.path.isfile(path): - copy(path, out_dir) - LOG.debug("collected file: %s", path) +def _collect_file( + path: pathlib.Path, out_dir: pathlib.Path, include_sensitive: bool +) -> None: + """Collect a file into what will be the tarball.""" + if path.is_file(): + if include_sensitive or path.stat().st_mode & stat.S_IROTH: + out_dir.mkdir(parents=True, exist_ok=True) + copy(path, out_dir) + LOG.debug("collected file: %s", path) + else: + LOG.trace("sensitive file %s was not collected", path) else: LOG.trace("file %s did not exist", path) -def _collect_installer_logs(log_dir: str, include_userdata: bool) -> None: +def _collect_installer_logs( + log_dir: pathlib.Path, include_sensitive: bool +) -> None: """Obtain subiquity logs and config files.""" for src_file in INSTALLER_APPORT_FILES: - destination_dir = pathlib.Path(log_dir + src_file.path).parent - if not destination_dir.exists(): - ensure_dir(str(destination_dir)) - _collect_file(src_file.path, str(destination_dir)) - if include_userdata: + destination_dir = pathlib.Path(log_dir, src_file.path[1:]).parent + _collect_file( + pathlib.Path(src_file.path), + destination_dir, + include_sensitive=True, # Because this function does check + ) + if include_sensitive: for src_file in INSTALLER_APPORT_SENSITIVE_FILES: - destination_dir = pathlib.Path(log_dir + src_file.path).parent - if not destination_dir.exists(): - ensure_dir(str(destination_dir)) - _collect_file(src_file.path, str(destination_dir)) + destination_dir = pathlib.Path(log_dir, src_file.path[1:]).parent + _collect_file( + pathlib.Path(src_file.path), + destination_dir, + include_sensitive=True, # Because this function does check + ) -def _collect_version_info(log_dir: str) -> None: +def _collect_version_info(log_dir: pathlib.Path) -> None: + """Include cloud-init version and dpkg version in the logs.""" version = _write_command_output_to_file( cmd=["cloud-init", "--version"], - file_path=pathlib.Path(log_dir, "version"), + file_path=log_dir / "version", msg="cloud-init --version", ) dpkg_ver = _write_command_output_to_file( cmd=["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"], - file_path=pathlib.Path(log_dir, "dpkg-version"), + file_path=log_dir / "dpkg-version", msg="dpkg version", ) if not version: version = dpkg_ver or "not-available" -def _collect_system_logs(log_dir: str) -> None: - _stream_command_output_to_file( - cmd=["dmesg"], - file_path=pathlib.Path(log_dir, "dmesg.txt"), - msg="dmesg output", - ) +def _collect_system_logs( + log_dir: pathlib.Path, include_sensitive: bool +) -> None: + """Include dmesg and journalctl output in the logs.""" + if include_sensitive: + _stream_command_output_to_file( + cmd=["dmesg"], + file_path=log_dir / "dmesg.txt", + msg="dmesg output", + ) _stream_command_output_to_file( cmd=["journalctl", "--boot=0", "-o", "short-precise"], - file_path=pathlib.Path(log_dir, "journal.txt"), + file_path=log_dir / "journal.txt", msg="systemd 
journal of current boot", ) + _stream_command_output_to_file( + cmd=["journalctl", "--boot=-1", "-o", "short-precise"], + file_path=pathlib.Path(log_dir, "journal-previous.txt"), + msg="systemd journal of previous boot", + ) -def _collect_cloudinit_logs( - log_dir: str, - init: Init, - paths: LogPaths, - include_userdata: bool, -) -> None: - for logfile in get_config_logfiles(init.cfg): - _collect_file(logfile, log_dir) - if include_userdata: - user_data_file = paths.userdata_raw - _collect_file(user_data_file, log_dir) - - -def _collect_run_dir(log_dir: str, paths: LogPaths) -> None: - run_dir = os.path.join(log_dir, "run") - ensure_dir(run_dir) - if os.path.exists(paths.run_dir): - try: - shutil.copytree( - paths.run_dir, - os.path.join(run_dir, "cloud-init"), - ignore=lambda _, __: _get_copytree_ignore_files(paths), - ) - except shutil.Error as e: - LOG.warning("Failed collecting file(s) due to error: %s", e) - LOG.debug("collected directory: %s", paths.run_dir) - else: - LOG.debug("directory '%s' did not exist", paths.run_dir) - if os.path.exists(os.path.join(paths.run_dir, "disabled")): - # Fallback to grab previous cloud/data - cloud_data_dir = pathlib.Path(paths.cloud_data) - if cloud_data_dir.exists(): - shutil.copytree( - str(cloud_data_dir), - pathlib.Path(log_dir + str(cloud_data_dir)), - ) +def _get_cloudinit_logs( + log_cfg: Dict[str, Any], +) -> Iterator[pathlib.Path]: + """Get paths for cloud-init.log and cloud-init-output.log.""" + for path in get_config_logfiles(log_cfg): + yield pathlib.Path(path) -def collect_logs(tarfile: str, include_userdata: bool) -> int: - """Collect all cloud-init logs and tar them up into the provided tarfile. +def _get_etc_cloud( + etc_cloud_dir: pathlib.Path = pathlib.Path("/etc/cloud"), +) -> Iterator[pathlib.Path]: + """Get paths for all files in /etc/cloud. - @param tarfile: The path of the tar-gzipped file to create. - @param include_userdata: Boolean, true means include user-data. - @return: 0 on success, 1 on failure. + Excludes: + /etc/cloud/keys because it may contain non-useful sensitive data. + /etc/cloud/templates because we already know its contents """ - if include_userdata and os.getuid() != 0: - LOG.error( - "To include userdata, root user is required. " - "Try sudo cloud-init collect-logs" + ignore = [ + etc_cloud_dir / "keys", + etc_cloud_dir / "templates", + # Captured by the installer apport files + "99-installer.cfg", + ] + yield from ( + path + for path in etc_cloud_dir.glob("**/*") + if path.name not in ignore and path.parent not in ignore + ) + + +def _get_var_lib_cloud(cloud_dir: pathlib.Path) -> Iterator[pathlib.Path]: + """Get paths for files in /var/lib/cloud. + + Skip user-provided scripts, semaphores, and old instances. + """ + return itertools.chain( + cloud_dir.glob("data/*"), + cloud_dir.glob("handlers/*"), + cloud_dir.glob("seed/*"), + (p for p in cloud_dir.glob("instance/*") if p.is_file()), + cloud_dir.glob("instance/handlers"), + ) + + +def _get_run_dir(run_dir: pathlib.Path) -> Iterator[pathlib.Path]: + """Get all paths under /run/cloud-init except for hook-hotplug-cmd. + + Note that this only globs the top-level directory as there are currently + no relevant files within subdirectories. 
+ """ + return (p for p in run_dir.glob("*") if p.name != "hook-hotplug-cmd") + + +def _collect_logs_into_tmp_dir( + log_dir: pathlib.Path, + log_cfg: Dict[str, Any], + run_dir: pathlib.Path, + cloud_dir: pathlib.Path, + include_sensitive: bool, +) -> None: + """Collect all cloud-init logs into the provided directory.""" + _collect_version_info(log_dir) + _collect_system_logs(log_dir, include_sensitive) + _collect_installer_logs(log_dir, include_sensitive) + + for logfile in _get_cloudinit_logs(log_cfg): + # Even though log files are root read-only, the logs tarball + # would be useless without them and we've been careful to not + # include sensitive data in them. + _collect_file( + logfile, + log_dir / pathlib.Path(logfile).parent.relative_to("/"), + True, + ) + for logfile in itertools.chain( + _get_etc_cloud(), + _get_var_lib_cloud(cloud_dir=cloud_dir), + _get_run_dir(run_dir=run_dir), + ): + _collect_file( + logfile, + log_dir / pathlib.Path(logfile).parent.relative_to("/"), + include_sensitive, ) - return 1 + +def collect_logs( + tarfile: str, + log_cfg: Dict[str, Any], + run_dir: pathlib.Path = pathlib.Path("/run/cloud-init"), + cloud_dir: pathlib.Path = pathlib.Path("/var/lib/cloud"), + include_sensitive: bool = True, +) -> None: + """Collect all cloud-init logs and tar them up into the provided tarfile. + + :param tarfile: The path of the tar-gzipped file to create. + :param log_cfg: The cloud-init base configuration containing logging cfg. + :param run_dir: The path to the cloud-init run directory. + :param cloud_dir: The path to the cloud-init cloud directory. + :param include_sensitive: Boolean, true means include sensitive data. + """ tarfile = os.path.abspath(tarfile) - log_dir = ( + dir_name = ( datetime.now(timezone.utc).date().strftime("cloud-init-logs-%Y-%m-%d") ) - with tempdir(dir="/tmp") as tmp_dir: - log_dir = os.path.join(tmp_dir, log_dir) - init = Init(ds_deps=[]) - init.read_cfg() - paths = get_log_paths(init) - - _collect_version_info(log_dir) - _collect_system_logs(log_dir) - _collect_cloudinit_logs(log_dir, init, paths, include_userdata) - _collect_installer_logs(log_dir, include_userdata) - _collect_run_dir(log_dir, paths) - with chdir(tmp_dir): - subp(["tar", "czvf", tarfile, log_dir.replace(f"{tmp_dir}/", "")]) + with tempdir(dir=run_dir) as tmp_dir: + log_dir = pathlib.Path(tmp_dir, dir_name) + _collect_logs_into_tmp_dir( + log_dir=log_dir, + log_cfg=log_cfg, + run_dir=run_dir, + cloud_dir=cloud_dir, + include_sensitive=include_sensitive, + ) + subp( + [ + "tar", + "czf", + tarfile, + "-C", + tmp_dir, + str(log_dir).replace(f"{tmp_dir}/", ""), + ] + ) LOG.info("Wrote %s", tarfile) - return 0 def _setup_logger(verbosity: int) -> None: + """Set up the logger for CLI use. + + The verbosity controls which level gets printed to stderr. By default, + DEBUG and TRACE are hidden. 
+ """ log.reset_logging() if verbosity == 0: level = logging.INFO @@ -334,13 +391,56 @@ def _setup_logger(verbosity: int) -> None: LOG.addHandler(handler) -def handle_collect_logs_args(_name: str, args: argparse.Namespace) -> int: +def collect_logs_cli( + tarfile: str, + verbosity: int = 0, + redact_sensitive: bool = True, + include_userdata: bool = False, +) -> None: """Handle calls to 'cloud-init collect-logs' as a subcommand.""" - _setup_logger(args.verbosity) - return collect_logs( - tarfile=args.tarfile, - include_userdata=args.userdata, + _setup_logger(verbosity) + if os.getuid() != 0: + raise RuntimeError("This command must be run as root.") + if include_userdata: + LOG.warning( + "The --include-userdata flag is deprecated and does nothing." + ) + init = Init(ds_deps=[]) + init.read_cfg() + + collect_logs( + tarfile=tarfile, + log_cfg=init.cfg, + run_dir=pathlib.Path(init.paths.run_dir), + cloud_dir=pathlib.Path(init.paths.cloud_dir), + include_sensitive=not redact_sensitive, ) + if not redact_sensitive: + LOG.warning( + "WARNING:\n" + "Sensitive data may have been included in the collected logs.\n" + "Please review the contents of the tarball before sharing or\n" + "rerun with --redact-sensitive to redact sensitive data." + ) + + +def handle_collect_logs_args(_name: str, args: argparse.Namespace) -> int: + """Handle the CLI interface to the module. + + Parse CLI args, redirect all exceptions to stderr, and return an exit code. + """ + args = get_parser().parse_args() + try: + collect_logs_cli( + verbosity=args.verbosity, + tarfile=args.tarfile, + redact_sensitive=args.redact_sensitive, + include_userdata=args.userdata, + ) + return 0 + except Exception as e: + print(e, file=sys.stderr) + return 1 if __name__ == "__main__": diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 854e318e992..b6394ffbd3c 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -12,7 +12,7 @@ from collections import namedtuple from contextlib import suppress from pathlib import Path -from typing import Dict, Iterable, List, Optional, Set, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union from cloudinit import ( atomic_helper, @@ -139,7 +139,7 @@ def __init__(self, ds_deps: Optional[List[str]] = None, reporter=None): else: self.ds_deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] # Created on first use - self._cfg: Dict = {} + self._cfg: Dict[str, Any] = {} self._paths: Optional[helpers.Paths] = None self._distro: Optional[distros.Distro] = None # Changed only when a fetch occurs diff --git a/cloudinit/util.py b/cloudinit/util.py index 31ba1c83574..9c90e35f5ee 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1706,16 +1706,45 @@ def chownbyname(fname, user=None, group=None): chownbyid(fname, uid, gid) -# Always returns well formatted values -# cfg is expected to have an entry 'output' in it, which is a dictionary -# that includes entries for 'init', 'config', 'final' or 'all' -# init: /var/log/cloud.out -# config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ] -# final: -# output: "| logger -p" -# error: "> /dev/null" -# this returns the specific 'mode' entry, cleanly formatted, with value -def get_output_cfg(cfg, mode) -> List[Optional[str]]: +def get_output_cfg( + cfg: Dict[str, Any], mode: Optional[str] +) -> List[Optional[str]]: + """Get the output configuration for a given mode. + + The output config is a dictionary that specifies how to deal with stdout + and stderr for the cloud-init modules. 
It is a (frustratingly) flexible
+    format that can take multiple forms such as:
+    output: { all: "| tee -a /var/log/cloud-init-output.log" }
+    or
+    output:
+        init:
+            output: "> /var/log/cloud-init.out"
+            error: "> /var/log/cloud-init.err"
+        config: "tee -a /var/log/cloud-config.log"
+        final:
+            - ">> /var/log/cloud-final.out"
+            - "/var/log/cloud-final.err"
+
+    Mode can be one of the configuration stages. If you pass a
+    non-existent mode, it will assume the "all" mode configuration if
+    defined.
+
+    Stderr can be specified as &1 to indicate that it should
+    be the same as stdout.
+
+    If a file is specified with no redirection, it will default to
+    appending to the file.
+
+    If not overridden, output is provided in
+    '/etc/cloud/cloud.cfg.d/05_logging.cfg' and defaults to:
+    {"all": "| tee -a /var/log/cloud-init-output.log"}
+
+    :param cfg: The base configuration that may or may not contain the
+        'output' configuration dictionary
+    :param mode: The mode to get the output configuration for.
+    :return: A list of two strings (or Nones), the first for stdout for the
+        specified mode and the second for stderr.
+    """
     ret: List[Optional[str]] = [None, None]
     if not cfg or "output" not in cfg:
         return ret
@@ -1724,6 +1753,8 @@ def get_output_cfg(cfg, mode) -> List[Optional[str]]:
         if mode in outcfg:
             modecfg = outcfg[mode]
         else:
+            # TODO: This makes no sense. If they ask for "junk" mode we give
+            # them back "all" if it exists?
             if "all" not in outcfg:
                 return ret
             # if there is a 'all' item in the output list
@@ -1741,7 +1772,7 @@ def get_output_cfg(cfg, mode) -> List[Optional[str]]:
         if len(modecfg) > 1:
             ret[1] = modecfg[1]
 
-    # if it is a dictionary, expect 'out' and 'error'
+    # if it is a dictionary, expect 'output' and 'error'
     # items, which indicate out and error
     if isinstance(modecfg, dict):
         if "output" in modecfg:
@@ -1773,9 +1804,19 @@ def get_output_cfg(cfg, mode) -> List[Optional[str]]:
     return ret
 
 
-def get_config_logfiles(cfg):
+def get_config_logfiles(cfg: Dict[str, Any]):
     """Return a list of log file paths from the configuration dictionary.
 
+    Obtains the paths from the 'def_log_file' and 'output' configuration
+    defined in the base configuration.
+
+    If not provided in base configuration, 'def_log_file' is specified in
+    'cloudinit/settings.py' and defaults to:
+    /var/log/cloud-init.log
+    If not overridden, output is provided in
+    '/etc/cloud/cloud.cfg.d/05_logging.cfg' and defaults to:
+    {"all": "| tee -a /var/log/cloud-init-output.log"}
+
     @param cfg: The cloud-init merged configuration dictionary.
     """
     logs: List = []
diff --git a/tests/unittests/cmd/devel/test_logs.py b/tests/unittests/cmd/devel/test_logs.py
index b1d9f585d30..60f54e1a8cb 100644
--- a/tests/unittests/cmd/devel/test_logs.py
+++ b/tests/unittests/cmd/devel/test_logs.py
@@ -2,8 +2,9 @@
 
 import glob
 import os
-import re
-from datetime import datetime
+import pathlib
+import tarfile
+from datetime import datetime, timezone
 
 import pytest
 
@@ -11,225 +12,235 @@
 from cloudinit.cmd.devel.logs import ApportFile
 from cloudinit.subp import SubpResult, subp
 from cloudinit.util import ensure_dir, load_text_file, write_file
-from tests.unittests.helpers import mock
 
 M_PATH = "cloudinit.cmd.devel.logs."
INSTANCE_JSON_SENSITIVE_FILE = "instance-data-sensitive.json" -@mock.patch("cloudinit.cmd.devel.logs.os.getuid") -class TestCollectLogs: - def test_collect_logs_with_userdata_requires_root_user( - self, m_getuid, tmpdir, caplog - ): - """collect-logs errors when non-root user collects userdata .""" - m_getuid.return_value = 100 # non-root - output_tarfile = tmpdir.join("logs.tgz") - assert 1 == logs.collect_logs(output_tarfile, include_userdata=True) - assert ( - "To include userdata, root user is required." - " Try sudo cloud-init collect-logs" in caplog.text - ) +def fake_subp(cmd): + if cmd[0] == "tar" and cmd[1] == "czf": + subp(cmd) # Pass through tar cmd so we can check output + return SubpResult("", "") - def test_collect_logs_creates_tarfile( - self, m_getuid, m_log_paths, mocker, tmpdir, caplog - ): - """collect-logs creates a tarfile with all related cloud-init info.""" - m_getuid.return_value = 100 - log1 = tmpdir.join("cloud-init.log") - write_file(log1, "cloud-init-log") - log1_rotated = tmpdir.join("cloud-init.log.1.gz") - write_file(log1_rotated, "cloud-init-log-rotated") - log2 = tmpdir.join("cloud-init-output.log") - write_file(log2, "cloud-init-output-log") - log2_rotated = tmpdir.join("cloud-init-output.log.1.gz") - write_file(log2_rotated, "cloud-init-output-log-rotated") - run_dir = m_log_paths.run_dir - write_file(str(run_dir / "results.json"), "results") - write_file( - str(m_log_paths.instance_data_sensitive), - "sensitive", + expected_subp = { + ( + "dpkg-query", + "--show", + "-f=${Version}\n", + "cloud-init", + ): "0.7fake\n", + ("cloud-init", "--version"): "over 9000\n", + } + cmd_tuple = tuple(cmd) + if cmd_tuple not in expected_subp: + raise AssertionError( + "Unexpected command provided to subp: {0}".format(cmd) ) - output_tarfile = str(tmpdir.join("logs.tgz")) - mocker.patch(M_PATH + "Init", autospec=True) - mocker.patch( - M_PATH + "get_config_logfiles", - return_value=[log1, log1_rotated, log2, log2_rotated], - ) + return SubpResult(expected_subp[cmd_tuple], "") - date = datetime.utcnow().date().strftime("%Y-%m-%d") - date_logdir = "cloud-init-logs-{0}".format(date) - version_out = "/usr/bin/cloud-init 18.2fake\n" - expected_subp = { - ( - "dpkg-query", - "--show", - "-f=${Version}\n", - "cloud-init", - ): "0.7fake\n", - ("cloud-init", "--version"): version_out, - ("dmesg",): "dmesg-out\n", - ("journalctl", "--boot=0", "-o", "short-precise"): "journal-out\n", - ("tar", "czvf", output_tarfile, date_logdir): "", - } - - def fake_subp(cmd): - cmd_tuple = tuple(cmd) - if cmd_tuple not in expected_subp: - raise AssertionError( - "Unexpected command provided to subp: {0}".format(cmd) - ) - if cmd == ["tar", "czvf", output_tarfile, date_logdir]: - subp(cmd) # Pass through tar cmd so we can check output - return SubpResult(expected_subp[cmd_tuple], "") - - # the new _stream_command_output_to_file function uses subprocess.call - # instead of subp, so we need to mock that as well - def fake_subprocess_call(cmd, stdout=None, stderr=None): - cmd_tuple = tuple(cmd) - if cmd_tuple not in expected_subp: - raise AssertionError( - "Unexpected command provided to subprocess: {0}".format( - cmd - ) - ) - stdout.write(expected_subp[cmd_tuple]) - - mocker.patch(M_PATH + "subp", side_effect=fake_subp) - mocker.patch( - M_PATH + "subprocess.call", side_effect=fake_subprocess_call +# the new _stream_command_output_to_file function uses subprocess.call +# instead of subp, so we need to mock that as well +def fake_subprocess_call(cmd, stdout=None, stderr=None): + expected_calls = { 
+        ("dmesg",): "dmesg-out\n",
+        ("journalctl", "--boot=0", "-o", "short-precise"): "journal-out\n",
+        ("journalctl", "--boot=-1", "-o", "short-precise"): "journal-prev\n",
+    }
+    cmd_tuple = tuple(cmd)
+    if cmd_tuple not in expected_calls:
+        raise AssertionError(
+            "Unexpected command provided to subprocess: {0}".format(cmd)
         )
-        mocker.patch(M_PATH + "INSTALLER_APPORT_FILES", [])
-        mocker.patch(M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", [])
-        logs.collect_logs(output_tarfile, include_userdata=False)
-        # unpack the tarfile and check file contents
-        subp(["tar", "zxvf", output_tarfile, "-C", str(tmpdir)])
-        out_logdir = tmpdir.join(date_logdir)
-        assert not os.path.exists(
-            os.path.join(
-                out_logdir,
-                "run",
-                "cloud-init",
-                INSTANCE_JSON_SENSITIVE_FILE,
+    stdout.write(expected_calls[cmd_tuple])
+
+
+def patch_subiquity_paths(mocker, tmp_path):
+    mocker.patch(
+        "cloudinit.cmd.devel.logs.INSTALLER_APPORT_FILES",
+        [
+            ApportFile(
+                str(tmp_path / "subiquity-server-debug.log"),
+                "subiquityServerDebug",
             )
-        ), (
-            "Unexpected file found: %s" % INSTANCE_JSON_SENSITIVE_FILE
-        )
-        assert "0.7fake\n" == load_text_file(
-            os.path.join(out_logdir, "dpkg-version")
-        )
-        assert version_out == load_text_file(
-            os.path.join(out_logdir, "version")
-        )
-        assert "cloud-init-log" == load_text_file(
-            os.path.join(out_logdir, "cloud-init.log")
-        )
-        assert "cloud-init-log-rotated" == load_text_file(
-            os.path.join(out_logdir, "cloud-init.log.1.gz")
-        )
-        assert "cloud-init-output-log" == load_text_file(
-            os.path.join(out_logdir, "cloud-init-output.log")
-        )
-        assert "cloud-init-output-log-rotated" == load_text_file(
-            os.path.join(out_logdir, "cloud-init-output.log.1.gz")
-        )
-        assert "dmesg-out\n" == load_text_file(
-            os.path.join(out_logdir, "dmesg.txt")
-        )
-        assert "journal-out\n" == load_text_file(
-            os.path.join(out_logdir, "journal.txt")
+        ],
+    )
+    mocker.patch(
+        "cloudinit.cmd.devel.logs.INSTALLER_APPORT_SENSITIVE_FILES",
+        [
+            ApportFile(
+                str(tmp_path / "autoinstall-user-data"), "AutoInstallUserData"
+            )
+        ],
+    )
+
+
+class TestCollectLogs:
+    def test_collect_logs_requires_root_user(self, mocker):
+        """collect-logs errors when invoked by a non-root user."""
+        # 100 is non-root
+        mocker.patch("cloudinit.cmd.devel.logs.os.getuid", return_value=100)
+        # If we don't mock this, we can change logging for future tests
+        mocker.patch("cloudinit.cmd.devel.logs._setup_logger")
+        with pytest.raises(
+            RuntimeError, match="This command must be run as root"
+        ):
+            logs.collect_logs_cli("")
+
+    def test_collect_logs_end_to_end(self, mocker, tmp_path):
+        mocker.patch(f"{M_PATH}subp", side_effect=fake_subp)
+        mocker.patch(
+            f"{M_PATH}subprocess.call", side_effect=fake_subprocess_call
        )
-        assert "results" == load_text_file(
-            os.path.join(out_logdir, "run", "cloud-init", "results.json")
+        mocker.patch(
+            f"{M_PATH}_get_etc_cloud",
+            return_value=[
+                tmp_path / "etc/cloud/cloud.cfg",
+                tmp_path / "etc/cloud/cloud.cfg.d/90-dpkg.cfg",
+            ],
        )
-        assert f"Wrote {output_tarfile}" in caplog.text
+        patch_subiquity_paths(mocker, tmp_path)
+        today = datetime.now(timezone.utc).date().strftime("%Y-%m-%d")
 
-    def test_collect_logs_includes_optional_userdata(
-        self, m_getuid, mocker, tmpdir, m_log_paths, caplog
-    ):
-        """collect-logs include userdata when --include-userdata is set."""
-        m_getuid.return_value = 0
-        log1 = tmpdir.join("cloud-init.log")
-        write_file(log1, "cloud-init-log")
-        log2 = tmpdir.join("cloud-init-output.log")
-        write_file(log2, "cloud-init-output-log")
-        userdata = m_log_paths.userdata_raw
-        
write_file(str(userdata), "user-data") - run_dir = m_log_paths.run_dir - write_file(str(run_dir / "results.json"), "results") - write_file( - str(m_log_paths.instance_data_sensitive), - "sensitive", + # This list isn't exhaustive + to_collect = [ + "etc/cloud/cloud.cfg", + "etc/cloud/cloud.cfg.d/90-dpkg.cfg", + "var/lib/cloud/instance/instance-id", + "var/lib/cloud/instance/user-data.txt", + "var/lib/cloud/instance/user-data.txt.i", + "var/lib/cloud/handlers/wtf-i-wrote-a-handler.py", + "var/log/cloud-init.log", + "var/log/cloud-init-output.log", + "var/log/cloud-init.log.1.gz", + "var/log/cloud-init-output.log.1.gz", + "run/cloud-init/results.json", + "run/cloud-init/status.json", + "run/cloud-init/instance-data-sensitive.json", + "run/cloud-init/instance-data.json", + "subiquity-server-debug.log", + "autoinstall-user-data", + ] + for to_write in to_collect: + write_file(tmp_path / to_write, pathlib.Path(to_write).name) + + # logs.collect_logs("cloud-init.tar.gz", {}) + logs.collect_logs( + tarfile=tmp_path / "cloud-init.tar.gz", + log_cfg={ + "def_log_file": str(tmp_path / "var/log/cloud-init.log"), + "output": { + "all": f"| tee -a {tmp_path}/var/log/cloud-init-output.log" + }, + }, + run_dir=tmp_path / "run/cloud-init", + cloud_dir=tmp_path / "var/lib/cloud", + include_sensitive=True, ) - output_tarfile = str(tmpdir.join("logs.tgz")) + extract_to = tmp_path / "extracted" + extract_to.mkdir() + with tarfile.open(tmp_path / "cloud-init.tar.gz") as tar: + tar.extractall(extract_to) + extracted_dir = extract_to / f"cloud-init-logs-{today}" + + for name in to_collect: + # Since we've collected absolute paths, that means even though + # our extract contents are within the tmp_path, the files will + # include another layer of tmp_path directories + assert (extracted_dir / str(tmp_path)[1:] / name).exists() - mocker.patch(M_PATH + "Init", autospec=True) + assert (extracted_dir / "journal.txt").read_text() == "journal-out\n" + assert (extracted_dir / "dmesg.txt").read_text() == "dmesg-out\n" + assert (extracted_dir / "dpkg-version").read_text() == "0.7fake\n" + assert (extracted_dir / "version").read_text() == "over 9000\n" + + def test_logs_and_installer_ignore_sensitive_flag(self, mocker, tmp_path): + """Regardless of the sensitive flag, we always want these logs.""" + mocker.patch(f"{M_PATH}subp", side_effect=fake_subp) mocker.patch( - M_PATH + "get_config_logfiles", - return_value=[log1, log2], + f"{M_PATH}subprocess.call", side_effect=fake_subprocess_call ) + mocker.patch(f"{M_PATH}_get_etc_cloud", return_value=[]) + patch_subiquity_paths(mocker, tmp_path) - date = datetime.utcnow().date().strftime("%Y-%m-%d") - date_logdir = "cloud-init-logs-{0}".format(date) + to_collect = [ + "var/log/cloud-init.log", + "var/log/cloud-init-output.log", + "var/log/cloud-init.log.1.gz", + "var/log/cloud-init-output.log.1.gz", + "subiquity-server-debug.log", + ] - version_out = "/usr/bin/cloud-init 18.2fake\n" - expected_subp = { - ( - "dpkg-query", - "--show", - "-f=${Version}\n", - "cloud-init", - ): "0.7fake", - ("cloud-init", "--version"): version_out, - ("dmesg",): "dmesg-out\n", - ("journalctl", "--boot=0", "-o", "short-precise"): "journal-out\n", - ("tar", "czvf", output_tarfile, date_logdir): "", - } - - def fake_subp(cmd): - cmd_tuple = tuple(cmd) - if cmd_tuple not in expected_subp: - raise AssertionError( - "Unexpected command provided to subp: {0}".format(cmd) - ) - if cmd == ["tar", "czvf", output_tarfile, date_logdir]: - subp(cmd) # Pass through tar cmd so we can check output - return 
SubpResult(expected_subp[cmd_tuple], "")
-
-        def fake_subprocess_call(cmd, stdout=None, stderr=None):
-            cmd_tuple = tuple(cmd)
-            if cmd_tuple not in expected_subp:
-                raise AssertionError(
-                    "Unexpected command provided to subprocess: {0}".format(
-                        cmd
-                    )
-                )
-            stdout.write(expected_subp[cmd_tuple])
-
-        mocker.patch(M_PATH + "subp", side_effect=fake_subp)
-        mocker.patch(
-            M_PATH + "subprocess.call", side_effect=fake_subprocess_call
+        for to_write in to_collect:
+            write_file(
+                tmp_path / to_write, pathlib.Path(to_write).name, mode=0o700
+            )
+
+        collect_dir = tmp_path / "collect"
+        collect_dir.mkdir()
+        logs._collect_logs_into_tmp_dir(
+            log_dir=collect_dir,
+            log_cfg={
+                "def_log_file": str(tmp_path / "var/log/cloud-init.log"),
+                "output": {
+                    "all": f"| tee -a {tmp_path}/var/log/cloud-init-output.log"
+                },
+            },
+            run_dir=collect_dir,
+            cloud_dir=collect_dir,
+            include_sensitive=False,
         )
-        mocker.patch(M_PATH + "INSTALLER_APPORT_FILES", [])
-        mocker.patch(M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", [])
-        logs.collect_logs(output_tarfile, include_userdata=True)
-        # unpack the tarfile and check file contents
-        subp(["tar", "zxvf", output_tarfile, "-C", str(tmpdir)])
-        out_logdir = tmpdir.join(date_logdir)
-        assert "user-data" == load_text_file(
-            os.path.join(out_logdir, userdata.name)
+
+        for name in to_collect:
+            assert (collect_dir / str(tmp_path)[1:] / name).exists()
+
+    def test_root_read_only_not_collected_on_redact(self, mocker, tmp_path):
+        """Don't collect root read-only files."""
+        mocker.patch(f"{M_PATH}subp", side_effect=fake_subp)
+        mocker.patch(
+            f"{M_PATH}subprocess.call", side_effect=fake_subprocess_call
        )
-        assert "sensitive" == load_text_file(
-            os.path.join(
-                out_logdir,
-                "run",
-                "cloud-init",
+        mocker.patch(f"{M_PATH}_get_etc_cloud", return_value=[])
+        patch_subiquity_paths(mocker, tmp_path)
+
+        to_collect = [
+            "etc/cloud/cloud.cfg",
+            "etc/cloud/cloud.cfg.d/90-dpkg.cfg",
+            "var/lib/cloud/instance/instance-id",
+            "var/lib/cloud/instance/user-data.txt",
+            "var/lib/cloud/instance/user-data.txt.i",
+            "var/lib/cloud/handlers/wtf-i-wrote-a-handler.py",
+            "run/cloud-init/results.json",
+            "run/cloud-init/status.json",
+            "run/cloud-init/instance-data-sensitive.json",
+            "run/cloud-init/instance-data.json",
+            "autoinstall-user-data",
+        ]
+
+        for to_write in to_collect:
+            write_file(
+                tmp_path / to_write, pathlib.Path(to_write).name, mode=0o700
            )
+
+        collect_dir = tmp_path / "collect"
+        collect_dir.mkdir()
+        logs._collect_logs_into_tmp_dir(
+            log_dir=collect_dir,
+            log_cfg={
+                "def_log_file": str(tmp_path / "var/log/cloud-init.log"),
+                "output": {
+                    "all": f"| tee -a {tmp_path}/var/log/cloud-init-output.log"
+                },
+            },
+            run_dir=collect_dir,
+            cloud_dir=collect_dir,
+            include_sensitive=False,
        )
-        assert f"Wrote {output_tarfile}" in caplog.text
+
+        for name in to_collect:
+            assert not (collect_dir / str(tmp_path)[1:] / name).exists()
+        assert not (collect_dir / "dmesg.txt").exists()
 
     @pytest.mark.parametrize(
         "cmd, expected_file_contents, expected_return_value",
         (
@@ -255,13 +266,11 @@
     )
     def test_write_command_output_to_file(
         self,
-        m_getuid,
         tmp_path,
         cmd,
         expected_file_contents,
         expected_return_value,
     ):
-        m_getuid.return_value = 100
         output_file = tmp_path / "test-output-file.txt"
 
         return_output = logs._write_command_output_to_file(
@@ -281,9 +290,8 @@ def test_write_command_output_to_file(
         ],
     )
     def test_stream_command_output_to_file(
-        self, m_getuid, tmp_path, cmd, 
expected_file_contents + self, tmp_path, cmd, expected_file_contents ): - m_getuid.return_value = 100 output_file = tmp_path / "test-output-file.txt" logs._stream_command_output_to_file( @@ -297,7 +305,7 @@ def test_stream_command_output_to_file( class TestCollectInstallerLogs: @pytest.mark.parametrize( - "include_userdata, apport_files, apport_sensitive_files", + "include_sensitive, apport_files, apport_sensitive_files", ( pytest.param(True, [], [], id="no_files_include_userdata"), pytest.param(False, [], [], id="no_files_exclude_userdata"), @@ -323,7 +331,7 @@ class TestCollectInstallerLogs: ) def test_include_installer_logs_when_present( self, - include_userdata, + include_sensitive, apport_files, apport_sensitive_files, tmpdir, @@ -357,7 +365,7 @@ def test_include_installer_logs_when_present( apport_sensitive_files[-1].path, apport_sensitive_files[-1].label, ) - if include_userdata: + if include_sensitive: expected_files += [ destination_dir.join( os.path.basename(apport_sensitive_files[-1].path) @@ -369,9 +377,9 @@ def test_include_installer_logs_when_present( ) logs._collect_installer_logs( log_dir=tmpdir.strpath, - include_userdata=include_userdata, + include_sensitive=include_sensitive, ) - expect_userdata = bool(include_userdata and apport_sensitive_files) + expect_userdata = bool(include_sensitive and apport_sensitive_files) # when subiquity artifacts exist, and userdata set true, expect logs expect_subiquity_logs = any([apport_files, expect_userdata]) if expect_subiquity_logs: @@ -381,12 +389,3 @@ def test_include_installer_logs_when_present( ) else: assert not destination_dir.exists(), "Unexpected subiquity dir" - - -class TestParser: - def test_parser_help_has_userdata_file(self, m_log_paths, mocker, tmpdir): - # userdata = str(tmpdir.join("user-data.txt")) - userdata = m_log_paths.userdata_raw - assert str(userdata) in re.sub( - r"\s+", "", logs.get_parser().format_help() - ) diff --git a/tests/unittests/conftest.py b/tests/unittests/conftest.py index 375e1d14840..60f2b675358 100644 --- a/tests/unittests/conftest.py +++ b/tests/unittests/conftest.py @@ -1,7 +1,6 @@ import builtins import glob import os -import pathlib import shutil from pathlib import Path from unittest import mock @@ -9,7 +8,6 @@ import pytest from cloudinit import atomic_helper, lifecycle, log, util -from cloudinit.cmd.devel import logs from cloudinit.gpg import GPG from tests.hypothesis import HAS_HYPOTHESIS from tests.unittests.helpers import example_netdev, retarget_many_wrapper @@ -169,19 +167,3 @@ def tmp_path(tmpdir): settings.register_profile("ci", max_examples=1000) settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "default")) - - -@pytest.fixture -def m_log_paths(mocker, tmp_path): - """Define logs.LogPaths for testing and mock get_log_paths with it.""" - paths = logs.LogPaths( - userdata_raw=tmp_path / "userdata_raw", - cloud_data=tmp_path / "cloud_data", - run_dir=tmp_path / "run_dir", - instance_data_sensitive=tmp_path - / "run_dir" - / "instance_data_sensitive", - ) - pathlib.Path(paths.run_dir).mkdir() - mocker.patch.object(logs, "get_log_paths", return_value=paths) - yield paths diff --git a/tests/unittests/test_apport.py b/tests/unittests/test_apport.py index 88277c1daa9..0bf6d065190 100644 --- a/tests/unittests/test_apport.py +++ b/tests/unittests/test_apport.py @@ -1,4 +1,3 @@ -import os import sys from importlib import reload @@ -10,77 +9,59 @@ M_PATH = "cloudinit.apport." 
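
+# NOTE (editorial comment, not part of the original patch): the fixture
+# below swaps a mock into sys.modules for "apport.hookutils" and reloads
+# cloudinit.apport so its module-level imports bind to the mock; the second
+# reload after the yield restores the real bindings for later tests.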
+@pytest.fixture +def m_hookutils(): + m_hookutils = mock.Mock() + with mock.patch.dict(sys.modules, {"apport.hookutils": m_hookutils}): + reload(sys.modules["cloudinit.apport"]) + yield m_hookutils + reload(sys.modules["cloudinit.apport"]) + + class TestApport: - @pytest.mark.parametrize( - "instance_data,choice_idx,expected_report", - ( - pytest.param( - '{"v1": {"cloud_name": "mycloud"}}', - None, - {}, - id="v1_cloud_name_exists", - ), - pytest.param( - '{"v1": {"cloud_id": "invalid"}}', - 1, - {"CloudName": "Azure"}, - id="v1_no_cloud_name_present", - ), - pytest.param("{}", 0, {"CloudName": "AliYun"}, id="no_v1_key"), - pytest.param( - "{", 22, {"CloudName": "Oracle"}, id="not_valid_json" - ), - ), - ) - def test_attach_cloud_info( - self, instance_data, choice_idx, expected_report, mocker, paths - ): - """Prompt for cloud name when instance-data.json is not-json/absent.""" - mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) - instance_data_file = paths.get_runpath("instance_data") - if instance_data is None: - assert not os.path.exists(instance_data_file) - else: - with open(instance_data_file, "w") as stream: - stream.write(instance_data) + def test_can_attach_sensitive(self): ui = mock.Mock() - ui.yesno.return_value = True - ui.choice.return_value = (choice_idx, "") - report = {} - apport.attach_cloud_info(report, ui) - if choice_idx is not None: - assert ui.choice.call_count == 1 - assert report["CloudName"] == apport.KNOWN_CLOUD_NAMES[choice_idx] - else: - assert ui.choice.call_count == 0 - def test_attach_user_data(self, mocker, paths): - user_data_file = paths.get_ipath_cur("userdata_raw") - ui = mock.Mock() ui.yesno.return_value = True - report = object() - m_hookutils = mock.Mock() + assert apport.can_attach_sensitive(object(), ui) is True - with mock.patch.dict(sys.modules, {"apport.hookutils": m_hookutils}): - reload(sys.modules["cloudinit.apport"]) - mocker.patch(M_PATH + "read_cfg_paths", return_value=paths) - apport.attach_user_data(report, ui) - assert [ - mock.call(report, user_data_file, "user_data.txt"), - ] == apport.attach_file.call_args_list - assert [ - mock.call( - report, - "/var/log/installer/autoinstall-user-data", - "AutoInstallUserData", - ), - mock.call(report, "/autoinstall.yaml", "AutoInstallYAML"), - mock.call( - report, - "/etc/cloud/cloud.cfg.d/99-installer.cfg", - "InstallerCloudCfg", - ), - ] == apport.attach_file_if_exists.call_args_list + ui.yesno.return_value = False + assert apport.can_attach_sensitive(object(), ui) is False + + ui.yesno.return_value = None + + with pytest.raises(StopIteration): + apport.can_attach_sensitive(object(), ui) + + @pytest.mark.parametrize("include_sensitive", (True, False)) + def test_attach_cloud_init_logs( + self, include_sensitive, mocker, m_hookutils + ): + mocker.patch(f"{M_PATH}attach_root_command_outputs") + mocker.patch(f"{M_PATH}attach_file") + m_root_command = mocker.patch(f"{M_PATH}root_command_output") + apport.attach_cloud_init_logs( + object(), include_sensitive=include_sensitive + ) + if include_sensitive: + m_root_command.assert_called_once_with( + [ + "cloud-init", + "collect-logs", + "-t", + "/tmp/cloud-init-logs.tgz", + ] + ) + else: + m_root_command.assert_called_once_with( + [ + "cloud-init", + "collect-logs", + "-t", + "/tmp/cloud-init-logs.tgz", + "--redact", + ] + ) @pytest.mark.parametrize( "report,tags", @@ -104,12 +85,9 @@ def test_add_bug_tags_assigns_proper_tags(self, report, tags): assert report.get("Tags", "") == tags @mock.patch(M_PATH + "os.path.exists", return_value=True) 
-    def test_attach_ubuntu_pro_info(self, m_exists):
-        m_hookutils = mock.Mock()
-        with mock.patch.dict(sys.modules, {"apport.hookutils": m_hookutils}):
-            reload(sys.modules["cloudinit.apport"])
-            report = {}
-            apport.attach_ubuntu_pro_info(report)
+    def test_attach_ubuntu_pro_info(self, m_exists, m_hookutils):
+        report = {}
+        apport.attach_ubuntu_pro_info(report)
 
         assert [
             mock.call(report, "/var/log/ubuntu-advantage.log"),
@@ -117,12 +95,11 @@ def test_attach_ubuntu_pro_info(self, m_exists):
         assert report.get("Tags", "") == "ubuntu-pro"
 
     @mock.patch(M_PATH + "os.path.exists", return_value=False)
-    def test_attach_ubuntu_pro_info_log_non_present(self, m_exists):
-        m_hookutils = mock.Mock()
-        with mock.patch.dict(sys.modules, {"apport.hookutils": m_hookutils}):
-            reload(sys.modules["cloudinit.apport"])
-            report = {}
-            apport.attach_ubuntu_pro_info(report)
+    def test_attach_ubuntu_pro_info_log_non_present(
+        self, m_exists, m_hookutils
+    ):
+        report = {}
+        apport.attach_ubuntu_pro_info(report)
 
         assert [
             mock.call(report, "/var/log/ubuntu-advantage.log"),
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index a7c3b1ba38b..58f003eb70f 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -249,7 +249,6 @@ def test_conditional_subcommands_from_entry_point_sys_argv(
         self,
         subcommand,
         capsys,
-        m_log_paths,
         mock_status_wrapper,
     ):
         """Subcommands from entry-point are properly parsed from sys.argv."""
@@ -271,9 +270,7 @@ def test_conditional_subcommands_from_entry_point_sys_argv(
             "status",
         ],
     )
-    def test_subcommand_parser(
-        self, subcommand, m_log_paths, mock_status_wrapper
-    ):
+    def test_subcommand_parser(self, subcommand, mock_status_wrapper):
         """cloud-init `subcommand` calls its subparser."""
         # Provide -h param to `subcommand` to avoid having to mock behavior.
         out = io.StringIO()

From 56aa7063602e288eb5a7692446b9cc435ee6332b Mon Sep 17 00:00:00 2001
From: Carlos Nihelton
Date: Fri, 16 Aug 2024 00:42:05 -0300
Subject: [PATCH 102/131] fix(wsl): Properly assemble multipart data (#5538)

In the case of Pro, if either agent or user data is not cloud-config
user-data, combine the parts in `self.userdata_raw` as a #include file
so cloud-init transforms that internally into multipart data.
Avoid passing strings and lists directly, which confused the processor
due to the lack of a MIME type.

Being explicit about only loading text/cloud-config parts also allows
other compositions of cloud-init features to just work, like jinja
templates.

This error was surfaced when testing with empty Landscape data, but any
non-text/cloud-config content type would trigger the same behavior.
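
For illustration (an editor's sketch; the exact file names below are
hypothetical), when either part is not plain cloud-config, the
datasource now hands cloud-init a #include document instead of a raw
list, with the autogenerated agent data listed first:

    #include
    /home/me/.ubuntupro/.cloud-init/agent.yaml
    /home/me/.ubuntupro/.cloud-init/Ubuntu.user-data

cloud-init then loads each referenced part itself and assembles the
multipart message, detecting the MIME type of every part.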
Add merge_agent_landscape_data to process agent.yaml or Landscape data and ignore any empty files present in .ubuntupro/.cloud-init/ --- cloudinit/sources/DataSourceWSL.py | 188 +++++++++++++++-------- tests/unittests/sources/test_wsl.py | 230 ++++++++++++++++++++++++++-- 2 files changed, 344 insertions(+), 74 deletions(-) diff --git a/cloudinit/sources/DataSourceWSL.py b/cloudinit/sources/DataSourceWSL.py index 7a75ff4e691..ddb31411681 100644 --- a/cloudinit/sources/DataSourceWSL.py +++ b/cloudinit/sources/DataSourceWSL.py @@ -9,12 +9,13 @@ import os import typing from pathlib import PurePath -from typing import Any, List, Optional, Tuple, Union, cast +from typing import List, Optional, Tuple import yaml from cloudinit import sources, subp, util from cloudinit.distros import Distro +from cloudinit.handlers import type_from_starts_with from cloudinit.helpers import Paths LOG = logging.getLogger(__name__) @@ -143,22 +144,21 @@ def candidate_user_data_file_names(instance_name) -> List[str]: ] -def load_yaml_or_bin(data_path: str) -> Optional[Union[dict, bytes]]: - """ - Tries to load a YAML file as a dict, otherwise returns the file's raw - binary contents as `bytes`. Returns `None` if no file is found. - """ - try: - bin_data = util.load_binary_file(data_path) - dict_data = util.load_yaml(bin_data) - if dict_data is None: - return bin_data +class ConfigData: + """Models a piece of configuration data as a dict if possible, while + retaining its raw representation alongside its file path""" - return dict_data - except FileNotFoundError: - LOG.debug("No data found at %s, ignoring.", data_path) + def __init__(self, path: PurePath): + self.raw: str = util.load_text_file(path) + self.path: PurePath = path + + self.config_dict: Optional[dict] = None - return None + if "text/cloud-config" == type_from_starts_with(self.raw): + self.config_dict = util.load_yaml(self.raw) + + def is_cloud_config(self) -> bool: + return self.config_dict is not None def load_instance_metadata( @@ -176,7 +176,7 @@ def load_instance_metadata( ) try: - metadata = util.load_yaml(util.load_binary_file(metadata_path)) + metadata = util.load_yaml(util.load_text_file(metadata_path)) except FileNotFoundError: LOG.debug( "No instance metadata found at %s. Using default instance-id.", @@ -196,7 +196,7 @@ def load_instance_metadata( def load_ubuntu_pro_data( user_home: PurePath, -) -> Tuple[Union[dict, bytes, None], Union[dict, bytes, None]]: +) -> Tuple[Optional[ConfigData], Optional[ConfigData]]: """ Read .ubuntupro user-data if present and return a tuple of agent and landscape user-data. @@ -205,13 +205,112 @@ def load_ubuntu_pro_data( if not os.path.isdir(pro_dir): return None, None - landscape_data = load_yaml_or_bin( + landscape_path = PurePath( os.path.join(pro_dir, LANDSCAPE_DATA_FILE % instance_name()) ) - agent_data = load_yaml_or_bin(os.path.join(pro_dir, AGENT_DATA_FILE)) + landscape_data = None + if os.path.isfile(landscape_path): + LOG.debug( + "Landscape configuration found: %s. 
Organization policy "
+            "ignores any local user-data in %s.",
+            landscape_path,
+            cloud_init_data_dir(user_home),
+        )
+        landscape_data = ConfigData(landscape_path)
+
+    agent_path = PurePath(os.path.join(pro_dir, AGENT_DATA_FILE))
+    agent_data = None
+    if os.path.isfile(agent_path):
+        agent_data = ConfigData(agent_path)
+
     return agent_data, landscape_data


+def merge_agent_landscape_data(
+    agent_data: Optional[ConfigData], user_data: Optional[ConfigData]
+) -> Optional[str]:
+    """Merge agent.yaml data provided by Ubuntu Pro for WSL
+    and user data provided either by Landscape or the local user,
+    according to the UP4W-specific rules.
+
+    When merging is not possible, provide a #include directive to allow
+    cloud-init to merge the separate parts.
+    """
+    # Ignore agent_data if None or empty
+    if agent_data is None or len(agent_data.raw) == 0:
+        if user_data is None or len(user_data.raw) == 0:
+            return None
+        return user_data.raw
+
+    # Ignore user_data if None or empty
+    if user_data is None or len(user_data.raw) == 0:
+        if agent_data is None or len(agent_data.raw) == 0:
+            return None
+        return agent_data.raw
+
+    # If both are found but we cannot reliably model both data files as
+    # cloud-config dicts, then we cannot merge them ourselves, so we should
+    # pass the data as if the user had written an include file
+    # for cloud-init to handle internally. We explicitly prioritize the
+    # agent data, to ensure cloud-init would handle it even in the presence
+    # of syntax errors in user data (agent data is autogenerated).
+    # It's possible that the effects caused by the user data would override
+    # the agent data, but that is ultimately the user's responsibility.
+    # The alternative of writing the user data first would make it possible
+    # for the agent data to be skipped in the presence of syntax errors in
+    # user data.
+
+    if not all([agent_data.is_cloud_config(), user_data.is_cloud_config()]):
+        LOG.debug(
+            "Unable to merge %s and %s. "
+            "Providing as separate user-data #include.",
+            agent_data.path,
+            user_data.path,
+        )
+        return "#include\n%s\n%s\n" % (
+            agent_data.path.as_posix(),
+            user_data.path.as_posix(),
+        )
+
+    # We only care about overriding top-level config keys entirely, so we
+    # can just iterate over the top level keys and write over them if the
+    # agent provides them instead.
+    # That's the reason for not using util.mergemanydict().
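# (Illustrative sketch, not part of the patch; the values are hypothetical.)
# It contrasts the top-level override implemented below with a deep merge:
user = {"landscape": {"client": {"account_name": "user", "tags": "dev"}}}
agent = {"landscape": {"client": {"account_name": "agent"}}}
# Top-level override: the agent's "landscape" key replaces the user's
# entire "landscape" subtree, so the user's nested "tags" is dropped here.
merged = {**user, **agent}
assert merged["landscape"]["client"] == {"account_name": "agent"}
# A deep merge (roughly what util.mergemanydict() does) would instead keep
# the user's nested "tags" while taking the agent's account_name. The code
# below performs the override, then re-applies the user's Landscape tags
# explicitly as the one sanctioned exception.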
+    merged: dict = {}
+    user_tags: str = ""
+    overridden_keys: typing.List[str] = []
+    if isinstance(user_data.config_dict, dict):
+        merged = user_data.config_dict
+        user_tags = (
+            merged.get("landscape", {}).get("client", {}).get("tags", "")
+        )
+    if isinstance(agent_data.config_dict, dict):
+        if user_data:
+            LOG.debug("Merging both user_data and agent.yaml configs.")
+        agent = agent_data.config_dict
+        for key in agent:
+            if key in merged:
+                overridden_keys.append(key)
+            merged[key] = agent[key]
+        if overridden_keys:
+            LOG.debug(
+                "agent.yaml overrides config keys: %s",
+                ", ".join(overridden_keys),
+            )
+        if user_tags and merged.get("landscape", {}).get("client"):
+            LOG.debug(
+                "Landscape client conf updated with user-data"
+                " landscape.client.tags: %s",
+                user_tags,
+            )
+            merged["landscape"]["client"]["tags"] = user_tags
+
+    return (
+        "#cloud-config\n# WSL datasource Merged agent.yaml and user_data\n%s"
+        % yaml.dump(merged).strip()
+    )
+
+
 class DataSourceWSL(sources.DataSource):
     dsname = "WSL"

@@ -284,8 +383,8 @@ def _get_data(self) -> bool:
             return False

         seed_dir = cloud_init_data_dir(user_home)
-        agent_data = None
-        user_data: Optional[Union[dict, bytes]] = None
+        agent_data: Optional[ConfigData] = None
+        user_data: Optional[ConfigData] = None

         # Load any metadata
         try:
@@ -303,8 +402,8 @@ def _get_data(self) -> bool:
         # Load regular user configs
         try:
             if user_data is None and seed_dir is not None:
-                file = self.find_user_data_file(seed_dir)
-                user_data = load_yaml_or_bin(file.as_posix())
+                user_data = ConfigData(self.find_user_data_file(seed_dir))
+
         except (ValueError, IOError) as err:
             LOG.error(
                 "Unable to load any user-data file in %s: %s",
@@ -316,48 +415,7 @@ def _get_data(self) -> bool:
         if not any([user_data, agent_data]):
             return False

-        # If we cannot reliably model data files as dicts, then we cannot merge
-        # ourselves, so we can pass the data in ascending order as a list for
-        # cloud-init to handle internally
-        if isinstance(agent_data, bytes) or isinstance(user_data, bytes):
-            self.userdata_raw = cast(Any, [user_data, agent_data])
-            return True
-
-        # We only care about overriding modules entirely, so we can just
-        # iterate over the top level keys and write over them if the agent
-        # provides them instead.
-        # That's the reason for not using util.mergemanydict().
-        merged: dict = {}
-        user_tags: str = ""
-        overridden_keys: typing.List[str] = []
-        if user_data:
-            merged = user_data
-            user_tags = (
-                merged.get("landscape", {}).get("client", {}).get("tags", "")
-            )
-        if agent_data:
-            if user_data:
-                LOG.debug("Merging both user_data and agent.yaml configs.")
-            for key in agent_data:
-                if key in merged:
-                    overridden_keys.append(key)
-                merged[key] = agent_data[key]
-            if overridden_keys:
-                LOG.debug(
-                    (
-                        " agent.yaml overrides config keys: "
-                        ", ".join(overridden_keys)
-                    )
-                )
-            if user_tags and merged.get("landscape", {}).get("client"):
-                LOG.debug(
-                    "Landscape client conf updated with user-data"
-                    " landscape.client.tags: %s",
-                    user_tags,
-                )
-                merged["landscape"]["client"]["tags"] = user_tags
-
-        self.userdata_raw = "#cloud-config\n%s" % yaml.dump(merged)
+        self.userdata_raw = merge_agent_landscape_data(agent_data, user_data)
         return True

diff --git a/tests/unittests/sources/test_wsl.py b/tests/unittests/sources/test_wsl.py
index 2f26d7fd565..1ba374e468f 100644
--- a/tests/unittests/sources/test_wsl.py
+++ b/tests/unittests/sources/test_wsl.py
@@ -53,6 +53,29 @@
 SAMPLE_LINUX_DISTRO = ("ubuntu", "24.04", "noble")
 SAMPLE_LINUX_DISTRO_NO_VERSION_ID = ("debian", "", "trixie")

+AGENT_SAMPLE = """\
+#cloud-config
+landscape:
+  host:
+    url: landscape.canonical.com:6554
+  client:
+    account_name: agenttest
+    url: https://landscape.canonical.com/message-system
+    ping_url: https://landscape.canonical.com/ping
+    tags: wsl
+ubuntu_pro:
+  token: testtoken
+"""
+
+LANDSCAPE_SAMPLE = """\
+#cloud-config
+landscape:
+  client:
+    account_name: landscapetest
+    tags: tag_aiml,tag_dev
+locale: en_GB.UTF-8
+"""
+

 class TestWSLHelperFunctions:
     @mock.patch("cloudinit.util.subp.subp")
@@ -246,11 +269,63 @@ def join_payloads_from_content_type(
             content = ""
             for p in part.walk():
                 if p.get_content_type() == content_type:
-                    content = content + str(p.get_payload(decode=True))
+                    content = content + str(p.get_payload())

     return content


+class TestMergeAgentLandscapeData:
+    @pytest.mark.parametrize(
+        "agent_yaml,landscape_user_data,expected",
+        (
+            pytest.param(
+                None, None, None, id="none_when_both_agent_and_ud_none"
+            ),
+            pytest.param(
+                None, "", None, id="none_when_agent_none_and_ud_empty"
+            ),
+            pytest.param(
+                "", None, None, id="none_when_agent_empty_and_ud_none"
+            ),
+            pytest.param("", "", None, id="none_when_both_agent_and_ud_empty"),
+            pytest.param(
+                AGENT_SAMPLE, "", AGENT_SAMPLE, id="agent_only_when_ud_empty"
+            ),
+            pytest.param(
+                "",
+                LANDSCAPE_SAMPLE,
+                LANDSCAPE_SAMPLE,
+                id="ud_only_when_agent_empty",
+            ),
+            pytest.param(
+                "#cloud-config\nlandscape:\n  client: {account_name: agent}\n",
+                LANDSCAPE_SAMPLE,
+                "#cloud-config\n# WSL datasource Merged agent.yaml and "
+                "user_data\n"
+                + "\n".join(LANDSCAPE_SAMPLE.splitlines()[1:]).replace(
+                    "landscapetest", "agent"
+                ),
+                id="merge_agent_and_landscape_ud_when_both_present",
+            ),
+        ),
+    )
+    def test_merged_data_excludes_empty_or_none(
+        self, agent_yaml, landscape_user_data, expected, tmpdir
+    ):
+        agent_data = user_data = None
+        if agent_yaml is not None:
+            agent_path = tmpdir.join("agent.yaml")
+            agent_path.write(agent_yaml)
+            agent_data = wsl.ConfigData(agent_path)
+        if landscape_user_data is not None:
+            landscape_ud_path = tmpdir.join("instance_name.user_data")
+            landscape_ud_path.write(landscape_user_data)
+            user_data = wsl.ConfigData(landscape_ud_path)
+        assert expected == wsl.merge_agent_landscape_data(
+            agent_data, user_data
+        )
+
+
 class TestWSLDataSource:
     @pytest.fixture(autouse=True)
     def setup(self,
mocker, tmpdir): @@ -353,6 +428,38 @@ def test_get_data_sh(self, m_lsb_release, tmpdir, paths): ) assert COMMAND in userdata + @mock.patch("cloudinit.util.lsb_release") + def test_get_data_jinja(self, m_lsb_release, paths, tmpdir): + """Assert we don't mistakenly treat jinja as final cloud-config""" + m_lsb_release.return_value = SAMPLE_LINUX_DISTRO + data_path = tmpdir.join(".cloud-init", f"{INSTANCE_NAME}.user-data") + data_path.dirpath().mkdir() + data_path.write( + """## template: jinja +#cloud-config +write_files: +- path: /etc/{{ v1.instance_name }}.conf +""" + ) + + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=_get_distro("ubuntu"), + paths=paths, + ) + + assert ds.get_data() is True + ud = ds.get_userdata(True) + print(ud) + + assert ud is not None + assert "write_files" in join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/jinja2" + ), "Jinja should not be treated as final cloud-config" + assert "write_files" not in join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ), "No cloud-config part should exist" + @mock.patch("cloudinit.util.get_linux_distro") def test_data_precedence(self, m_get_linux_dist, tmpdir, paths): """Validates the precedence of user-data files.""" @@ -473,14 +580,7 @@ def test_landscape_vs_local_user(self, m_get_linux_dist, tmpdir, paths): ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") os.makedirs(ubuntu_pro_tmp, exist_ok=True) landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME) - landscape_file.write( - """#cloud-config -landscape: - client: - account_name: landscapetest - tags: tag_aiml,tag_dev -locale: en_GB.UTF-8""" - ) + landscape_file.write(LANDSCAPE_SAMPLE) # Run the datasource ds = wsl.DataSourceWSL( @@ -574,6 +674,118 @@ def test_landscape_provided_data(self, m_get_linux_dist, tmpdir, paths): ), "User-data should override agent data's Landscape computer tags" assert "wsl" not in userdata + @mock.patch("cloudinit.util.get_linux_distro") + def test_landscape_empty_data(self, m_get_linux_dist, tmpdir, paths): + """Asserts that Pro for WSL data is present when Landscape is empty""" + + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO + + ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") + os.makedirs(ubuntu_pro_tmp, exist_ok=True) + + agent_file = ubuntu_pro_tmp.join("agent.yaml") + agent_file.write( + """#cloud-config +landscape: + host: + url: hosted.com:6554 + client: + account_name: agent_test + url: https://hosted.com/message-system + ping_url: https://hosted.com/ping + ssl_public_key: C:\\Users\\User\\server.pem + tags: wsl +ubuntu_pro: + token: agent_token""" + ) + + landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME) + landscape_file.write("") + + # Run the datasource + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=_get_distro("ubuntu"), + paths=paths, + ) + + # Assert Landscape and Agent combine, with Agent taking precedence + assert ds.get_data() is True + ud = ds.get_userdata() + + assert ud is not None + userdata = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ), + ) + + assert ( + "agent_test" in userdata and "agent_token" in userdata + ), "Agent data should be present" + + @mock.patch("cloudinit.util.get_linux_distro") + def test_landscape_shell_script(self, m_get_linux_dist, tmpdir, paths): + """Asserts that Pro for WSL and Landscape goes multipart""" + + m_get_linux_dist.return_value = SAMPLE_LINUX_DISTRO + + ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") + 
os.makedirs(ubuntu_pro_tmp, exist_ok=True) + + agent_file = ubuntu_pro_tmp.join("agent.yaml") + agent_file.write( + """#cloud-config +landscape: + host: + url: hosted.com:6554 + client: + account_name: agent_test + url: https://hosted.com/message-system + ping_url: https://hosted.com/ping + ssl_public_key: C:\\Users\\User\\server.pem + tags: wsl +ubuntu_pro: + token: agent_token""" + ) + + COMMAND = "echo Hello cloud-init on WSL!" + landscape_file = ubuntu_pro_tmp.join("%s.user-data" % INSTANCE_NAME) + landscape_file.write(f"#!/bin/sh\n{COMMAND}\n") + + # Run the datasource + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=_get_distro("ubuntu"), + paths=paths, + ) + + # Assert Landscape and Agent combine, with Agent taking precedence + assert ds.get_data() is True + ud = ds.get_userdata() + + assert ud is not None + userdata = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/cloud-config" + ), + ) + + assert ( + "agent_test" in userdata and "agent_token" in userdata + ), "Agent data should be present" + + shell_script = cast( + str, + join_payloads_from_content_type( + cast(MIMEMultipart, ud), "text/x-shellscript" + ), + ) + + assert COMMAND in shell_script + @mock.patch("cloudinit.util.get_linux_distro") def test_with_landscape_no_tags(self, m_get_linux_dist, tmpdir, paths): """Validates the Pro For WSL default Landscape tags are applied""" From 0a8bf72b9090dece9ef74f662675ccfc329fec38 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 25 Jul 2024 11:45:54 -0600 Subject: [PATCH 103/131] doc(schema): schema descriptions should end with trailing stop (#5562) Also emphasize ''users''. --- .../schemas/schema-cloud-config-v1.json | 326 +++++++++--------- 1 file changed, 163 insertions(+), 163 deletions(-) diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json index 4ae8b4a8f70..76e5e09bd47 100644 --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -131,7 +131,7 @@ "properties": { "disable_auto_attach": { "type": "boolean", - "description": "Optional boolean for controlling if ua-auto-attach.service (in Ubuntu Pro instances) will be attempted each boot. Default: ``false``", + "description": "Optional boolean for controlling if ua-auto-attach.service (in Ubuntu Pro instances) will be attempted each boot. Default: ``false``.", "default": false } } @@ -163,7 +163,7 @@ "null" ], "format": "uri", - "description": "HTTP Proxy URL used for all APT repositories on a system or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" + "description": "HTTP Proxy URL used for all APT repositories on a system or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``." }, "global_apt_https_proxy": { "type": [ @@ -171,7 +171,7 @@ "null" ], "format": "uri", - "description": "HTTPS Proxy URL used for all APT repositories on a system or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" + "description": "HTTPS Proxy URL used for all APT repositories on a system or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``." }, "ua_apt_http_proxy": { "type": [ @@ -179,7 +179,7 @@ "null" ], "format": "uri", - "description": "HTTP Proxy URL used only for Ubuntu Pro APT repositories or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" + "description": "HTTP Proxy URL used only for Ubuntu Pro APT repositories or null to unset. 
Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``." }, "ua_apt_https_proxy": { "type": [ @@ -187,7 +187,7 @@ "null" ], "format": "uri", - "description": "HTTPS Proxy URL used only for Ubuntu Pro APT repositories or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``" + "description": "HTTPS Proxy URL used only for Ubuntu Pro APT repositories or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``." } } } @@ -198,7 +198,7 @@ "patternProperties": { "^.+$": { "label": "", - "description": "Optional string of single username or a list of usernames to add to the group", + "description": "Optional string of single username or a list of usernames to add to the group.", "type": [ "string", "array" @@ -239,12 +239,12 @@ }, "expiredate": { "default": null, - "description": "Optional. Date on which the user's account will be disabled. Default: ``null``", + "description": "Optional. Date on which the user's account will be disabled. Default: ``null``.", "type": "string", "format": "date" }, "gecos": { - "description": "Optional comment about the user, usually a comma-separated string of real name and contact information", + "description": "Optional comment about the user, usually a comma-separated string of real name and contact information.", "type": "string" }, "groups": { @@ -283,12 +283,12 @@ ] }, "homedir": { - "description": "Optional home dir for user. Default: ``/home/``", + "description": "Optional home dir for user. Default: ``/home/``.", "default": "``/home/``", "type": "string" }, "inactive": { - "description": "Optional string representing the number of days until the user is disabled. ", + "description": "Optional string representing the number of days until the user is disabled.", "type": "string" }, "lock-passwd": { @@ -299,7 +299,7 @@ }, "lock_passwd": { "default": true, - "description": "Disable password login. Default: ``true``", + "description": "Disable password login. Default: ``true``.", "type": "boolean" }, "no-create-home": { @@ -310,7 +310,7 @@ }, "no_create_home": { "default": false, - "description": "Do not create home directory. Default: ``false``", + "description": "Do not create home directory. Default: ``false``.", "type": "boolean" }, "no-log-init": { @@ -321,7 +321,7 @@ }, "no_log_init": { "default": false, - "description": "Do not initialize lastlog and faillog for user. Default: ``false``", + "description": "Do not initialize lastlog and faillog for user. Default: ``false``.", "type": "boolean" }, "no-user-group": { @@ -332,7 +332,7 @@ }, "no_user_group": { "default": false, - "description": "Do not create group named after user. Default: ``false``", + "description": "Do not create group named after user. Default: ``false``.", "type": "boolean" }, "passwd": { @@ -378,7 +378,7 @@ }, "primary_group": { "default": "````", - "description": "Primary group for user. Default: ````", + "description": "Primary group for user. Default: ````.", "type": "string" }, "selinux-user": { @@ -400,7 +400,7 @@ "type": "string" }, "ssh_authorized_keys": { - "description": "List of SSH keys to add to user's authkeys file. Can not be combined with ``ssh_redirect_user``", + "description": "List of SSH keys to add to user's authkeys file. Can not be combined with ``ssh_redirect_user``.", "type": "array", "items": { "type": "string" @@ -428,7 +428,7 @@ "deprecated_description": "Use ``ssh_import_id`` instead." }, "ssh_import_id": { - "description": "List of ssh ids to import for user. Can not be combined with ``ssh_redirect_user``. 
See the man page[1] for more details. [1] https://manpages.ubuntu.com/manpages/noble/en/man1/ssh-import-id.1.html", + "description": "List of ssh ids to import for user. Can not be combined with ``ssh_redirect_user``. See the man page[1] for more details. [1] https://manpages.ubuntu.com/manpages/noble/en/man1/ssh-import-id.1.html.", "type": "array", "items": { "type": "string" @@ -478,7 +478,7 @@ ] }, "uid": { - "description": "The user's ID. Default value [system default]", + "description": "The user's ID. Default value [system default].", "oneOf": [ { "type": "integer" @@ -548,7 +548,7 @@ "deprecated_description": "Use ``remove_defaults`` instead." }, "remove_defaults": { - "description": "Remove default CA certificates if true. Default: ``false``", + "description": "Remove default CA certificates if true. Default: ``false``.", "type": "boolean", "default": false }, @@ -670,7 +670,7 @@ "type": "object", "properties": { "autoinstall": { - "description": "Opaque autoinstall schema definition for Ubuntu autoinstall. Full schema processed by live-installer. See: https://ubuntu.com/server/docs/install/autoinstall-reference", + "description": "Opaque autoinstall schema definition for Ubuntu autoinstall. Full schema processed by live-installer. See: https://ubuntu.com/server/docs/install/autoinstall-reference.", "type": "object", "properties": { "version": { @@ -713,7 +713,7 @@ "distro", "pip" ], - "description": "The type of installation for ansible. It can be one of the following values:\n- ``distro``\n- ``pip``" + "description": "The type of installation for ansible. It can be one of the following values:\n- ``distro``\n- ``pip``." }, "run_user": { "type": "string", @@ -973,7 +973,7 @@ "base_url": { "type": "string", "default": "https://alpine.global.ssl.fastly.net/alpine", - "description": "The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``https://alpine.global.ssl.fastly.net/alpine``" + "description": "The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``https://alpine.global.ssl.fastly.net/alpine``." }, "community_enabled": { "type": "boolean", @@ -987,7 +987,7 @@ }, "version": { "type": "string", - "description": "The Alpine version to use (e.g. ``v3.12`` or ``edge``)" + "description": "The Alpine version to use (e.g. ``v3.12`` or ``edge``)." } }, "required": [ @@ -997,7 +997,7 @@ }, "local_repo_base_url": { "type": "string", - "description": "The base URL of an Alpine repository containing unofficial packages" + "description": "The base URL of an Alpine repository containing unofficial packages." } } } @@ -1026,16 +1026,16 @@ }, "primary": { "$ref": "#/$defs/apt_configure.mirror", - "description": "The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environment, with different hosts used for a local APT mirror. 
If no mirror is provided by ``uri`` or ``search``, ``search_dns`` may be used to search for dns names in the format ``-mirror`` in each of the following:\n- fqdn of this host per cloud metadata,\n- localdomain,\n- domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``-mirror``, then it is assumed that there is a distro mirror at ``http://-mirror./``. If the ``primary`` key is defined, but not the ``security`` key, then then configuration for ``primary`` is also used for ``security``. If ``search_dns`` is used for the ``security`` key, the search pattern will be ``-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n- ``keyid``: a key to import via shortid or fingerprint.\n- ``key``: a raw PGP key.\n- ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either the following defaults are used:\n- ``primary`` => ``http://archive.ubuntu.com/ubuntu``.\n- ``security`` => ``http://security.ubuntu.com/ubuntu``"
+        "description": "The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environments, with different hosts used for a local APT mirror. If no mirror is provided by ``uri`` or ``search``, ``search_dns`` may be used to search for dns names in the format ``-mirror`` in each of the following:\n- fqdn of this host per cloud metadata,\n- localdomain,\n- domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``-mirror``, then it is assumed that there is a distro mirror at ``http://-mirror./``. If the ``primary`` key is defined, but not the ``security`` key, then the configuration for ``primary`` is also used for ``security``. If ``search_dns`` is used for the ``security`` key, the search pattern will be ``-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n- ``keyid``: a key to import via shortid or fingerprint.\n- ``key``: a raw PGP key.\n- ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either the following defaults are used:\n- ``primary`` => ``http://archive.ubuntu.com/ubuntu``.\n- ``security`` => ``http://security.ubuntu.com/ubuntu``."
       },
       "security": {
         "$ref": "#/$defs/apt_configure.mirror",
-        "description": "Please refer to the primary config documentation"
+        "description": "Please refer to the primary config documentation."
       },
       "add_apt_repo_match": {
         "type": "string",
         "default": "^[\\w-]+:\\w",
-        "description": "All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. 
If ``add_apt_repo_match`` is not specified, it defaults to ``^[\\w-]+:\\w``"
+        "description": "All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults to ``^[\\w-]+:\\w``."
       },
       "debconf_selections": {
         "type": "object",
@@ -1046,7 +1046,7 @@
             "type": "string"
           }
         },
-        "description": "Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a different set of configurations. The value of each key must be a string containing all the debconf configurations that must be applied. We will bundle all of the values and pass them to ``debconf-set-selections``. Therefore, each value line must be a valid entry for ``debconf-set-selections``, meaning that they must possess for distinct fields:\n\n``pkgname question type answer``\n\nWhere:\n- ``pkgname`` is the name of the package.\n- ``question`` the name of the questions.\n- ``type`` is the type of question.\n- ``answer`` is the value used to answer the question.\n\nFor example: ``ippackage ippackage/ip string 127.0.01``"
+        "description": "Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a different set of configurations. The value of each key must be a string containing all the debconf configurations that must be applied. We will bundle all of the values and pass them to ``debconf-set-selections``. Therefore, each value line must be a valid entry for ``debconf-set-selections``, meaning that they must possess four distinct fields:\n\n``pkgname question type answer``\n\nWhere:\n- ``pkgname`` is the name of the package.\n- ``question`` is the name of the question.\n- ``type`` is the type of question.\n- ``answer`` is the value used to answer the question.\n\nFor example: ``ippackage ippackage/ip string 127.0.01``."
       },
       "sources_list": {
         "type": "string",
@@ -1228,12 +1228,12 @@
       "validation_key": {
         "type": "string",
         "default": "/etc/chef/validation.pem",
-        "description": "Optional path for validation_cert. default to ``/etc/chef/validation.pem``"
+        "description": "Optional path for validation_cert. Default: ``/etc/chef/validation.pem``."
       },
       "firstboot_path": {
         "type": "string",
         "default": "/etc/chef/firstboot.json",
-        "description": "Path to write run_list and initial_attributes keys that should also be present in this configuration, defaults to ``/etc/chef/firstboot.json``"
+        "description": "Path to write run_list and initial_attributes keys that should also be present in this configuration. Default: ``/etc/chef/firstboot.json``."
       },
       "exec": {
         "type": "boolean",
@@ -1305,12 +1305,12 @@
       },
       "server_url": {
         "type": "string",
-        "description": "The URL for the chef server"
+        "description": "The URL for the chef server."
       },
       "show_time": {
         "type": "boolean",
         "default": true,
-        "description": "Show time in chef logs"
+        "description": "Show time in chef logs."
       },
       "ssl_verify_mode": {
         "type": "string",
@@ -1352,7 +1352,7 @@
       },
       "chef_license": {
         "type": "string",
-        "description": "string that indicates if user accepts or not license related to some of chef products. See https://docs.chef.io/licensing/accept/",
+        "description": "string that indicates if user accepts or not license related to some of chef products. 
See https://docs.chef.io/licensing/accept/.", "enum": [ "accept", "accept-silent", @@ -1368,7 +1368,7 @@ "properties": { "disable_ec2_metadata": { "default": false, - "description": "Set true to disable IPv4 routes to EC2 metadata. Default: ``false``", + "description": "Set true to disable IPv4 routes to EC2 metadata. Default: ``false``.", "type": "boolean" } } @@ -1448,7 +1448,7 @@ "overwrite": { "type": "boolean", "default": false, - "description": "Controls whether this module tries to be safe about writing partition tables or not. If ``overwrite: false`` is set, the device will be checked for a partition table and for a file system and if either is found, the operation will be skipped. If ``overwrite: true`` is set, no checks will be performed. Using ``overwrite: true`` is **dangerous** and can lead to data loss, so double check that the correct device has been specified if using this option. Default: ``false``" + "description": "Controls whether this module tries to be safe about writing partition tables or not. If ``overwrite: false`` is set, the device will be checked for a partition table and for a file system and if either is found, the operation will be skipped. If ``overwrite: true`` is set, no checks will be performed. Using ``overwrite: true`` is **dangerous** and can lead to data loss, so double check that the correct device has been specified if using this option. Default: ``false``." } } } @@ -1466,7 +1466,7 @@ }, "filesystem": { "type": "string", - "description": "Filesystem type to create. E.g., ``ext4`` or ``btrfs``" + "description": "Filesystem type to create. E.g., ``ext4`` or ``btrfs``." }, "device": { "type": "string", @@ -1491,7 +1491,7 @@ }, "overwrite": { "type": "boolean", - "description": "If ``true``, overwrite any existing filesystem. Using ``overwrite: true`` for filesystems is **dangerous** and can lead to data loss, so double check the entry in ``fs_setup``. Default: ``false``" + "description": "If ``true``, overwrite any existing filesystem. Using ``overwrite: true`` for filesystems is **dangerous** and can lead to data loss, so double check the entry in ``fs_setup``. Default: ``false``." }, "replace_fs": { "type": "string", @@ -1534,12 +1534,12 @@ "properties": { "config": { "type": "string", - "description": "The fan configuration to use as a single multi-line string" + "description": "The fan configuration to use as a single multi-line string." }, "config_path": { "type": "string", "default": "/etc/network/fan", - "description": "The path to write the fan configuration to. Default: ``/etc/network/fan``" + "description": "The path to write the fan configuration to. Default: ``/etc/network/fan``." } } } @@ -1550,7 +1550,7 @@ "properties": { "final_message": { "type": "string", - "description": "The message to display at the end of the run" + "description": "The message to display at the end of the run." } } }, @@ -1563,7 +1563,7 @@ "properties": { "mode": { "default": "auto", - "description": "The utility to use for resizing. Default: ``auto``\n\nPossible options:\n\n* ``auto`` - Use any available utility\n\n* ``growpart`` - Use growpart utility\n\n* ``gpart`` - Use BSD gpart utility\n\n* ``'off'`` - Take no action", + "description": "The utility to use for resizing. 
Default: ``auto``\n\nPossible options:\n\n* ``auto`` - Use any available utility\n\n* ``growpart`` - Use growpart utility\n\n* ``gpart`` - Use BSD gpart utility\n\n* ``'off'`` - Take no action.", "oneOf": [ { "enum": [ @@ -1591,12 +1591,12 @@ "items": { "type": "string" }, - "description": "The devices to resize. Each entry can either be the path to the device's mountpoint in the filesystem or a path to the block device in '/dev'. Default: ``[/]``" + "description": "The devices to resize. Each entry can either be the path to the device's mountpoint in the filesystem or a path to the block device in '/dev'. Default: ``[/]``." }, "ignore_growroot_disabled": { "type": "boolean", "default": false, - "description": "If ``true``, ignore the presence of ``/etc/growroot-disabled``. If ``false`` and the file exists, then don't resize. Default: ``false``" + "description": "If ``true``, ignore the presence of ``/etc/growroot-disabled``. If ``false`` and the file exists, then don't resize. Default: ``false``." } } } @@ -1612,14 +1612,14 @@ "enabled": { "type": "boolean", "default": true, - "description": "Whether to configure which device is used as the target for grub installation. Default: ``true``" + "description": "Whether to configure which device is used as the target for grub installation. Default: ``true``." }, "grub-pc/install_devices": { "type": "string", - "description": "Device to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot`` will be used to find the device" + "description": "Device to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot`` will be used to find the device." }, "grub-pc/install_devices_empty": { - "description": "Sets values for ``grub-pc/install_devices_empty``. If unspecified, will be set to ``true`` if ``grub-pc/install_devices`` is empty, otherwise ``false``", + "description": "Sets values for ``grub-pc/install_devices_empty``. If unspecified, will be set to ``true`` if ``grub-pc/install_devices`` is empty, otherwise ``false``.", "oneOf": [ { "type": "boolean" @@ -1634,7 +1634,7 @@ }, "grub-efi/install_devices": { "type": "string", - "description": "Partition to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot/efi`` will be used to find the partition" + "description": "Partition to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot/efi`` will be used to find the partition." } } }, @@ -1825,11 +1825,11 @@ "properties": { "locale": { "type": "string", - "description": "The locale to set as the system's locale (e.g. ar_PS)" + "description": "The locale to set as the system's locale (e.g. ar_PS)." }, "locale_configfile": { "type": "string", - "description": "The file in which to write the locale configuration (defaults to the distro's default location)" + "description": "The file in which to write the locale configuration (defaults to the distro's default location)." } } }, @@ -1848,7 +1848,7 @@ "properties": { "network_address": { "type": "string", - "description": "IP address for LXD to listen on" + "description": "IP address for LXD to listen on." }, "network_port": { "type": "integer", @@ -1867,19 +1867,19 @@ }, "storage_create_device": { "type": "string", - "description": "Setup device based storage using DEVICE" + "description": "Setup device based storage using DEVICE." }, "storage_create_loop": { "type": "integer", - "description": "Setup loop based storage with SIZE in GB" + "description": "Setup loop based storage with SIZE in GB." 
}, "storage_pool": { "type": "string", - "description": "Name of storage pool to use or create" + "description": "Name of storage pool to use or create." }, "trust_password": { "type": "string", - "description": "The password required to add new clients" + "description": "The password required to add new clients." } } }, @@ -1907,7 +1907,7 @@ }, "mtu": { "type": "integer", - "description": "Bridge MTU, defaults to LXD's default value", + "description": "Bridge MTU, defaults to LXD's default value.", "default": -1, "minimum": -1 }, @@ -1976,11 +1976,11 @@ "properties": { "public-cert": { "type": "string", - "description": "Optional value of server public certificate which will be written to ``/etc/mcollective/ssl/server-public.pem``" + "description": "Optional value of server public certificate which will be written to ``/etc/mcollective/ssl/server-public.pem``." }, "private-cert": { "type": "string", - "description": "Optional value of server private certificate which will be written to ``/etc/mcollective/ssl/server-private.pem``" + "description": "Optional value of server private certificate which will be written to ``/etc/mcollective/ssl/server-private.pem``." } }, "patternProperties": { @@ -2022,7 +2022,7 @@ }, "mount_default_fields": { "type": "array", - "description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev``", + "description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev``.", "default": [ null, null, @@ -2050,10 +2050,10 @@ "properties": { "filename": { "type": "string", - "description": "Path to the swap file to create" + "description": "Path to the swap file to create." }, "size": { - "description": "The size in bytes of the swap file, 'auto' or a human-readable size abbreviation of the format where units are one of B, K, M, G or T. **WARNING: Attempts to use IEC prefixes in your configuration prior to cloud-init version 23.1 will result in unexpected behavior. SI prefixes names (KB, MB) are required on pre-23.1 cloud-init, however IEC values are used. In summary, assume 1KB == 1024B, not 1000B**", + "description": "The size in bytes of the swap file, 'auto' or a human-readable size abbreviation of the format where units are one of B, K, M, G or T. **WARNING: Attempts to use IEC prefixes in your configuration prior to cloud-init version 23.1 will result in unexpected behavior. SI prefixes names (KB, MB) are required on pre-23.1 cloud-init, however IEC values are used. In summary, assume 1KB == 1024B, not 1000B**.", "oneOf": [ { "enum": [ @@ -2079,7 +2079,7 @@ "pattern": "^([0-9]+)?\\.?[0-9]+[BKMGT]$" } ], - "description": "The maxsize in bytes of the swap file" + "description": "The maxsize in bytes of the swap file." } } } @@ -2128,7 +2128,7 @@ "type": "string" }, "uniqueItems": true, - "description": "List of CIDRs to allow" + "description": "List of CIDRs to allow." }, "ntp_client": { "type": "string", @@ -2138,7 +2138,7 @@ "enabled": { "type": "boolean", "default": true, - "description": "Attempt to enable ntp clients if set to True. If set to ``false``, ntp client will not be configured or installed" + "description": "Attempt to enable ntp clients if set to True. 
If set to ``false``, ntp client will not be configured or installed." }, "config": { "description": "Configuration settings or overrides for the ``ntp_client`` specified.", @@ -2212,17 +2212,17 @@ "package_update": { "type": "boolean", "default": false, - "description": "Set ``true`` to update packages. Happens before upgrade or install. Default: ``false``" + "description": "Set ``true`` to update packages. Happens before upgrade or install. Default: ``false``." }, "package_upgrade": { "type": "boolean", "default": false, - "description": "Set ``true`` to upgrade packages. Happens before install. Default: ``false``" + "description": "Set ``true`` to upgrade packages. Happens before install. Default: ``false``." }, "package_reboot_if_required": { "type": "boolean", "default": false, - "description": "Set ``true`` to reboot the system if required by presence of `/var/run/reboot-required`. Default: ``false``" + "description": "Set ``true`` to reboot the system if required by presence of `/var/run/reboot-required`. Default: ``false``." }, "apt_update": { "type": "boolean", @@ -2260,7 +2260,7 @@ "description": "The URL to send the phone home data to." }, "post": { - "description": "A list of keys to post or ``all``. Default: ``all``", + "description": "A list of keys to post or ``all``. Default: ``all``.", "oneOf": [ { "enum": [ @@ -2285,7 +2285,7 @@ }, "tries": { "type": "integer", - "description": "The number of times to try sending the phone home data. Default: ``10``", + "description": "The number of times to try sending the phone home data. Default: ``10``.", "default": 10 } } @@ -2303,7 +2303,7 @@ "additionalProperties": false, "properties": { "delay": { - "description": "Time in minutes to delay after cloud-init has finished. Can be ``now`` or an integer specifying the number of minutes to delay. Default: ``now``", + "description": "Time in minutes to delay after cloud-init has finished. Can be ``now`` or an integer specifying the number of minutes to delay. Default: ``now``.", "default": "now", "oneOf": [ { @@ -2338,12 +2338,12 @@ "type": "string" }, "timeout": { - "description": "Time in seconds to wait for the cloud-init process to finish before executing shutdown. Default: ``30``", + "description": "Time in seconds to wait for the cloud-init process to finish before executing shutdown. Default: ``30``.", "type": "integer", "default": 30 }, "condition": { - "description": "Apply state change only if condition is met. May be boolean true (always met), false (never met), or a command string or list to be executed. For command formatting, see the documentation for ``cc_runcmd``. If exit code is 0, condition is met, otherwise not. Default: ``true``", + "description": "Apply state change only if condition is met. May be boolean true (always met), false (never met), or a command string or list to be executed. For command formatting, see the documentation for ``cc_runcmd``. If exit code is 0, condition is met, otherwise not. Default: ``true``.", "default": true, "oneOf": [ { @@ -2371,7 +2371,7 @@ "install": { "type": "boolean", "default": true, - "description": "Whether or not to install puppet. Setting to ``false`` will result in an error if puppet is not already present on the system. Default: ``true``" + "description": "Whether or not to install puppet. Setting to ``false`` will result in an error if puppet is not already present on the system. Default: ``true``." 
}, "version": { "type": "string", @@ -2379,7 +2379,7 @@ }, "install_type": { "type": "string", - "description": "Valid values are ``packages`` and ``aio``. Agent packages from the puppetlabs repositories can be installed by setting ``aio``. Based on this setting, the default config/SSL/CSR paths will be adjusted accordingly. Default: ``packages``", + "description": "Valid values are ``packages`` and ``aio``. Agent packages from the puppetlabs repositories can be installed by setting ``aio``. Based on this setting, the default config/SSL/CSR paths will be adjusted accordingly. Default: ``packages``.", "enum": [ "packages", "aio" @@ -2397,32 +2397,32 @@ "cleanup": { "type": "boolean", "default": true, - "description": "Whether to remove the puppetlabs repo after installation if ``install_type`` is ``aio`` Default: ``true``" + "description": "Whether to remove the puppetlabs repo after installation if ``install_type`` is ``aio`` Default: ``true``." }, "conf_file": { "type": "string", - "description": "The path to the puppet config file. Default depends on ``install_type``" + "description": "The path to the puppet config file. Default depends on ``install_type``." }, "ssl_dir": { "type": "string", - "description": "The path to the puppet SSL directory. Default depends on ``install_type``" + "description": "The path to the puppet SSL directory. Default depends on ``install_type``." }, "csr_attributes_path": { "type": "string", - "description": "The path to the puppet csr attributes file. Default depends on ``install_type``" + "description": "The path to the puppet csr attributes file. Default depends on ``install_type``." }, "package_name": { "type": "string", - "description": "Name of the package to install if ``install_type`` is ``packages``. Default: ``puppet``" + "description": "Name of the package to install if ``install_type`` is ``packages``. Default: ``puppet``." }, "exec": { "type": "boolean", "default": false, - "description": "Whether or not to run puppet after configuration finishes. A single manual run can be triggered by setting ``exec`` to ``true``, and additional arguments can be passed to ``puppet agent`` via the ``exec_args`` key (by default the agent will execute with the ``--test`` flag). Default: ``false``" + "description": "Whether or not to run puppet after configuration finishes. A single manual run can be triggered by setting ``exec`` to ``true``, and additional arguments can be passed to ``puppet agent`` via the ``exec_args`` key (by default the agent will execute with the ``--test`` flag). Default: ``false``." }, "exec_args": { "type": "array", - "description": "A list of arguments to pass to 'puppet agent' if 'exec' is true Default: ``['--test']``", + "description": "A list of arguments to pass to 'puppet agent' if 'exec' is true Default: ``['--test']``.", "items": { "type": "string" } @@ -2430,7 +2430,7 @@ "start_service": { "type": "boolean", "default": true, - "description": "By default, the puppet service will be automatically enabled after installation and set to automatically start on boot. To override this in favor of manual puppet execution set ``start_service`` to ``false``" + "description": "By default, the puppet service will be automatically enabled after installation and set to automatically start on boot. To override this in favor of manual puppet execution set ``start_service`` to ``false``." 
}, "conf": { "type": "object", @@ -2456,7 +2456,7 @@ }, "csr_attributes": { "type": "object", - "description": "create a ``csr_attributes.yaml`` file for CSR attributes and certificate extension requests. See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html", + "description": "create a ``csr_attributes.yaml`` file for CSR attributes and certificate extension requests. See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html.", "additionalProperties": false, "properties": { "custom_attributes": { @@ -2480,7 +2480,7 @@ false, "noblock" ], - "description": "Whether to resize the root partition. ``noblock`` will resize in the background. Default: ``true``" + "description": "Whether to resize the root partition. ``noblock`` will resize in the background. Default: ``true``." } } }, @@ -2490,7 +2490,7 @@ "manage_resolv_conf": { "type": "boolean", "default": false, - "description": "Whether to manage the resolv.conf file. ``resolv_conf`` block will be ignored unless this is set to ``true``. Default: ``false``" + "description": "Whether to manage the resolv.conf file. ``resolv_conf`` block will be ignored unless this is set to ``true``. Default: ``false``." }, "resolv_conf": { "type": "object", @@ -2498,23 +2498,23 @@ "properties": { "nameservers": { "type": "array", - "description": "A list of nameservers to use to be added as ``nameserver`` lines" + "description": "A list of nameservers to use to be added as ``nameserver`` lines." }, "searchdomains": { "type": "array", - "description": "A list of domains to be added ``search`` line" + "description": "A list of domains to be added ``search`` line." }, "domain": { "type": "string", - "description": "The domain to be added as ``domain`` line" + "description": "The domain to be added as ``domain`` line." }, "sortlist": { "type": "array", - "description": "A list of IP addresses to be added to ``sortlist`` line" + "description": "A list of IP addresses to be added to ``sortlist`` line." }, "options": { "type": "object", - "description": "Key/value pairs of options to go under ``options`` heading. A unary option should be specified as ``true``" + "description": "Key/value pairs of options to go under ``options`` heading. A unary option should be specified as ``true``." } } } @@ -2529,18 +2529,18 @@ "properties": { "username": { "type": "string", - "description": "The username to use. Must be used with password. Should not be used with ``activation-key`` or ``org``" + "description": "The username to use. Must be used with password. Should not be used with ``activation-key`` or ``org``." }, "password": { "type": "string", - "description": "The password to use. Must be used with username. Should not be used with ``activation-key`` or ``org``" + "description": "The password to use. Must be used with username. Should not be used with ``activation-key`` or ``org``." }, "activation-key": { "type": "string", - "description": "The activation key to use. Must be used with ``org``. Should not be used with ``username`` or ``password``" + "description": "The activation key to use. Must be used with ``org``. Should not be used with ``username`` or ``password``." }, "org": { - "description": "The organization to use. Must be used with ``activation-key``. Should not be used with ``username`` or ``password``", + "description": "The organization to use. Must be used with ``activation-key``. 
Should not be used with ``username`` or ``password``.", "oneOf": [ { "type": "string" @@ -2555,40 +2555,40 @@ }, "auto-attach": { "type": "boolean", - "description": "Whether to attach subscriptions automatically" + "description": "Whether to attach subscriptions automatically." }, "service-level": { "type": "string", - "description": "The service level to use when subscribing to RH repositories. ``auto-attach`` must be true for this to be used" + "description": "The service level to use when subscribing to RH repositories. ``auto-attach`` must be true for this to be used." }, "add-pool": { "type": "array", - "description": "A list of pools ids add to the subscription", + "description": "A list of pools ids add to the subscription.", "items": { "type": "string" } }, "enable-repo": { "type": "array", - "description": "A list of repositories to enable", + "description": "A list of repositories to enable.", "items": { "type": "string" } }, "disable-repo": { "type": "array", - "description": "A list of repositories to disable", + "description": "A list of repositories to disable.", "items": { "type": "string" } }, "rhsm-baseurl": { "type": "string", - "description": "Sets the baseurl in ``/etc/rhsm/rhsm.conf``" + "description": "Sets the baseurl in ``/etc/rhsm/rhsm.conf``." }, "server-hostname": { "type": "string", - "description": "Sets the serverurl in ``/etc/rhsm/rhsm.conf``" + "description": "Sets the serverurl in ``/etc/rhsm/rhsm.conf``." } } } @@ -2603,11 +2603,11 @@ "properties": { "config_dir": { "type": "string", - "description": "The directory where rsyslog configuration files will be written. Default: ``/etc/rsyslog.d``" + "description": "The directory where rsyslog configuration files will be written. Default: ``/etc/rsyslog.d``." }, "config_filename": { "type": "string", - "description": "The name of the rsyslog configuration file. Default: ``20-cloud-config.conf``" + "description": "The name of the rsyslog configuration file. Default: ``20-cloud-config.conf``." }, "configs": { "type": "array", @@ -2657,12 +2657,12 @@ }, "install_rsyslog": { "default": false, - "description": "Install rsyslog. Default: ``false``", + "description": "Install rsyslog. Default: ``false``.", "type": "boolean" }, "check_exe": { "type": "string", - "description": "The executable name for the rsyslog daemon.\nFor example, ``rsyslogd``, or ``/opt/sbin/rsyslogd`` if the rsyslog binary is in an unusual path. This is only used if ``install_rsyslog`` is ``true``. Default: ``rsyslogd``" + "description": "The executable name for the rsyslog daemon.\nFor example, ``rsyslogd``, or ``/opt/sbin/rsyslogd`` if the rsyslog binary is in an unusual path. This is only used if ``install_rsyslog`` is ``true``. Default: ``rsyslogd``." }, "packages": { "type": "array", @@ -2670,7 +2670,7 @@ "type": "string" }, "uniqueItems": true, - "description": "List of packages needed to be installed for rsyslog. This is only used if ``install_rsyslog`` is ``true``. Default: ``[rsyslog]``" + "description": "List of packages needed to be installed for rsyslog. This is only used if ``install_rsyslog`` is ``true``. Default: ``[rsyslog]``." } } } @@ -2710,35 +2710,35 @@ "properties": { "pkg_name": { "type": "string", - "description": "Package name to install. Default: ``salt-minion``" + "description": "Package name to install. Default: ``salt-minion``." }, "service_name": { "type": "string", - "description": "Service name to enable. Default: ``salt-minion``" + "description": "Service name to enable. Default: ``salt-minion``." 
}, "config_dir": { "type": "string", - "description": "Directory to write config files to. Default: ``/etc/salt``" + "description": "Directory to write config files to. Default: ``/etc/salt``." }, "conf": { "type": "object", - "description": "Configuration to be written to `config_dir`/minion" + "description": "Configuration to be written to `config_dir`/minion." }, "grains": { "type": "object", - "description": "Configuration to be written to `config_dir`/grains" + "description": "Configuration to be written to `config_dir`/grains." }, "public_key": { "type": "string", - "description": "Public key to be used by the salt minion" + "description": "Public key to be used by the salt minion." }, "private_key": { "type": "string", - "description": "Private key to be used by salt minion" + "description": "Private key to be used by salt minion." }, "pki_dir": { "type": "string", - "description": "Directory to write key files. Default: `config_dir`/pki/minion" + "description": "Directory to write key files. Default: `config_dir`/pki/minion." } } } @@ -2752,7 +2752,7 @@ "additionalProperties": false, "properties": { "enabled": { - "description": "Whether vendor data is enabled or not. Default: ``true``", + "description": "Whether vendor data is enabled or not. Default: ``true``.", "oneOf": [ { "type": "boolean", @@ -2777,7 +2777,7 @@ "integer" ] }, - "description": "The command to run before any vendor scripts. Its primary use case is for profiling a script, not to prevent its run" + "description": "The command to run before any vendor scripts. Its primary use case is for profiling a script, not to prevent its run." } } } @@ -2793,11 +2793,11 @@ "file": { "type": "string", "default": "/dev/urandom", - "description": "File to write random data to. Default: ``/dev/urandom``" + "description": "File to write random data to. Default: ``/dev/urandom``." }, "data": { "type": "string", - "description": "This data will be written to ``file`` before data from the datasource. When using a multi-line value or specifying binary data, be sure to follow YAML syntax and use the ``|`` and ``!binary`` YAML format specifiers when appropriate" + "description": "This data will be written to ``file`` before data from the datasource. When using a multi-line value or specifying binary data, be sure to follow YAML syntax and use the ``|`` and ``!binary`` YAML format specifiers when appropriate." }, "encoding": { "type": "string", @@ -2809,7 +2809,7 @@ "gzip", "gz" ], - "description": "Used to decode ``data`` provided. Allowed values are ``raw``, ``base64``, ``b64``, ``gzip``, or ``gz``. Default: ``raw``" + "description": "Used to decode ``data`` provided. Allowed values are ``raw``, ``base64``, ``b64``, ``gzip``, or ``gz``. Default: ``raw``." }, "command": { "type": "array", @@ -2821,7 +2821,7 @@ "command_required": { "type": "boolean", "default": false, - "description": "If true, and ``command`` is not available to be run then an exception is raised and cloud-init will record failure. Otherwise, only debug error is mentioned. Default: ``false``" + "description": "If true, and ``command`` is not available to be run then an exception is raised and cloud-init will record failure. Otherwise, only debug error is mentioned. Default: ``false``." } } } @@ -2833,24 +2833,24 @@ "preserve_hostname": { "type": "boolean", "default": false, - "description": "If true, the hostname will not be changed. Default: ``false``" + "description": "If true, the hostname will not be changed. Default: ``false``." 
}, "hostname": { "type": "string", - "description": "The hostname to set" + "description": "The hostname to set." }, "fqdn": { "type": "string", - "description": "The fully qualified domain name to set" + "description": "The fully qualified domain name to set." }, "prefer_fqdn_over_hostname": { "type": "boolean", - "description": "If true, the fqdn will be used if it is set. If false, the hostname will be used. If unset, the result is distro-dependent" + "description": "If true, the fqdn will be used if it is set. If false, the hostname will be used. If unset, the result is distro-dependent." }, "create_hostname_file": { "type": "boolean", "default": true, - "description": "If ``false``, the hostname file (e.g. /etc/hostname) will not be created if it does not exist. On systems that use systemd, setting create_hostname_file to ``false`` will set the hostname transiently. If ``true``, the hostname file will always be created and the hostname will be set statically on systemd systems. Default: ``true``" + "description": "If ``false``, the hostname file (e.g. /etc/hostname) will not be created if it does not exist. On systems that use systemd, setting create_hostname_file to ``false`` will set the hostname transiently. If ``true``, the hostname file will always be created and the hostname will be set statically on systemd systems. Default: ``true``." } } }, @@ -2878,7 +2878,7 @@ "expire": { "type": "boolean", "default": true, - "description": "Whether to expire all user passwords such that a password will need to be reset on the user's next login. Default: ``true``" + "description": "Whether to expire all user passwords such that a password will need to be reset on the user's next login. Default: ``true``." }, "users": { "description": "This key represents a list of existing users to set passwords for. Each item under users contains the following required keys: ``name`` and ``password`` or in the case of a randomly generated password, ``name`` and ``type``. The ``type`` key has a default value of ``hash``, and may alternatively be set to ``text`` or ``RANDOM``. Randomly generated passwords may be insecure, use at your own risk.", @@ -2953,7 +2953,7 @@ }, "password": { "type": "string", - "description": "Set the default user's password. Ignored if ``chpasswd`` ``list`` is used" + "description": "Set the default user's password. Ignored if ``chpasswd`` ``list`` is used." } } }, @@ -2987,7 +2987,7 @@ "object", "array" ], - "description": "Snap commands to run on the target system", + "description": "Snap commands to run on the target system.", "items": { "oneOf": [ { @@ -3031,15 +3031,15 @@ "properties": { "server": { "type": "string", - "description": "The Spacewalk server to use" + "description": "The Spacewalk server to use." }, "proxy": { "type": "string", - "description": "The proxy to use when connecting to Spacewalk" + "description": "The proxy to use when connecting to Spacewalk." }, "activation_key": { "type": "string", - "description": "The activation key to use when registering with Spacewalk" + "description": "The activation key to use when registering with Spacewalk." } } } @@ -3051,12 +3051,12 @@ "no_ssh_fingerprints": { "type": "boolean", "default": false, - "description": "If true, SSH fingerprints will not be written. Default: ``false``" + "description": "If true, SSH fingerprints will not be written. Default: ``false``." }, "authkey_hash": { "type": "string", "default": "sha256", - "description": "The hash type to use when generating SSH fingerprints. 
Default: ``sha256``" + "description": "The hash type to use when generating SSH fingerprints. Default: ``sha256``." } } }, @@ -3067,7 +3067,7 @@ "type": "array", "items": { "type": "string", - "description": "The SSH public key to import" + "description": "The SSH public key to import." } } } @@ -3089,7 +3089,7 @@ "ssh_authorized_keys": { "type": "array", "minItems": 1, - "description": "The SSH public keys to add ``.ssh/authorized_keys`` in the default user's home directory", + "description": "The SSH public keys to add ``.ssh/authorized_keys`` in the default user's home directory.", "items": { "type": "string" } @@ -3097,11 +3097,11 @@ "ssh_deletekeys": { "type": "boolean", "default": true, - "description": "Remove host SSH keys. This prevents re-use of a private host key from an image with default host SSH keys. Default: ``true``" + "description": "Remove host SSH keys. This prevents re-use of a private host key from an image with default host SSH keys. Default: ``true``." }, "ssh_genkeytypes": { "type": "array", - "description": "The SSH key types to generate. Default: ``[rsa, ecdsa, ed25519]``", + "description": "The SSH key types to generate. Default: ``[rsa, ecdsa, ed25519]``.", "default": [ "ecdsa", "ed25519", @@ -3120,22 +3120,22 @@ "disable_root": { "type": "boolean", "default": true, - "description": "Disable root login. Default: ``true``" + "description": "Disable root login. Default: ``true``." }, "disable_root_opts": { "type": "string", "default": "``no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo 'Please login as the user \\\"$USER\\\" rather than the user \\\"$DISABLE_USER\\\".';echo;sleep 10;exit 142\"``", - "description": "Disable root login options. If ``disable_root_opts`` is specified and contains the string ``$USER``, it will be replaced with the username of the default user. Default: ``no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo 'Please login as the user \\\"$USER\\\" rather than the user \\\"$DISABLE_USER\\\".';echo;sleep 10;exit 142\"``" + "description": "Disable root login options. If ``disable_root_opts`` is specified and contains the string ``$USER``, it will be replaced with the username of the default user. Default: ``no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo 'Please login as the user \\\"$USER\\\" rather than the user \\\"$DISABLE_USER\\\".';echo;sleep 10;exit 142\"``." }, "allow_public_ssh_keys": { "type": "boolean", "default": true, - "description": "If ``true``, will import the public SSH keys from the datasource's metadata to the user's ``.ssh/authorized_keys`` file. Default: ``true``" + "description": "If ``true``, will import the public SSH keys from the datasource's metadata to the user's ``.ssh/authorized_keys`` file. Default: ``true``." }, "ssh_quiet_keygen": { "type": "boolean", "default": false, - "description": "If ``true``, will suppress the output of key generation to the console. Default: ``false``" + "description": "If ``true``, will suppress the output of key generation to the console. Default: ``false``." }, "ssh_publish_hostkeys": { "type": "object", @@ -3144,11 +3144,11 @@ "enabled": { "type": "boolean", "default": true, - "description": "If true, will read host keys from ``/etc/ssh/*.pub`` and publish them to the datasource (if supported). Default: ``true``" + "description": "If true, will read host keys from ``/etc/ssh/*.pub`` and publish them to the datasource (if supported). Default: ``true``." 
}, "blacklist": { "type": "array", - "description": "The SSH key types to ignore when publishing. Default: ``[]`` to publish all SSH key types", + "description": "The SSH key types to ignore when publishing. Default: ``[]`` to publish all SSH key types.", "items": { "type": "string" } @@ -3162,7 +3162,7 @@ "properties": { "timezone": { "type": "string", - "description": "The timezone to use as represented in /usr/share/zoneinfo" + "description": "The timezone to use as represented in /usr/share/zoneinfo." } } }, @@ -3213,7 +3213,7 @@ "properties": { "manage_etc_hosts": { "default": false, - "description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fdqn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. Default: ``false``", + "description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fdqn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. Default: ``false``.", "oneOf": [ { "enum": [ @@ -3258,7 +3258,7 @@ "create_hostname_file": { "type": "boolean", "default": true, - "description": "If ``false``, the hostname file (e.g. /etc/hostname) will not be created if it does not exist. On systems that use systemd, setting create_hostname_file to ``false`` will set the hostname transiently. If ``true``, the hostname file will always be created and the hostname will be set statically on systemd systems. Default: ``true``" + "description": "If ``false``, the hostname file (e.g. /etc/hostname) will not be created if it does not exist. On systems that use systemd, setting create_hostname_file to ``false`` will set the hostname transiently. If ``true``, the hostname file will always be created and the hostname will be set statically on systemd systems. Default: ``true``." } } }, @@ -3294,7 +3294,7 @@ "$ref": "#/$defs/users_groups.user" } ], - "description": "The ``user`` dictionary values override the ``default_user`` configuration from ``/etc/cloud/cloud.cfg``. The `user` dictionary keys supported for the default_user are the same as the ``users`` schema." + "description": "The ``user`` dictionary values override the ``default_user`` configuration from ``/etc/cloud/cloud.cfg``. The ``user`` dictionary keys supported for the default_user are the same as the ``users`` schema." }, "users": { "type": [ @@ -3338,11 +3338,11 @@ "properties": { "name": { "type": "string", - "description": "Name of the interface. Typically wgx (example: wg0)" + "description": "Name of the interface. Typically wgx (example: wg0)." }, "config_path": { "type": "string", - "description": "Path to configuration file of Wireguard interface" + "description": "Path to configuration file of Wireguard interface." }, "content": { "type": "string", @@ -3384,16 +3384,16 @@ "properties": { "path": { "type": "string", - "description": "Path of the file to which ``content`` is decoded and written" + "description": "Path of the file to which ``content`` is decoded and written." }, "content": { "type": "string", "default": "''", - "description": "Optional content to write to the provided ``path``. When content is present and encoding is not 'text/plain', decode the content prior to writing. Default: ``''``" + "description": "Optional content to write to the provided ``path``. 
When content is present and encoding is not 'text/plain', decode the content prior to writing. Default: ``''``."
},
"source": {
"type": "object",
- "description": "Optional specification for content loading from an arbitrary URI",
+ "description": "Optional specification for content loading from an arbitrary URI.",
"additionalProperties": false,
"properties": {
"uri": {
@@ -3403,7 +3403,7 @@
},
"headers": {
"type": "object",
- "description": "Optional HTTP headers to accompany load request, if applicable",
+ "description": "Optional HTTP headers to accompany load request, if applicable.",
"additionalProperties": {
"type": "string"
}
@@ -3416,12 +3416,12 @@
"owner": {
"type": "string",
"default": "root:root",
- "description": "Optional owner:group to chown on the file and new directories. Default: ``root:root``"
+ "description": "Optional owner:group to chown on the file and new directories. Default: ``root:root``."
},
"permissions": {
"type": "string",
"default": "'0o644'",
- "description": "Optional file permissions to set on ``path`` represented as an octal string '0###'. Default: ``0o644``"
+ "description": "Optional file permissions to set on ``path`` represented as an octal string '0###'. Default: ``0o644``."
},
"encoding": {
"type": "string",
@@ -3437,7 +3437,7 @@
"base64",
"text/plain"
],
- "description": "Optional encoding type of the content. Default: ``text/plain``. No decoding is performed by default. Supported encoding types are: gz, gzip, gz+base64, gzip+base64, gz+b64, gzip+b64, b64, base64"
+ "description": "Optional encoding type of the content. Default: ``text/plain``. No decoding is performed by default. Supported encoding types are: gz, gzip, gz+base64, gzip+base64, gz+b64, gzip+b64, b64, base64."
},
"append": {
"type": "boolean",
@@ -3461,7 +3461,7 @@
"yum_repo_dir": {
"type": "string",
"default": "/etc/yum.repos.d",
- "description": "The repo parts directory where individual yum repo config files will be written. Default: ``/etc/yum.repos.d``"
+ "description": "The repo parts directory where individual yum repo config files will be written. Default: ``/etc/yum.repos.d``."
},
"yum_repos": {
"type": "object",
@@ -3477,17 +3477,17 @@
"baseurl": {
"type": "string",
"format": "uri",
- "description": "URL to the directory where the yum repository's 'repodata' directory lives"
+ "description": "URL to the directory where the yum repository's 'repodata' directory lives."
},
"metalink": {
"type": "string",
"format": "uri",
- "description": "Specifies a URL to a metalink file for the repomd.xml"
+ "description": "Specifies a URL to a metalink file for the repomd.xml."
},
"mirrorlist": {
"type": "string",
"format": "uri",
- "description": "Specifies a URL to a file containing a baseurls list"
+ "description": "Specifies a URL to a file containing a list of baseurls."
},
"name": {
"type": "string",
@@ -3513,7 +3513,7 @@
"type": "string"
}
],
- "description": "Any supported yum repository configuration options will be written to the yum repo config file. See: man yum.conf"
+ "description": "Any supported yum repository configuration options will be written to the yum repo config file. See: man yum.conf."
}
},
"anyOf": [
@@ -3559,7 +3559,7 @@
"baseurl": {
"type": "string",
"format": "uri",
- "description": "The base repositoy URL"
+ "description": "The base repository URL."
}
},
"required": [
@@ -3571,7 +3571,7 @@
},
"config": {
"type": "object",
- "description": "Any supported zypo.conf key is written to ``/etc/zypp/zypp.conf``"
+ "description": "Any supported zypp.conf key is written to ``/etc/zypp/zypp.conf``."
}
}
}
@@ -3585,7 +3585,7 @@
},
{
"type": "array",
- "description": "A list specifying filepath operation configuration for stdout and stderror",
+ "description": "A list specifying filepath operation configuration for stdout and stderr.",
"items": {
"type": [
"string"

From 24cdaa75a54b0ba38afd0c9b823ec2d031c914e3 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Thu, 25 Jul 2024 16:00:30 -0600
Subject: [PATCH 104/131] fix(doc): object type check if patternProperties or properties (#5562)

Without this fix, rendered module documentation was not rendering the following text for some objects:

Each object in **** list supports the following keys:

See Rsyslog Config schema tab.
---
 doc/rtd/templates/module_property.tmpl | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/doc/rtd/templates/module_property.tmpl b/doc/rtd/templates/module_property.tmpl
index f429203c8f8..895a8956581 100644
--- a/doc/rtd/templates/module_property.tmpl
+++ b/doc/rtd/templates/module_property.tmpl
@@ -6,7 +6,17 @@
 {{prefix ~ ' '}}{{ line }}
 {% endfor -%}
 {%- endmacro -%}
-{% if prop_cfg.get('items', {}).get('type') == 'object' %}
-{% set description = description ~ " Each object in **" ~ name ~ "** list supports the following keys:" %}
-{% endif %}
-{{ print_prop(name, types, description, prefix ) }}
+{% set ns = namespace(is_obj_type=false) -%}
+{% if ('properties' in prop_cfg or 'patternProperties' in prop_cfg) %}{% set ns.is_obj_type = true -%}{% endif -%}
+{% for key, val in prop_cfg.get('items', {}).items() -%}
+ {% if key in ('properties', 'patternProperties') -%}{% set ns.is_obj_type = true -%}{% endif -%}
+ {% if key == 'oneOf' -%}
+ {% for oneOf in val -%}
+ {% if ('properties' in oneOf or 'patternProperties' in oneOf ) -%}{% set ns.is_obj_type = true -%}{% endif -%}
+ {% endfor -%}
+ {% endif -%}
+{% endfor -%}
+{% if ns.is_obj_type -%}
+{% set description = description ~ " Each object in **" ~ name ~ "** list supports the following keys:" -%}
+{% endif -%}
+{{ print_prop(name, types, description, prefix ) -}}

From e14ce3d64b539546eb125ae3657157221f8fae41 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Thu, 25 Jul 2024 16:01:15 -0600
Subject: [PATCH 105/131] fix(doc): doc of nested objects under JSON schema items.oneOf (#5562)

Document any keys of objects in a list which allows for objects as one of the alternative types allowed as a list item.

Also, when documenting properties, ensure we skip documentation of either 'properties' or 'patternProperties' if those properties are declared in the hidden key.
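As a hedged illustration for reviewers (this fragment is not part of the patch; the key names are modeled on the rsyslog ``configs`` property described elsewhere in the schema), the shape of schema this change targets is a list whose items may be either a plain string or an object, with the object alternative declared under ``items.oneOf``:

    {
      "configs": {
        "type": "array",
        "items": {
          "oneOf": [
            {"type": "string"},
            {
              "type": "object",
              "properties": {
                "content": {"type": "string"},
                "filename": {"type": "string"}
              }
            }
          ]
        }
      }
    }

Previously render_nested_properties never recursed into ``items.oneOf``, so the keys of the object alternative (``content`` and ``filename`` here) went undocumented; the new loop over ``items.oneOf`` alternatives picks them up.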
Fixes GH-5514
---
 doc/rtd/conf.py | 33 +++++++++++++++++++--------------
 1 file changed, 19 insertions(+), 14 deletions(-)

diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index cfa1f63df63..8efe5e53a11 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -256,23 +256,28 @@ def render_property_template(prop_name, prop_cfg, prefix=""):
 def render_nested_properties(prop_cfg, defs, prefix):
 prop_str = ""
+ prop_types = set(["properties", "patternProperties"])
 flatten_schema_refs(prop_cfg, defs)
 if "items" in prop_cfg:
 prop_str += render_nested_properties(prop_cfg["items"], defs, prefix)
- if not set(["properties", "patternProperties"]).intersection(prop_cfg):
- return prop_str
- for prop_name, nested_cfg in prop_cfg.get("properties", {}).items():
- flatten_schema_all_of(nested_cfg)
- flatten_schema_refs(nested_cfg, defs)
- prop_str += render_property_template(prop_name, nested_cfg, prefix)
- prop_str += render_nested_properties(nested_cfg, defs, prefix + " ")
- for prop_name, nested_cfg in prop_cfg.get("patternProperties", {}).items():
- flatten_schema_all_of(nested_cfg)
- flatten_schema_refs(nested_cfg, defs)
- if nested_cfg.get("label"):
- prop_name = nested_cfg.get("label")
- prop_str += render_property_template(prop_name, nested_cfg, prefix)
- prop_str += render_nested_properties(nested_cfg, defs, prefix + " ")
+ for alt_schema in prop_cfg["items"].get("oneOf", []):
+ if prop_types.intersection(alt_schema):
+ prop_str += render_nested_properties(alt_schema, defs, prefix)
+
+ for hidden_key in prop_cfg.get("hidden", []):
+ prop_cfg.pop(hidden_key, None)
+
+ # Render visible property types
+ for prop_type in prop_types.intersection(prop_cfg):
+ for prop_name, nested_cfg in prop_cfg.get(prop_type, {}).items():
+ flatten_schema_all_of(nested_cfg)
+ flatten_schema_refs(nested_cfg, defs)
+ if nested_cfg.get("label"):
+ prop_name = nested_cfg.get("label")
+ prop_str += render_property_template(prop_name, nested_cfg, prefix)
+ prop_str += render_nested_properties(
+ nested_cfg, defs, prefix + " "
+ )
 return prop_str

From fdccc611381774163c1cc5b0069fcbc16e3e340b Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Fri, 26 Jul 2024 15:18:55 -0600
Subject: [PATCH 106/131] feat(doc): add env vars to debug config module doc builds (#5562)

When running tox -e doc the following environment variables are supported:

CLOUD_INIT_DEBUG_MODULE_DOC=cc_<module_id or 'all'>
CLOUD_INIT_DEBUG_MODULE_DOC_FILE=<output_file_path>

The env var CLOUD_INIT_DEBUG_MODULE_DOC can be set to either a specific module id, such as cc_rsyslog, or 'all'. When set the rendered module documentation RST format is printed inline to stdout to allow for quick analysis of rendered content.

Optionally, if CLOUD_INIT_DEBUG_MODULE_DOC_FILE is set to a writable file path, the output of the rendered content is written to that file instead. This supports development of docs and quick comparison of docs generated before and after a changeset.
---
 doc/rtd/conf.py | 55 +++++++++++++++++++++++++++++++++++++++++++++++++
 tox.ini | 2 ++
 2 files changed, 57 insertions(+)

diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 8efe5e53a11..c3a903af2c9 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -281,6 +281,56 @@ def render_nested_properties(prop_cfg, defs, prefix):
 return prop_str

+def debug_module_docs(
+ module_id: str, mod_docs: dict, debug_file_path: str = None
+):
+ """Print rendered RST module docs during build.
+
+ The intent is to make rendered RST inconsistencies easier to see when
+ modifying jinja template files or JSON schema as white-space and format
+ inconsistencies can lead to significant sphinx rendering issues in RTD.
+
+ To trigger this inline print of rendered docs, set the environment
+ variable CLOUD_INIT_DEBUG_MODULE_DOC.
+
+ :param module_id: A specific 'cc_*' module name to print rendered RST for,
+ or provide 'all' to print out all rendered module docs.
+ :param mod_docs: A dict representing doc metadata for each config module.
+ The dict is keyed on config module id (cc_*) and each value is a dict
+ with values such as: title, name, examples, schema_doc.
+ :param debug_file_path: A specific file to write the rendered RST content.
+ When unset, rendered content is printed to stdout.
+ """
+ from cloudinit.util import load_text_file, load_yaml
+
+ if not module_id:
+ return
+ if module_id == "all":
+ module_ids = mod_docs.keys()
+ else:
+ module_ids = [module_id]
+ rendered_content = ""
+ for mod_id in module_ids:
+ try:
+ data = load_yaml(
+ load_text_file(f"../module-docs/{mod_id}/data.yaml")
+ )
+ except FileNotFoundError:
+ continue
+ with open("templates/modules.tmpl", "r") as stream:
+ tmpl_content = "## template: jinja\n" + stream.read()
+ params = {"data": data, "config": {"html_context": mod_docs}}
+ rendered_content += render_jinja_payload(
+ tmpl_content, "changed_modules_page", params
+ )
+ if debug_file_path:
+ print(f"--- Writing rendered module docs: {debug_file_path} ---")
+ with open(debug_file_path, "w") as stream:
+ stream.write(rendered_content)
+ else:
+ print(rendered_content)
+
+
 def render_module_schemas():
 from cloudinit.importer import import_module
@@ -303,6 +353,11 @@ def render_module_schemas():
 mod_docs[cc_key][
 "schema_doc"
 ] = "No schema definitions for this module"
+ debug_module_docs(
+ os.environ.get("CLOUD_INIT_DEBUG_MODULE_DOC"),
+ mod_docs,
+ debug_file_path=os.environ.get("CLOUD_INIT_DEBUG_MODULE_DOC_FILE"),
+ )
 return mod_docs

diff --git a/tox.ini b/tox.ini
index be5e1d647d2..7299b8513e9 100644
--- a/tox.ini
+++ b/tox.ini
@@ -184,6 +184,8 @@ deps = -r{toxinidir}/doc-requirements.txt
 commands =
 {envpython} -m sphinx {posargs:-W doc/rtd doc/rtd_html}
 {envpython} -m doc8 doc/rtd
+passenv =
+ CLOUD_INIT_*

[testenv:doc-spelling]
deps = -r{toxinidir}/doc-requirements.txt

From d85be37d260a28c1930b8d09bb80118ec2c4e606 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Fri, 2 Aug 2024 16:07:45 -0600
Subject: [PATCH 107/131] fix(doc): italics around deprecation prefix, description bolds key names (#5562)

---
 doc/rtd/templates/property_deprecation.tmpl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/rtd/templates/property_deprecation.tmpl b/doc/rtd/templates/property_deprecation.tmpl
index f7934ef13dc..3fbff189295 100644
--- a/doc/rtd/templates/property_deprecation.tmpl
+++ b/doc/rtd/templates/property_deprecation.tmpl
@@ -1,2 +1,2 @@
-{{ '*Deprecated in version ' ~ deprecated_version ~ '.'
~ deprecated_description ~ '*' -}} +{{ '*Deprecated in version ' ~ deprecated_version ~ ':* ' ~ deprecated_description -}} From a2193da3eaec4df8945cf3657f94f7c66ff2417d Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 2 Aug 2024 16:08:31 -0600 Subject: [PATCH 108/131] chore: update schema docs to use RST bold for config key names (#5562) --- .../schemas/schema-cloud-config-v1.json | 184 +++++++++--------- tests/unittests/config/test_cc_ca_certs.py | 10 +- tests/unittests/config/test_cc_growpart.py | 4 +- tests/unittests/config/test_cc_grub_dpkg.py | 5 +- .../test_cc_package_update_upgrade_install.py | 13 +- tests/unittests/config/test_cc_ubuntu_pro.py | 2 +- .../config/test_cc_update_etc_hosts.py | 4 +- .../unittests/config/test_cc_users_groups.py | 36 ++-- tests/unittests/config/test_schema.py | 18 +- 9 files changed, 143 insertions(+), 133 deletions(-) diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json index 76e5e09bd47..d1313726a3d 100644 --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -295,7 +295,7 @@ "type": "boolean", "deprecated": true, "deprecated_version": "22.3", - "deprecated_description": "Use ``lock_passwd`` instead." + "deprecated_description": "Use **lock_passwd** instead." }, "lock_passwd": { "default": true, @@ -306,7 +306,7 @@ "type": "boolean", "deprecated": true, "deprecated_version": "24.2", - "deprecated_description": "Use ``no_create_home`` instead." + "deprecated_description": "Use **no_create_home** instead." }, "no_create_home": { "default": false, @@ -317,7 +317,7 @@ "type": "boolean", "deprecated": true, "deprecated_version": "24.2", - "deprecated_description": "Use ``no_log_init`` instead." + "deprecated_description": "Use **no_log_init** instead." }, "no_log_init": { "default": false, @@ -328,7 +328,7 @@ "type": "boolean", "deprecated": true, "deprecated_version": "24.2", - "deprecated_description": "Use ``no_user_group`` instead." + "deprecated_description": "Use **no_user_group** instead." }, "no_user_group": { "default": false, @@ -343,7 +343,7 @@ "type": "string", "deprecated": true, "deprecated_version": "24.2", - "deprecated_description": "Use ``hashed_passwd`` instead." + "deprecated_description": "Use **hashed_passwd** instead." }, "hashed_passwd": { "description": "Hash of user password to be applied. This will be applied even if the user is preexisting. To generate this hash, run: ``mkpasswd --method=SHA-512 --rounds=500000``. **Note:** Your password might possibly be visible to unprivileged users on your system, depending on your cloud's security model. Check if your cloud's IMDS server is visible from an unprivileged user to evaluate risk.", @@ -353,7 +353,7 @@ "type": "string", "deprecated": true, "deprecated_version": "24.2", - "deprecated_description": "Use ``plain_text_passwd`` instead." + "deprecated_description": "Use **plain_text_passwd** instead." }, "plain_text_passwd": { "description": "Clear text of user password to be applied. This will be applied even if the user is preexisting. **Note:** SSH keys or certificates are a safer choice for logging in to your system. For local escalation, supplying a hashed password is a safer choice than plain text. Your password might possibly be visible to unprivileged users on your system, depending on your cloud's security model. An exposed plain text password is an immediate security concern. 
Check if your cloud's IMDS server is visible from an unprivileged user to evaluate risk.", @@ -363,7 +363,7 @@ "type": "boolean", "deprecated": true, "deprecated_version": "24.2", - "deprecated_description": "Use ``create_groups`` instead." + "deprecated_description": "Use **create_groups** instead." }, "create_groups": { "default": true, @@ -374,7 +374,7 @@ "type": "string", "deprecated": true, "deprecated_version": "24.2", - "deprecated_description": "Use ``primary_group`` instead." + "deprecated_description": "Use **primary_group** instead." }, "primary_group": { "default": "````", @@ -385,7 +385,7 @@ "type": "string", "deprecated": true, "deprecated_version": "24.2", - "deprecated_description": "Use ``selinux_user`` instead." + "deprecated_description": "Use **selinux_user** instead." }, "selinux_user": { "description": "SELinux user for user's login. Default: the default SELinux user.", @@ -400,7 +400,7 @@ "type": "string" }, "ssh_authorized_keys": { - "description": "List of SSH keys to add to user's authkeys file. Can not be combined with ``ssh_redirect_user``.", + "description": "List of SSH keys to add to user's authkeys file. Can not be combined with **ssh_redirect_user**.", "type": "array", "items": { "type": "string" @@ -415,7 +415,7 @@ "minItems": 1, "deprecated": true, "deprecated_version": "18.3", - "deprecated_description": "Use ``ssh_authorized_keys`` instead." + "deprecated_description": "Use **ssh_authorized_keys** instead." }, "ssh-import-id": { "type": "array", @@ -425,10 +425,10 @@ "minItems": 1, "deprecated": true, "deprecated_version": "24.2", - "deprecated_description": "Use ``ssh_import_id`` instead." + "deprecated_description": "Use **ssh_import_id** instead." }, "ssh_import_id": { - "description": "List of ssh ids to import for user. Can not be combined with ``ssh_redirect_user``. See the man page[1] for more details. [1] https://manpages.ubuntu.com/manpages/noble/en/man1/ssh-import-id.1.html.", + "description": "List of ssh ids to import for user. Can not be combined with **ssh_redirect_user**. See the man page[1] for more details. [1] https://manpages.ubuntu.com/manpages/noble/en/man1/ssh-import-id.1.html.", "type": "array", "items": { "type": "string" @@ -439,12 +439,12 @@ "type": "boolean", "deprecated": true, "deprecated_version": "24.2", - "deprecated_description": "Use ``ssh_redirect_user`` instead." + "deprecated_description": "Use **ssh_redirect_user** instead." }, "ssh_redirect_user": { "type": "boolean", "default": false, - "description": "Boolean set to true to disable SSH logins for this user. When specified, all cloud meta-data public SSH keys will be set up in a disabled state for this username. Any SSH login as this username will timeout and prompt with a message to login instead as the ``default_username`` for this instance. Default: ``false``. This key can not be combined with ``ssh_import_id`` or ``ssh_authorized_keys``." + "description": "Boolean set to true to disable SSH logins for this user. When specified, all cloud meta-data public SSH keys will be set up in a disabled state for this username. Any SSH login as this username will timeout and prompt with a message to login instead as the **default_username** for this instance. Default: ``false``. This key can not be combined with **ssh_import_id** or **ssh_authorized_keys**." }, "system": { "description": "Optional. Create user as system user with no home directory. 
Default: ``false``.", @@ -545,7 +545,7 @@ "type": "boolean", "deprecated": true, "deprecated_version": "22.3", - "deprecated_description": "Use ``remove_defaults`` instead." + "deprecated_description": "Use **remove_defaults** instead." }, "remove_defaults": { "description": "Remove default CA certificates if true. Default: ``false``.", @@ -961,7 +961,7 @@ "preserve_repositories": { "type": "boolean", "default": false, - "description": "By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos section of cloud config. To disable this behavior and preserve the repositories file from the pristine image, set ``preserve_repositories`` to ``true``.\nThe ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``." + "description": "By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos section of cloud config. To disable this behavior and preserve the repositories file from the pristine image, set **preserve_repositories** to ``true``.\nThe **preserve_repositories** option overrides all other config keys that would alter ``/etc/apk/repositories``." }, "alpine_repo": { "type": [ @@ -1013,7 +1013,7 @@ "preserve_sources_list": { "type": "boolean", "default": false, - "description": "By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this behavior and preserve the sources list from the pristine image, set ``preserve_sources_list`` to ``true``.\n\nThe ``preserve_sources_list`` option overrides all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added to ``sources.list.d``." + "description": "By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this behavior and preserve the sources list from the pristine image, set **preserve_sources_list** to ``true``.\n\nThe **preserve_sources_list** option overrides all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added to ``sources.list.d``." }, "disable_suites": { "type": "array", @@ -1022,11 +1022,11 @@ }, "minItems": 1, "uniqueItems": true, - "description": "Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is present in a suite in the ``disable_suites`` list, it will be replaced with the release name. If a suite specified in ``disable_suites`` is not present in ``sources.list`` it will be ignored. For convenience, several aliases are provided for`` disable_suites``:\n- ``updates`` => ``$RELEASE-updates``\n- ``backports`` => ``$RELEASE-backports``\n- ``security`` => ``$RELEASE-security``\n- ``proposed`` => ``$RELEASE-proposed``\n- ``release`` => ``$RELEASE``.\n\nWhen a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it is just commented out." + "description": "Entries in the sources list can be disabled using **disable_suites**, which takes a list of suites to be disabled. If the string ``$RELEASE`` is present in a suite in the **disable_suites** list, it will be replaced with the release name. 
If a suite specified in **disable_suites** is not present in ``sources.list`` it will be ignored. For convenience, several aliases are provided for **disable_suites**:\n- ``updates`` => ``$RELEASE-updates``\n- ``backports`` => ``$RELEASE-backports``\n- ``security`` => ``$RELEASE-security``\n- ``proposed`` => ``$RELEASE-proposed``\n- ``release`` => ``$RELEASE``.\n\nWhen a suite is disabled using **disable_suites**, its entry in ``sources.list`` is not deleted; it is just commented out." }, "primary": { "$ref": "#/$defs/apt_configure.mirror", - "description": "The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environment, with different hosts used for a local APT mirror. If no mirror is provided by ``uri`` or ``search``, ``search_dns`` may be used to search for dns names in the format ``-mirror`` in each of the following:\n- fqdn of this host per cloud metadata,\n- localdomain,\n- domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``-mirror``, then it is assumed that there is a distro mirror at ``http://-mirror./``. If the ``primary`` key is defined, but not the ``security`` key, then then configuration for ``primary`` is also used for ``security``. If ``search_dns`` is used for the ``security`` key, the search pattern will be ``-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n- ``keyid``: a key to import via shortid or fingerprint.\n- ``key``: a raw PGP key.\n- ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either the following defaults are used:\n- ``primary`` => ``http://archive.ubuntu.com/ubuntu``.\n- ``security`` => ``http://security.ubuntu.com/ubuntu``." + "description": "The primary and security archive mirrors can be specified using the **primary** and **security** keys, respectively. Both the **primary** and **security** keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for **arches**, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the **uri** key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environment, with different hosts used for a local APT mirror. 
If no mirror is provided by **uri** or **search**, **search_dns** may be used to search for dns names in the format ``<distro>-mirror`` in each of the following:\n- fqdn of this host per cloud metadata,\n- localdomain,\n- domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``<distro>-mirror``, then it is assumed that there is a distro mirror at ``http://<distro>-mirror.<domain>/``. If the **primary** key is defined, but not the **security** key, then the configuration for **primary** is also used for **security**. If **search_dns** is used for the **security** key, the search pattern will be ``<distro>-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n- **keyid**: a key to import via shortid or fingerprint.\n- **key**: a raw PGP key.\n- **keyserver**: alternate keyserver to pull **keyid** key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either the following defaults are used:\n- **primary** => ``http://archive.ubuntu.com/ubuntu``.\n- **security** => ``http://security.ubuntu.com/ubuntu``."
},
"security": {
"$ref": "#/$defs/apt_configure.mirror",
@@ -1035,7 +1035,7 @@
"add_apt_repo_match": {
"type": "string",
"default": "^[\\w-]+:\\w",
- "description": "All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults to ``^[\\w-]+:\\w``."
+ "description": "All source entries in ``apt-sources`` that match regex in **add_apt_repo_match** will be added to the system using ``add-apt-repository``. If **add_apt_repo_match** is not specified, it defaults to ``^[\\w-]+:\\w``."
},
"debconf_selections": {
"type": "object",
@@ -1046,11 +1046,11 @@
"type": "string"
}
},
- "description": "Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a different set of configurations. The value of each key must be a string containing all the debconf configurations that must be applied. We will bundle all of the values and pass them to ``debconf-set-selections``. Therefore, each value line must be a valid entry for ``debconf-set-selections``, meaning that they must possess for distinct fields:\n\n``pkgname question type answer``\n\nWhere:\n- ``pkgname`` is the name of the package.\n- ``question`` the name of the questions.\n- ``type`` is the type of question.\n- ``answer`` is the value used to answer the question.\n\nFor example: ``ippackage ippackage/ip string 127.0.01``."
+ "description": "Debconf additional configurations can be specified as a dictionary under the **debconf_selections** config key, with each key in the dict representing a different set of configurations. The value of each key must be a string containing all the debconf configurations that must be applied. We will bundle all of the values and pass them to **debconf-set-selections**. Therefore, each value line must be a valid entry for ``debconf-set-selections``, meaning that they must possess four distinct fields:\n\n``pkgname question type answer``\n\nWhere:\n- ``pkgname`` is the name of the package.\n- ``question`` is the name of the question.\n- ``type`` is the type of question.\n- ``answer`` is the value used to answer the question.\n\nFor example: ``ippackage ippackage/ip string 127.0.0.1``."
}, "sources_list": { "type": "string", - "description": "Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` template is given, cloud-init will use sane default. Within this template, the following strings will be replaced with the appropriate values:\n- ``$MIRROR``\n- ``$RELEASE``\n- ``$PRIMARY``\n- ``$SECURITY``\n- ``$KEY_FILE``" + "description": "Specifies a custom template for rendering ``sources.list`` . If no **sources_list** template is given, cloud-init will use sane default. Within this template, the following strings will be replaced with the appropriate values:\n- ``$MIRROR``\n- ``$RELEASE``\n- ``$PRIMARY``\n- ``$SECURITY``\n- ``$KEY_FILE``" }, "conf": { "type": "string", @@ -1103,7 +1103,7 @@ "minProperties": 1 } }, - "description": "Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source file. The key of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in ``sources``, no file will be written, but the key may still be referred to as an id in other ``sources`` entries.\n\nEach entry under ``sources`` is a dictionary which may contain any of the following optional keys:\n- ``source``: a sources.list entry (some variable replacements apply).\n- ``keyid``: a key to import via shortid or fingerprint.\n- ``key``: a raw PGP key.\n- ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n- ``filename``: specify the name of the list file.\n- ``append``: If ``true``, append to sources file, otherwise overwrite it. Default: ``true``.\n\nThe ``source`` key supports variable replacements for the following strings:\n- ``$MIRROR``\n- ``$PRIMARY``\n- ``$SECURITY``\n- ``$RELEASE``\n- ``$KEY_FILE``" + "description": "Source list entries can be specified as a dictionary under the **sources** config key, with each key in the dict representing a different source file. The key of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in **sources**, no file will be written, but the key may still be referred to as an id in other **sources** entries.\n\nEach entry under **sources** is a dictionary which may contain any of the following optional keys:\n- **source**: a sources.list entry (some variable replacements apply).\n- **keyid**: a key to import via shortid or fingerprint.\n- **key**: a raw PGP key.\n- **keyserver**: alternate keyserver to pull **keyid** key from.\n- **filename**: specify the name of the list file.\n- **append**: If ``true``, append to sources file, otherwise overwrite it. Default: ``true``.\n\nThe **source** key supports variable replacements for the following strings:\n- ``$MIRROR``\n- ``$PRIMARY``\n- ``$SECURITY``\n- ``$RELEASE``\n- ``$KEY_FILE``" } } } @@ -1131,7 +1131,7 @@ { "deprecated": true, "deprecated_version": "22.4", - "deprecated_description": "Use ``os`` instead.", + "deprecated_description": "Use **os** instead.", "enum": [ "none", "unchanged" @@ -1198,7 +1198,7 @@ { "deprecated": true, "deprecated_version": "22.3", - "deprecated_description": "Use ``ca_certs`` instead." 
+ "deprecated_description": "Use **ca_certs** instead." } ] } @@ -1470,7 +1470,7 @@ }, "device": { "type": "string", - "description": "Specified either as a path or as an alias in the format ``.`` where ```` denotes the partition number on the device. If specifying device using the ``.`` format, the value of ``partition`` will be overwritten." + "description": "Specified either as a path or as an alias in the format ``.`` where ```` denotes the partition number on the device. If specifying device using the ``.`` format, the value of **partition** will be overwritten." }, "partition": { "type": [ @@ -1487,15 +1487,15 @@ ] } ], - "description": "The partition can be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in which this module will search for the existence of a filesystem matching the ``label``, ``filesystem`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any filesystem that matches ``filesystem`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, and ``overwrite`` is ``false``." + "description": "The partition can be specified by setting **partition** to the desired partition number. The **partition** option may also be set to ``auto``, in which this module will search for the existence of a filesystem matching the **label**, **filesystem** and **device** of the **fs_setup** entry and will skip creating the filesystem if one is found. The **partition** option may also be set to ``any``, in which case any filesystem that matches **filesystem** and **device** will cause this module to skip filesystem creation for the **fs_setup** entry, regardless of **label** matching or not. To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the **label** and **filesystem** are matched, and ``overwrite`` is ``false``." }, "overwrite": { "type": "boolean", - "description": "If ``true``, overwrite any existing filesystem. Using ``overwrite: true`` for filesystems is **dangerous** and can lead to data loss, so double check the entry in ``fs_setup``. Default: ``false``." + "description": "If ``true``, overwrite any existing filesystem. Using ``overwrite: true`` for filesystems is **dangerous** and can lead to data loss, so double check the entry in **fs_setup**. Default: ``false``." }, "replace_fs": { "type": "string", - "description": "Ignored unless ``partition`` is ``auto`` or ``any``. Default ``false``." + "description": "Ignored unless **partition** is ``auto`` or ``any``. Default ``false``." }, "extra_opts": { "type": [ @@ -1505,7 +1505,7 @@ "items": { "type": "string" }, - "description": "Optional options to pass to the filesystem creation command. Ignored if you using ``cmd`` directly." + "description": "Optional options to pass to the filesystem creation command. Ignored if you using **cmd** directly." }, "cmd": { "type": [ @@ -1515,7 +1515,7 @@ "items": { "type": "string" }, - "description": "Optional command to run to create the filesystem. Can include string substitutions of the other ``fs_setup`` config keys. 
This is only necessary if you need to override the default command."
+ "description": "Optional command to run to create the filesystem. Can include string substitutions of the other **fs_setup** config keys. This is only necessary if you need to override the default command."
}
}
}
@@ -1579,7 +1579,7 @@
],
"changed": true,
"changed_version": "22.3",
- "changed_description": "Specifying a boolean ``false`` value for ``mode`` is deprecated. Use the string ``'off'`` instead."
+ "changed_description": "Specifying a boolean ``false`` value for **mode** is deprecated. Use the string ``'off'`` instead."
}
]
},
@@ -1619,7 +1619,7 @@
"description": "Device to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot`` will be used to find the device."
},
"grub-pc/install_devices_empty": {
- "description": "Sets values for ``grub-pc/install_devices_empty``. If unspecified, will be set to ``true`` if ``grub-pc/install_devices`` is empty, otherwise ``false``.",
+ "description": "Sets values for **grub-pc/install_devices_empty**. If unspecified, will be set to ``true`` if **grub-pc/install_devices** is empty, otherwise ``false``.",
"oneOf": [
{
"type": "boolean"
@@ -1642,7 +1642,7 @@
"type": "object",
"deprecated": true,
"deprecated_version": "22.2",
- "deprecated_description": "Use ``grub_dpkg`` instead."
+ "deprecated_description": "Use **grub_dpkg** instead."
}
}
},
@@ -1844,7 +1844,7 @@
"init": {
"type": "object",
"additionalProperties": false,
- "description": "LXD init configuration values to provide to `lxd init --auto` command. Can not be combined with ``lxd.preseed``.",
+ "description": "LXD init configuration values to provide to `lxd init --auto` command. Can not be combined with **lxd.preseed**.",
"properties": {
"network_address": {
"type": "string",
@@ -1889,11 +1889,11 @@
"mode"
],
"additionalProperties": false,
- "description": "LXD bridge configuration provided to setup the host lxd bridge. Can not be combined with ``lxd.preseed``.",
+ "description": "LXD bridge configuration provided to setup the host lxd bridge. Can not be combined with **lxd.preseed**.",
"properties": {
"mode": {
"type": "string",
- "description": "Whether to setup LXD bridge, use an existing bridge by ``name`` or create a new bridge. `none` will avoid bridge setup, `existing` will configure lxd to use the bring matching ``name`` and `new` will create a new bridge.",
+ "description": "Whether to setup LXD bridge, use an existing bridge by **name** or create a new bridge. `none` will avoid bridge setup, `existing` will configure lxd to use the bridge matching **name** and `new` will create a new bridge.",
"enum": [
"none",
"existing",
"new"
]
},
@@ -1913,19 +1913,19 @@
},
"ipv4_address": {
"type": "string",
- "description": "IPv4 address for the bridge. If set, ``ipv4_netmask`` key required."
+ "description": "IPv4 address for the bridge. If set, **ipv4_netmask** key required."
},
"ipv4_netmask": {
"type": "integer",
- "description": "Prefix length for the ``ipv4_address`` key. Required when ``ipv4_address`` is set."
+ "description": "Prefix length for the **ipv4_address** key. Required when **ipv4_address** is set."
},
"ipv4_dhcp_first": {
"type": "string",
- "description": "First IPv4 address of the DHCP range for the network created. This value will combined with ``ipv4_dhcp_last`` key to set LXC ``ipv4.dhcp.ranges``."
+ "description": "First IPv4 address of the DHCP range for the network created. This value will be combined with the **ipv4_dhcp_last** key to set LXC **ipv4.dhcp.ranges**."
},
"ipv4_dhcp_last": {
"type": "string",
- "description": "Last IPv4 address of the DHCP range for the network created. This value will combined with ``ipv4_dhcp_first`` key to set LXC ``ipv4.dhcp.ranges``."
+ "description": "Last IPv4 address of the DHCP range for the network created. This value will be combined with the **ipv4_dhcp_first** key to set LXC **ipv4.dhcp.ranges**."
},
"ipv4_dhcp_leases": {
"type": "integer",
@@ -1938,11 +1938,11 @@
},
"ipv6_address": {
"type": "string",
- "description": "IPv6 address for the bridge (CIDR notation). When set, ``ipv6_netmask`` key is required. When absent, no IPv6 will be configured."
+ "description": "IPv6 address for the bridge (CIDR notation). When set, **ipv6_netmask** key is required. When absent, no IPv6 will be configured."
},
"ipv6_netmask": {
"type": "integer",
- "description": "Prefix length for ``ipv6_address`` provided. Required when ``ipv6_address`` is set."
+ "description": "Prefix length for **ipv6_address** provided. Required when **ipv6_address** is set."
},
"ipv6_nat": {
"type": "boolean",
@@ -1957,7 +1957,7 @@
},
"preseed": {
"type": "string",
- "description": "Opaque LXD preseed YAML config passed via stdin to the command: lxd init --preseed. See: https://documentation.ubuntu.com/lxd/en/latest/howto/initialize/#non-interactive-configuration or lxd init --dump for viable config. Can not be combined with either ``lxd.init`` or ``lxd.bridge``."
+ "description": "Opaque LXD preseed YAML config passed via stdin to the command: lxd init --preseed. See: https://documentation.ubuntu.com/lxd/en/latest/howto/initialize/#non-interactive-configuration or lxd init --dump for viable config. Can not be combined with either **lxd.init** or **lxd.bridge**."
}
}
}
@@ -2017,7 +2017,7 @@
"minItems": 1,
"maxItems": 6
},
- "description": "List of lists. Each inner list entry is a list of ``/etc/fstab`` mount declarations of the format: [ fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno ]. A mount declaration with less than 6 items will get remaining values from ``mount_default_fields``. A mount declaration with only `fs_spec` and no `fs_file` mountpoint will be skipped.",
+ "description": "List of lists. Each inner list entry is a list of ``/etc/fstab`` mount declarations of the format: [ fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno ]. A mount declaration with fewer than 6 items will get remaining values from **mount_default_fields**. A mount declaration with only `fs_spec` and no `fs_file` mountpoint will be skipped.",
"minItems": 1
},
"mount_default_fields": {
@@ -2141,18 +2141,18 @@
"description": "Attempt to enable ntp clients if set to True. If set to ``false``, ntp client will not be configured or installed."
},
"config": {
- "description": "Configuration settings or overrides for the ``ntp_client`` specified.",
+ "description": "Configuration settings or overrides for the **ntp_client** specified.",
"type": "object",
"minProperties": 1,
"additionalProperties": false,
"properties": {
"confpath": {
"type": "string",
- "description": "The path to where the ``ntp_client`` configuration is written."
+ "description": "The path to where the **ntp_client** configuration is written."
},
"check_exe": {
"type": "string",
- "description": "The executable name for the ``ntp_client``. For example, ntp service ``check_exe`` is 'ntpd' because it runs the ntpd binary."
+ "description": "The executable name for the **ntp_client**. For example, ntp service **check_exe** is 'ntpd' because it runs the ntpd binary."
}, "packages": { "type": "array", @@ -2160,15 +2160,15 @@ "type": "string" }, "uniqueItems": true, - "description": "List of packages needed to be installed for the selected ``ntp_client``." + "description": "List of packages needed to be installed for the selected **ntp_client**." }, "service_name": { "type": "string", - "description": "The systemd or sysvinit service name used to start and stop the ``ntp_client`` service." + "description": "The systemd or sysvinit service name used to start and stop the **ntp_client** service." }, "template": { "type": "string", - "description": "Inline template allowing users to customize their ``ntp_client`` configuration with the use of the Jinja templating engine. The template content should start with ``## template:jinja``. Within the template, you can utilize any of the following ntp module config keys: ``servers``, ``pools``, ``allow``, and ``peers``. Each cc_ntp schema config key and expected value type is defined above." + "description": "Inline template allowing users to customize their **ntp_client** configuration with the use of the Jinja templating engine. The template content should start with ``## template:jinja``. Within the template, you can utilize any of the following ntp module config keys: **servers**, **pools**, **allow**, and **peers**. Each cc_ntp schema config key and expected value type is defined above." } } } @@ -2228,19 +2228,19 @@ "type": "boolean", "deprecated": true, "deprecated_version": "22.2", - "deprecated_description": "Use ``package_update`` instead." + "deprecated_description": "Use **package_update** instead." }, "apt_upgrade": { "type": "boolean", "deprecated": true, "deprecated_version": "22.2", - "deprecated_description": "Use ``package_upgrade`` instead." + "deprecated_description": "Use **package_upgrade** instead." }, "apt_reboot_if_required": { "type": "boolean", "deprecated": true, "deprecated_version": "22.2", - "deprecated_description": "Use ``package_reboot_if_required`` instead." + "deprecated_description": "Use **package_reboot_if_required** instead." } } }, @@ -2388,37 +2388,37 @@ }, "collection": { "type": "string", - "description": "Puppet collection to install if ``install_type`` is ``aio``. This can be set to one of ``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly counterparts) in order to install specific release streams." + "description": "Puppet collection to install if **install_type** is ``aio``. This can be set to one of ``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly counterparts) in order to install specific release streams." }, "aio_install_url": { "type": "string", - "description": "If ``install_type`` is ``aio``, change the url of the install script." + "description": "If **install_type** is ``aio``, change the url of the install script." }, "cleanup": { "type": "boolean", "default": true, - "description": "Whether to remove the puppetlabs repo after installation if ``install_type`` is ``aio`` Default: ``true``." + "description": "Whether to remove the puppetlabs repo after installation if **install_type** is ``aio`` Default: ``true``." }, "conf_file": { "type": "string", - "description": "The path to the puppet config file. Default depends on ``install_type``." + "description": "The path to the puppet config file. Default depends on **install_type**." }, "ssl_dir": { "type": "string", - "description": "The path to the puppet SSL directory. Default depends on ``install_type``." + "description": "The path to the puppet SSL directory. 
Default depends on **install_type**." }, "csr_attributes_path": { "type": "string", - "description": "The path to the puppet csr attributes file. Default depends on ``install_type``." + "description": "The path to the puppet csr attributes file. Default depends on **install_type**." }, "package_name": { "type": "string", - "description": "Name of the package to install if ``install_type`` is ``packages``. Default: ``puppet``." + "description": "Name of the package to install if **install_type** is ``packages``. Default: ``puppet``." }, "exec": { "type": "boolean", "default": false, - "description": "Whether or not to run puppet after configuration finishes. A single manual run can be triggered by setting ``exec`` to ``true``, and additional arguments can be passed to ``puppet agent`` via the ``exec_args`` key (by default the agent will execute with the ``--test`` flag). Default: ``false``." + "description": "Whether or not to run puppet after configuration finishes. A single manual run can be triggered by setting **exec** to ``true``, and additional arguments can be passed to ``puppet agent`` via the **exec_args** key (by default the agent will execute with the ``--test`` flag). Default: ``false``." }, "exec_args": { "type": "array", @@ -2430,7 +2430,7 @@ "start_service": { "type": "boolean", "default": true, - "description": "By default, the puppet service will be automatically enabled after installation and set to automatically start on boot. To override this in favor of manual puppet execution set ``start_service`` to ``false``." + "description": "By default, the puppet service will be automatically enabled after installation and set to automatically start on boot. To override this in favor of manual puppet execution set **start_service** to ``false``." }, "conf": { "type": "object", @@ -2490,7 +2490,7 @@ "manage_resolv_conf": { "type": "boolean", "default": false, - "description": "Whether to manage the resolv.conf file. ``resolv_conf`` block will be ignored unless this is set to ``true``. Default: ``false``." + "description": "Whether to manage the resolv.conf file. **resolv_conf** block will be ignored unless this is set to ``true``. Default: ``false``." }, "resolv_conf": { "type": "object", @@ -2529,18 +2529,18 @@ "properties": { "username": { "type": "string", - "description": "The username to use. Must be used with password. Should not be used with ``activation-key`` or ``org``." + "description": "The username to use. Must be used with password. Should not be used with **activation-key** or **org**." }, "password": { "type": "string", - "description": "The password to use. Must be used with username. Should not be used with ``activation-key`` or ``org``." + "description": "The password to use. Must be used with username. Should not be used with **activation-key** or **org**." }, "activation-key": { "type": "string", - "description": "The activation key to use. Must be used with ``org``. Should not be used with ``username`` or ``password``." + "description": "The activation key to use. Must be used with **org**. Should not be used with **username** or **password**." }, "org": { - "description": "The organization to use. Must be used with ``activation-key``. Should not be used with ``username`` or ``password``.", + "description": "The organization to use. Must be used with **activation-key**. 
Should not be used with **username** or **password**.", "oneOf": [ { "type": "string" @@ -2611,7 +2611,7 @@ }, "configs": { "type": "array", - "description": "Each entry in ``configs`` is either a string or an object. Each config entry contains a configuration string and a file to write it to. For config entries that are an object, ``filename`` sets the target filename and ``content`` specifies the config string to write. For config entries that are only a string, the string is used as the config string to write. If the filename to write the config to is not specified, the value of the ``config_filename`` key is used. A file with the selected filename will be written inside the directory specified by ``config_dir``.", + "description": "Each entry in **configs** is either a string or an object. Each config entry contains a configuration string and a file to write it to. For config entries that are an object, **filename** sets the target filename and **content** specifies the config string to write. For config entries that are only a string, the string is used as the config string to write. If the filename to write the config to is not specified, the value of the **config_filename** key is used. A file with the selected filename will be written inside the directory specified by **config_dir**.", "items": { "oneOf": [ { @@ -2670,7 +2670,7 @@ "type": "string" }, "uniqueItems": true, - "description": "List of packages needed to be installed for rsyslog. This is only used if ``install_rsyslog`` is ``true``. Default: ``[rsyslog]``." + "description": "List of packages needed to be installed for rsyslog. This is only used if **install_rsyslog** is ``true``. Default: ``[rsyslog]``." } } } @@ -2797,7 +2797,7 @@ }, "data": { "type": "string", - "description": "This data will be written to ``file`` before data from the datasource. When using a multi-line value or specifying binary data, be sure to follow YAML syntax and use the ``|`` and ``!binary`` YAML format specifiers when appropriate." + "description": "This data will be written to **file** before data from the datasource. When using a multi-line value or specifying binary data, be sure to follow YAML syntax and use the ``|`` and ``!binary`` YAML format specifiers when appropriate." }, "encoding": { "type": "string", @@ -2809,19 +2809,19 @@ "gzip", "gz" ], - "description": "Used to decode ``data`` provided. Allowed values are ``raw``, ``base64``, ``b64``, ``gzip``, or ``gz``. Default: ``raw``." + "description": "Used to decode **data** provided. Allowed values are ``raw``, ``base64``, ``b64``, ``gzip``, or ``gz``. Default: ``raw``." }, "command": { "type": "array", "items": { "type": "string" }, - "description": "Execute this command to seed random. The command will have RANDOM_SEED_FILE in its environment set to the value of ``file`` above." + "description": "Execute this command to seed random. The command will have RANDOM_SEED_FILE in its environment set to the value of **file** above." }, "command_required": { "type": "boolean", "default": false, - "description": "If true, and ``command`` is not available to be run then an exception is raised and cloud-init will record failure. Otherwise, only debug error is mentioned. Default: ``false``." + "description": "If true, and **command** is not available to be run then an exception is raised and cloud-init will record failure. Otherwise, only debug error is mentioned. Default: ``false``." 
} } } @@ -2881,7 +2881,7 @@ "description": "Whether to expire all user passwords such that a password will need to be reset on the user's next login. Default: ``true``." }, "users": { - "description": "This key represents a list of existing users to set passwords for. Each item under users contains the following required keys: ``name`` and ``password`` or in the case of a randomly generated password, ``name`` and ``type``. The ``type`` key has a default value of ``hash``, and may alternatively be set to ``text`` or ``RANDOM``. Randomly generated passwords may be insecure, use at your own risk.", + "description": "This key represents a list of existing users to set passwords for. Each item under users contains the following required keys: **name** and **password** or in the case of a randomly generated password, **name** and **type**. The **type** key has a default value of ``hash``, and may alternatively be set to ``text`` or ``RANDOM``. Randomly generated passwords may be insecure; use at your own risk.", "type": "array", "items": { "minItems": 1, @@ -2947,13 +2947,13 @@ "minItems": 1, "deprecated": true, "deprecated_version": "22.2", - "deprecated_description": "Use ``users`` instead." + "deprecated_description": "Use **users** instead." } } }, "password": { "type": "string", - "description": "Set the default user's password. Ignored if ``chpasswd`` ``list`` is used." + "description": "Set the default user's password. Ignored if **chpasswd** ``list`` is used." } } }, @@ -2966,7 +2966,7 @@ "additionalProperties": false, "properties": { "assertions": { - "description": "Properly-signed snap assertions which will run before and snap ``commands``.", + "description": "Properly-signed snap assertions which will run before any snap **commands**.", "type": [ "object", "array" @@ -3077,7 +3077,7 @@ "properties": { "ssh_keys": { "type": "object", - "description": "A dictionary entries for the public and private host keys of each desired key type. Entries in the ``ssh_keys`` config dict should have keys in the format ``_private``, ``_public``, and, optionally, ``_certificate``, e.g. ``rsa_private: ``, ``rsa_public: ``, and ``rsa_certificate: ``. Not all key types have to be specified, ones left unspecified will not be used. If this config option is used, then separate keys will not be automatically generated. In order to specify multi-line private host keys and certificates, use YAML multi-line syntax. **Note:** Your ssh keys might possibly be visible to unprivileged users on your system, depending on your cloud's security model.", + "description": "A dictionary of entries for the public and private host keys of each desired key type. Entries in the **ssh_keys** config dict should have keys in the format ``_private``, ``_public``, and, optionally, ``_certificate``, e.g. ``rsa_private: ``, ``rsa_public: ``, and ``rsa_certificate: ``. Not all key types have to be specified, ones left unspecified will not be used. If this config option is used, then separate keys will not be automatically generated. In order to specify multi-line private host keys and certificates, use YAML multi-line syntax.
**Note:** Your ssh keys might possibly be visible to unprivileged users on your system, depending on your cloud's security model.", "additionalProperties": false, "patternProperties": { "^(ecdsa|ed25519|rsa)_(public|private|certificate)$": { @@ -3125,7 +3125,7 @@ "disable_root_opts": { "type": "string", "default": "``no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo 'Please login as the user \\\"$USER\\\" rather than the user \\\"$DISABLE_USER\\\".';echo;sleep 10;exit 142\"``", - "description": "Disable root login options. If ``disable_root_opts`` is specified and contains the string ``$USER``, it will be replaced with the username of the default user. Default: ``no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo 'Please login as the user \\\"$USER\\\" rather than the user \\\"$DISABLE_USER\\\".';echo;sleep 10;exit 142\"``." + "description": "Disable root login options. If **disable_root_opts** is specified and contains the string ``$USER``, it will be replaced with the username of the default user. Default: ``no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo 'Please login as the user \\\"$USER\\\" rather than the user \\\"$DISABLE_USER\\\".';echo;sleep 10;exit 142\"``." }, "allow_public_ssh_keys": { "type": "boolean", @@ -3204,7 +3204,7 @@ "$ref": "#/$defs/ubuntu_pro.properties", "deprecated": true, "deprecated_version": "24.1", - "deprecated_description": "Use ``ubuntu_pro`` instead." + "deprecated_description": "Use **ubuntu_pro** instead." } } }, @@ -3226,7 +3226,7 @@ "enum": [ "template" ], - "changed_description": "Use of ``template`` is deprecated, use ``true`` instead.", + "changed_description": "Use of **template** is deprecated, use ``true`` instead.", "changed": true, "changed_version": "22.3" } @@ -3234,11 +3234,11 @@ }, "fqdn": { "type": "string", - "description": "Optional fully qualified domain name to use when updating ``/etc/hosts``. Preferred over ``hostname`` if both are provided. In absence of ``hostname`` and ``fqdn`` in cloud-config, the ``local-hostname`` value will be used from datasource metadata." + "description": "Optional fully qualified domain name to use when updating ``/etc/hosts``. Preferred over **hostname** if both are provided. In absence of **hostname** and **fqdn** in cloud-config, the ``local-hostname`` value will be used from datasource metadata." }, "hostname": { "type": "string", - "description": "Hostname to set when rendering ``/etc/hosts``. If ``fqdn`` is set, the hostname extracted from ``fqdn`` overrides ``hostname``." + "description": "Hostname to set when rendering ``/etc/hosts``. If **fqdn** is set, the hostname extracted from **fqdn** overrides **hostname**." } } }, @@ -3253,7 +3253,7 @@ "prefer_fqdn_over_hostname": { "type": "boolean", "default": null, - "description": "By default, it is distro-dependent whether cloud-init uses the short hostname or fully qualified domain name when both ``local-hostname` and ``fqdn`` are both present in instance metadata. When set ``true``, use fully qualified domain name if present as hostname instead of short hostname.
When set ``false``, use **hostname** config value if present, otherwise fallback to **fqdn**." }, "create_hostname_file": { "type": "boolean", @@ -3294,7 +3294,7 @@ "$ref": "#/$defs/users_groups.user" } ], - "description": "The ``user`` dictionary values override the ``default_user`` configuration from ``/etc/cloud/cloud.cfg``. The ``user`` dictionary keys supported for the default_user are the same as the ``users`` schema." + "description": "The **user** dictionary values override the **default_user** configuration from ``/etc/cloud/cloud.cfg``. The **user** dictionary keys supported for the default_user are the same as the **users** schema." }, "users": { "type": [ @@ -3384,12 +3384,12 @@ "properties": { "path": { "type": "string", - "description": "Path of the file to which ``content`` is decoded and written." + "description": "Path of the file to which **content** is decoded and written." }, "content": { "type": "string", "default": "''", - "description": "Optional content to write to the provided ``path``. When content is present and encoding is not 'text/plain', decode the content prior to writing. Default: ``''``." + "description": "Optional content to write to the provided **path**. When content is present and encoding is not 'text/plain', decode the content prior to writing. Default: ``''``." }, "source": { "type": "object", @@ -3399,7 +3399,7 @@ "uri": { "type": "string", "format": "uri", - "description": "URI from which to load file content. If loading fails repeatedly, ``content`` is used instead." + "description": "URI from which to load file content. If loading fails repeatedly, **content** is used instead." }, "headers": { "type": "object", @@ -3421,7 +3421,7 @@ "permissions": { "type": "string", "default": "'0o644'", - "description": "Optional file permissions to set on ``path`` represented as an octal string '0###'. Default: ``0o644``." + "description": "Optional file permissions to set on **path** represented as an octal string '0###'. Default: ``0o644``." }, "encoding": { "type": "string", @@ -3442,7 +3442,7 @@ "append": { "type": "boolean", "default": false, - "description": "Whether to append ``content`` to existing file if ``path`` exists. Default: ``false``." + "description": "Whether to append **content** to existing file if **path** exists. Default: ``false``." }, "defer": { "type": "boolean", diff --git a/tests/unittests/config/test_cc_ca_certs.py b/tests/unittests/config/test_cc_ca_certs.py index 7013a95dbe8..7b811caf43c 100644 --- a/tests/unittests/config/test_cc_ca_certs.py +++ b/tests/unittests/config/test_cc_ca_certs.py @@ -390,10 +390,12 @@ class TestCACertsSchema: # Valid, yet deprecated schemas ( {"ca-certs": {"remove-defaults": True}}, - "Cloud config schema deprecations: ca-certs: " - "Deprecated in version 22.3. Use ``ca_certs`` instead.," - " ca-certs.remove-defaults: Deprecated in version 22.3" - ". Use ``remove_defaults`` instead.", + re.escape( + "Cloud config schema deprecations: ca-certs: " + "Deprecated in version 22.3. Use **ca_certs** instead.," + " ca-certs.remove-defaults: Deprecated in version 22.3" + ". Use **remove_defaults** instead." 
+ ), ), # Invalid schemas ( diff --git a/tests/unittests/config/test_cc_growpart.py b/tests/unittests/config/test_cc_growpart.py index 2be728b15c9..8137ac508a5 100644 --- a/tests/unittests/config/test_cc_growpart.py +++ b/tests/unittests/config/test_cc_growpart.py @@ -774,11 +774,11 @@ class TestGrowpartSchema: {"growpart": {"mode": False}}, pytest.raises( SchemaValidationError, - match=( + match=re.escape( "Cloud config schema deprecations: " "growpart.mode: Changed in version 22.3. " "Specifying a boolean ``false`` value for " - "``mode`` is deprecated. Use the string ``'off'`` " + "**mode** is deprecated. Use the string ``'off'`` " "instead." ), ), diff --git a/tests/unittests/config/test_cc_grub_dpkg.py b/tests/unittests/config/test_cc_grub_dpkg.py index 36ef7fd9821..dfefca47429 100644 --- a/tests/unittests/config/test_cc_grub_dpkg.py +++ b/tests/unittests/config/test_cc_grub_dpkg.py @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. +import re from unittest import mock import pytest @@ -299,10 +300,10 @@ class TestGrubDpkgSchema: {"grub-dpkg": {"grub-pc/install_devices_empty": False}}, pytest.raises( SchemaValidationError, - match=( + match=re.escape( "Cloud config schema deprecations: grub-dpkg:" " Deprecated in version 22.2. Use " - "``grub_dpkg`` instead." + "**grub_dpkg** instead." ), ), False, diff --git a/tests/unittests/config/test_cc_package_update_upgrade_install.py b/tests/unittests/config/test_cc_package_update_upgrade_install.py index c1ede2bc574..1cc82d71fa7 100644 --- a/tests/unittests/config/test_cc_package_update_upgrade_install.py +++ b/tests/unittests/config/test_cc_package_update_upgrade_install.py @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. import logging +import re from unittest import mock import pytest @@ -299,26 +300,26 @@ class TestPackageUpdateUpgradeSchema: ({"packages": []}, SCHEMA_EMPTY_ERROR), ( {"apt_update": False}, - ( + re.escape( "Cloud config schema deprecations: apt_update: " "Deprecated in version 22.2. " - "Use ``package_update`` instead." + "Use **package_update** instead." ), ), ( {"apt_upgrade": False}, - ( + re.escape( "Cloud config schema deprecations: apt_upgrade: " "Deprecated in version 22.2. " - "Use ``package_upgrade`` instead." + "Use **package_upgrade** instead." ), ), ( {"apt_reboot_if_required": False}, - ( + re.escape( "Cloud config schema deprecations: " "apt_reboot_if_required: Deprecated in version 22.2. Use " - "``package_reboot_if_required`` instead." + "**package_reboot_if_required** instead." ), ), ], diff --git a/tests/unittests/config/test_cc_ubuntu_pro.py b/tests/unittests/config/test_cc_ubuntu_pro.py index 07ba8c69bc8..25794c70c64 100644 --- a/tests/unittests/config/test_cc_ubuntu_pro.py +++ b/tests/unittests/config/test_cc_ubuntu_pro.py @@ -444,7 +444,7 @@ class TestUbuntuProSchema: SchemaValidationError, match=re.escape( "ubuntu_advantage: Deprecated in version 24.1." 
- " Use ``ubuntu_pro`` instead" + " Use **ubuntu_pro** instead" ), ), # If __version__ no longer exists on jsonschema, that means diff --git a/tests/unittests/config/test_cc_update_etc_hosts.py b/tests/unittests/config/test_cc_update_etc_hosts.py index 8c53c24726d..6ede9954c1b 100644 --- a/tests/unittests/config/test_cc_update_etc_hosts.py +++ b/tests/unittests/config/test_cc_update_etc_hosts.py @@ -87,10 +87,10 @@ class TestUpdateEtcHosts: {"manage_etc_hosts": "template"}, pytest.raises( SchemaValidationError, - match=( + match=re.escape( "Cloud config schema deprecations: " "manage_etc_hosts: Changed in version 22.3. " - "Use of ``template`` is deprecated, use " + "Use of **template** is deprecated, use " "``true`` instead." ), ), diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py index 9fac84d771b..ec2b18337e5 100644 --- a/tests/unittests/config/test_cc_users_groups.py +++ b/tests/unittests/config/test_cc_users_groups.py @@ -372,9 +372,11 @@ class TestUsersGroupsSchema: pytest.raises( SchemaValidationError, match=( - "Cloud config schema deprecations: " - "users.0.lock-passwd: Deprecated in version 22.3." - " Use ``lock_passwd`` instead." + re.escape( + "Cloud config schema deprecations: " + "users.0.lock-passwd: Deprecated in version 22.3." + " Use **lock_passwd** instead." + ) ), ), False, @@ -384,9 +386,11 @@ class TestUsersGroupsSchema: pytest.raises( SchemaValidationError, match=( - "Cloud config schema deprecations: " - "users.0.no-create-home: Deprecated in version 24.2." - " Use ``no_create_home`` instead." + re.escape( + "Cloud config schema deprecations: " + "users.0.no-create-home: Deprecated in version" + " 24.2. Use **no_create_home** instead." + ) ), ), False, @@ -527,15 +531,17 @@ class TestUsersGroupsSchema: pytest.raises( SchemaValidationError, match=( - "Cloud config schema deprecations: " - "users.0.ssh-authorized-keys: " - " Deprecated in version 18.3." - " Use ``ssh_authorized_keys`` instead." - ", " - "users.0.uid: " - " Changed in version 22.3." - " The use of ``string`` type is deprecated." - " Use an ``integer`` instead." + re.escape( + "Cloud config schema deprecations: " + "users.0.ssh-authorized-keys: " + " Deprecated in version 18.3." + " Use **ssh_authorized_keys** instead." + ", " + "users.0.uid: " + " Changed in version 22.3." + " The use of ``string`` type is deprecated." + " Use an ``integer`` instead." + ) ), ), False, diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py index 184857583fb..590e0899d54 100644 --- a/tests/unittests/config/test_schema.py +++ b/tests/unittests/config/test_schema.py @@ -2751,9 +2751,9 @@ def test_handle_schema_unable_to_read_cfg_paths( apt_reboot_if_required: true # D3 # Deprecations: ------------- - # D1: Deprecated in version 22.2. Use ``package_update`` instead. - # D2: Deprecated in version 22.2. Use ``package_upgrade`` instead. - # D3: Deprecated in version 22.2. Use ``package_reboot_if_required`` instead. + # D1: Deprecated in version 22.2. Use **package_update** instead. + # D2: Deprecated in version 22.2. Use **package_upgrade** instead. + # D3: Deprecated in version 22.2. Use **package_reboot_if_required** instead. Valid schema {cfg_file} """ # noqa: E501 @@ -2773,9 +2773,9 @@ def test_handle_schema_unable_to_read_cfg_paths( apt_reboot_if_required: true # D3 # Deprecations: ------------- - # D1: Deprecated in version 22.2. Use ``package_update`` instead. - # D2: Deprecated in version 22.2. 
Use ``package_upgrade`` instead. - # D3: Deprecated in version 22.2. Use ``package_reboot_if_required`` instead. + # D1: Deprecated in version 22.2. Use **package_update** instead. + # D2: Deprecated in version 22.2. Use **package_upgrade** instead. + # D3: Deprecated in version 22.2. Use **package_reboot_if_required** instead. Valid schema {cfg_file} """ # noqa: E501 @@ -2789,9 +2789,9 @@ def test_handle_schema_unable_to_read_cfg_paths( """\ Cloud config schema deprecations: \ apt_reboot_if_required: Deprecated in version 22.2. Use\ - ``package_reboot_if_required`` instead., apt_update: Deprecated in version\ - 22.2. Use ``package_update`` instead., apt_upgrade: Deprecated in version\ - 22.2. Use ``package_upgrade`` instead.\ + **package_reboot_if_required** instead., apt_update: Deprecated in version\ + 22.2. Use **package_update** instead., apt_upgrade: Deprecated in version\ + 22.2. Use **package_upgrade** instead.\ Valid schema {cfg_file} """ # noqa: E501 ), From 79e5d31ce3050bdc07339167e7055d09fc181bd4 Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Tue, 20 Aug 2024 16:17:56 +0200 Subject: [PATCH 109/131] test: fix ca_certs int test (#5626) Remove additional \n which is not present if only one ca_cert is in the instance. --- tests/integration_tests/modules/test_ca_certs.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/integration_tests/modules/test_ca_certs.py index 9e740dda228..f50f089a556 100644 --- a/tests/integration_tests/modules/test_ca_certs.py +++ b/tests/integration_tests/modules/test_ca_certs.py @@ -51,8 +51,7 @@ CQVqfbscp7evlgjLW98H+5zylRHAgoH2G79aHljNKMp9BOuq6SnEglEsiWGVtu2l hnx8SB3sVJZHeer8f/UQQwqbAO+Kdy70NmbSaqaVtp8jOxLiidWkwSyRTsuU6D8i DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ== ------END CERTIFICATE----- -""" +-----END CERTIFICATE-----""" USER_DATA = f"""\ #cloud-config From 0411057e60eec849406c3afade9436b400d359ef Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 15 Aug 2024 12:59:13 -0600 Subject: [PATCH 110/131] docs: new datasources should update reference/ds_dsname_map (#5624) --- doc/rtd/development/datasource_creation.rst | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/doc/rtd/development/datasource_creation.rst index 1b6e525b122..82d28c3d7e5 100644 --- a/doc/rtd/development/datasource_creation.rst +++ b/doc/rtd/development/datasource_creation.rst @@ -158,10 +158,11 @@ packaging configuration. Add documentation for your datasource ------------------------------------- -You should add a new file in -:file:`doc/rtd/reference/datasources/.rst` -and reference it in -:file:`doc/rtd/reference/datasources.rst` +You should update the following docs: +1. Add a new file in :file:`doc/rtd/reference/datasources/.rst` +2. Reference `.rst` in :file:`doc/rtd/reference/datasources.rst` +3. Add an alphabetized dsname entry representing your datasource in +:file:`doc/rtd/reference/datasource_dsname_map.rst` Benefits of including your datasource in upstream cloud-init ============================================================ From ca9ffac833b69512f108490815c63a281168a1de Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 15 Aug 2024 13:01:30 -0600 Subject: [PATCH 111/131] docs: alphabetize dsname lookup table.
update comment to create the csv (#5624) --- doc/rtd/reference/datasource_dsname_map.rst | 45 +++++++++++---------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/doc/rtd/reference/datasource_dsname_map.rst b/doc/rtd/reference/datasource_dsname_map.rst index 3861ec3491f..b5b5a8db7b1 100644 --- a/doc/rtd/reference/datasource_dsname_map.rst +++ b/doc/rtd/reference/datasource_dsname_map.rst @@ -13,7 +13,7 @@ mapping between datasource module names and ``dsname`` in the table below. .. generate the following map with the following one-liner: - find cloudinit/sources -name 'DataSource*.py' \ + find cloudinit/sources -name 'DataSource*.py' | sort -u \ | xargs grep 'dsname =' \ | awk -F '[/:"]' 'BEGIN { print "**Datasource Module**, **dsname**" }\ {print $3 ", " $5}' @@ -23,32 +23,33 @@ mapping between datasource module names and ``dsname`` in the table below. :align: left **Datasource Module**, **dsname** - DataSourceRbxCloud.py, RbxCloud + DataSourceAkamai.py, Akamai + DataSourceAliYun.py, AliYun + DataSourceAltCloud.py, AltCloud + DataSourceAzure.py, Azure + DataSourceBigstep.py, Bigstep + DataSourceCloudSigma.py, CloudSigma + DataSourceCloudStack.py, CloudStack DataSourceConfigDrive.py, ConfigDrive - DataSourceNoCloud.py, NoCloud - DataSourceVultr.py, Vultr - DataSourceEc2.py, Ec2 - DataSourceOracle.py, Oracle - DataSourceMAAS.py, MAAS DataSourceDigitalOcean.py, DigitalOcean - DataSourceNone.py, None - DataSourceSmartOS.py, Joyent + DataSourceEc2.py, Ec2 + DataSourceExoscale.py, Exoscale + DataSourceGCE.py, GCE DataSourceHetzner.py, Hetzner + DataSourceIBMCloud.py, IBMCloud DataSourceLXD.py, LXD + DataSourceMAAS.py, MAAS + DataSourceNoCloud.py, NoCloud + DataSourceNone.py, None + DataSourceNWCS.py, NWCS DataSourceOpenNebula.py, OpenNebula - DataSourceAzure.py, Azure - DataSourceGCE.py, GCE - DataSourceScaleway.py, Scaleway - DataSourceAltCloud.py, AltCloud - DataSourceCloudSigma.py, CloudSigma - DataSourceBigstep.py, Bigstep - DataSourceIBMCloud.py, IBMCloud + DataSourceOpenStack.py, OpenStack + DataSourceOracle.py, Oracle DataSourceOVF.py, OVF + DataSourceRbxCloud.py, RbxCloud + DataSourceScaleway.py, Scaleway + DataSourceSmartOS.py, Joyent DataSourceUpCloud.py, UpCloud - DataSourceOpenStack.py, OpenStack DataSourceVMware.py, VMware - DataSourceCloudStack.py, CloudStack - DataSourceExoscale.py, Exoscale - DataSourceAliYun.py, AliYun - DataSourceNWCS.py, NWCS - DataSourceAkamai.py, Akamai + DataSourceVultr.py, Vultr + DataSourceWSL.py, WSL From c6ba0dfab31eec575f52b5951981487280dd9ebd Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Wed, 21 Aug 2024 10:30:38 +0200 Subject: [PATCH 112/131] test: fix test_honor_cloud_dir int test (#5627) Align integration test with c28092fa615961c7ae2f56738a19ea28331b84a7. --- tests/integration_tests/test_paths.py | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/tests/integration_tests/test_paths.py b/tests/integration_tests/test_paths.py index b63da5a42b8..31f3497fc36 100644 --- a/tests/integration_tests/test_paths.py +++ b/tests/integration_tests/test_paths.py @@ -1,5 +1,4 @@ import os -import re from datetime import datetime from typing import Iterator @@ -45,12 +44,6 @@ def verify_log_and_files(self, custom_client): assert custom_client.execute(f"test ! 
-d {DEFAULT_CLOUD_DIR}").ok def collect_logs(self, custom_client: IntegrationInstance): - help_result = custom_client.execute("cloud-init collect-logs -h") - assert help_result.ok, help_result.stderr - assert f"{NEW_CLOUD_DIR}/instance/user-data.txt" in re.sub( - r"\s+", "", help_result.stdout - ), "user-data file not correctly render in collect-logs -h" - # Touch a couple of subiquity files to assert collected installer_files = ( INSTALLER_APPORT_FILES[-1], @@ -75,22 +68,22 @@ def collect_logs(self, custom_client: IntegrationInstance): dirname = datetime.utcnow().date().strftime("cloud-init-logs-%Y-%m-%d") expected_logs = [ f"{dirname}/", - f"{dirname}/cloud-init.log", - f"{dirname}/cloud-init-output.log", f"{dirname}/dmesg.txt", - f"{dirname}/user-data.txt", - f"{dirname}/version", f"{dirname}/dpkg-version", f"{dirname}/journal.txt", f"{dirname}/run/", f"{dirname}/run/cloud-init/", - f"{dirname}/run/cloud-init/result.json", f"{dirname}/run/cloud-init/.instance-id", + f"{dirname}/run/cloud-init/cloud-id", f"{dirname}/run/cloud-init/cloud-init-generator.log", f"{dirname}/run/cloud-init/enabled", - f"{dirname}/run/cloud-init/cloud-id", - f"{dirname}/run/cloud-init/instance-data.json", f"{dirname}/run/cloud-init/instance-data-sensitive.json", + f"{dirname}/run/cloud-init/instance-data.json", + f"{dirname}/run/cloud-init/result.json", + f"{dirname}/new-cloud-dir/instance/user-data.txt", + f"{dirname}/var/log/cloud-init-output.log", + f"{dirname}/var/log/cloud-init.log", + f"{dirname}/version", f"{dirname}{installer_files[0].path}", f"{dirname}{installer_files[1].path}", ] From 8f741da4aa103774f5028adf0308531dbe902d49 Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Wed, 21 Aug 2024 10:30:56 +0200 Subject: [PATCH 113/131] test: fix cmd/test_schema int test (#5629) Adapt to the new annotation formatting from a2193da3eaec4df8945cf3657f94f7c66ff2417d. --- tests/integration_tests/cmd/test_schema.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration_tests/cmd/test_schema.py index b019e4c2f4f..4654d7eec92 100644 --- a/tests/integration_tests/cmd/test_schema.py +++ b/tests/integration_tests/cmd/test_schema.py @@ -174,9 +174,9 @@ def test_schema_deprecations(self, class_client: IntegrationInstance): apt_reboot_if_required: false\t\t# D3 # Deprecations: ------------- - # D1: Deprecated in version 22.2. Use ``package_update`` instead. - # D2: Deprecated in version 22.2. Use ``package_upgrade`` instead. - # D3: Deprecated in version 22.2. Use ``package_reboot_if_required`` instead. + # D1: Deprecated in version 22.2. Use **package_update** instead. + # D2: Deprecated in version 22.2. Use **package_upgrade** instead. + # D3: Deprecated in version 22.2. Use **package_reboot_if_required** instead. Valid schema /root/user-data""" # noqa: E501 From 6e4343eab55fcfdcf727905511b83e25254c38e3 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 26 Aug 2024 14:44:05 -0600 Subject: [PATCH 114/131] fix(tests): use instance.clean/restart instead of clean --reboot (#5636) Directly calling execute("cloud-init clean --logs --reboot") on an integration instance also involves awaiting a new boot id upon the next interaction with the instance to ensure a reboot has actually taken place on this target machine.
Slow-responding test instances/platforms may not have completed the shutdown/restart sequence yet when we interact with them via an immediate blocking call to execute("cloud-init status --wait"), which may exit early if it accesses the prior instance boot before the reboot occurred. It is preferable to inspect /proc/sys/kernel/random/boot_id before issuing a reboot request and block until a delta is seen in boot_id. This blocking wait on reboot and new boot_id is encapsulated inside pycloudlib.BaseInstance.restart, which inspects /proc/sys/kernel/random/boot_id before restart and blocks until a delta in boot_id is seen across the requested restart. Fix test_status_block_through_all_boot_status to call instance.clean() and restart() to ensure we do not race the instance reboot with our post-boot assertions. --- tests/integration_tests/cmd/test_status.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration_tests/cmd/test_status.py index fe9946b06a0..de4222e2df5 100644 --- a/tests/integration_tests/cmd/test_status.py +++ b/tests/integration_tests/cmd/test_status.py @@ -19,9 +19,8 @@ def _remove_nocloud_dir_and_reboot(client: IntegrationInstance): # On Impish and below, NoCloud will be detected on an LXD container. # If we remove this directory, it will no longer be detected. client.execute("rm -rf /var/lib/cloud/seed/nocloud-net") - old_boot_id = client.instance.get_boot_id() - client.execute("cloud-init clean --logs --reboot") - client.instance._wait_for_execute(old_boot_id=old_boot_id) + client.instance.clean() + client.instance.restart() @retry(tries=30, delay=1) @@ -157,7 +156,8 @@ def test_status_block_through_all_boot_status(client): push_and_enable_systemd_unit( client, "before-cloud-init-local.service", BEFORE_CLOUD_INIT_LOCAL ) - client.execute("cloud-init clean --logs --reboot") + client.instance.clean() + client.instance.restart() wait_for_cloud_init(client).stdout.strip() client.execute("cloud-init status --wait") From a38d6da766175f7547b0a31932a5841dbb352e5c Mon Sep 17 00:00:00 2001 From: Ksenija Stanojevic Date: Tue, 27 Aug 2024 06:36:03 -0700 Subject: [PATCH 115/131] feat(azure): add PPS support for azure-proxy-agent (#5601) Add PPS support for azure-proxy-agent and improve error logging.
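A rough sketch of the status-check flow this patch wires into provisioning (the helper name and the RuntimeError stand-ins below are illustrative; the real code reports failures via cloudinit.sources.azure.errors and lives in DataSourceAzure._check_azure_proxy_agent_status):

    from cloudinit import subp

    def check_proxy_agent_status() -> None:
        # Poll azure-proxy-agent until it reports ready, capped at 120s.
        cmd = ["azure-proxy-agent", "--status", "--wait", "120"]
        try:
            out, err = subp.subp(cmd)
            # Healthy: keep stdout/stderr only for diagnostics.
            print("Executing %s resulted in stderr=%r with stdout=%r" % (cmd, err, out))
        except subp.ProcessExecutionError as error:
            if isinstance(error.reason, FileNotFoundError):
                # ProvisionGuestProxyAgent was requested but the binary is
                # not installed; report a dedicated "not found" failure.
                raise RuntimeError("azure-proxy-agent not found") from error
            # Binary present but unhealthy; include the exit code (the real
            # code also logs cmd, stderr and stdout) so the failure is
            # actionable from the logs.
            raise RuntimeError(
                "azure-proxy-agent status failure: exit_code=%s" % error.exit_code
            ) from error

With PPS, the same check now also runs in crawl_metadata() right after reprovisioning whenever the fetched config sets ProvisionGuestProxyAgent.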
--- cloudinit/sources/DataSourceAzure.py | 23 ++- cloudinit/sources/azure/errors.py | 5 +- tests/unittests/sources/test_azure.py | 284 +++++++++++++++++++++++++- 3 files changed, 303 insertions(+), 9 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index be4b5a1fbaf..77a5f46f100 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -577,15 +577,31 @@ def _check_azure_proxy_agent_status(self) -> None: ] out, err = subp.subp(cmd) report_diagnostic_event( - "Running azure-proxy-agent %s resulted" - "in stderr output: %s with stdout: %s" % (cmd, err, out), + "Executing %s resulted " + "in stderr=%r with stdout=%r" % (cmd, err, out), logger_func=LOG.debug, ) except subp.ProcessExecutionError as error: if isinstance(error.reason, FileNotFoundError): + LOG.error( + "Failed to activate Azure Guest Proxy Agent: " + "azure-proxy-agent not found" + ) report_error = errors.ReportableErrorProxyAgentNotFound() self._report_failure(report_error) else: + report_diagnostic_event( + "Failed to activate Azure Guest Proxy Agent: " + "status check failed " + "cmd=%r stderr=%r stdout=%r exit_code=%s" + % ( + error.cmd, + error.stderr, + error.stdout, + error.exit_code, + ), + logger_func=LOG.error, + ) reportable_error = ( errors.ReportableErrorProxyAgentStatusFailure(error) ) @@ -706,6 +722,9 @@ def crawl_metadata(self): self._wait_for_pps_unknown_reuse() md, userdata_raw, cfg, files = self._reprovision() + if cfg.get("ProvisionGuestProxyAgent"): + self._check_azure_proxy_agent_status() + # fetch metadata again as it has changed after reprovisioning imds_md = self.get_metadata_from_imds(report_failure=True) diff --git a/cloudinit/sources/azure/errors.py b/cloudinit/sources/azure/errors.py index 2f715e0c4c7..44e2418b115 100644 --- a/cloudinit/sources/azure/errors.py +++ b/cloudinit/sources/azure/errors.py @@ -199,10 +199,7 @@ def __init__(self, exception: Exception) -> None: class ReportableErrorProxyAgentNotFound(ReportableError): def __init__(self) -> None: - super().__init__( - "Unable to activate Azure Guest Proxy Agent." - "azure-proxy-agent not found" - ) + super().__init__("azure-proxy-agent not found") class ReportableErrorProxyAgentStatusFailure(ReportableError): diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py index 40c04016d67..a2ee3e29c89 100644 --- a/tests/unittests/sources/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -4091,13 +4091,146 @@ def test_running_pps(self): self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock self.mock_readurl.side_effect = [ mock.MagicMock(contents=json.dumps(imds_md_source).encode()), - mock.MagicMock(contents=construct_ovf_env().encode()), + mock.MagicMock( + contents=construct_ovf_env( + provision_guest_proxy_agent=False + ).encode() + ), + mock.MagicMock(contents=json.dumps(self.imds_md).encode()), + ] + self.mock_azure_get_metadata_from_fabric.return_value = [] + + self.azure_ds._check_and_get_data() + + assert self.mock_subp_subp.mock_calls == [] + + assert self.mock_readurl.mock_calls == [ + mock.call( + "http://169.254.169.254/metadata/instance?" + "api-version=2021-08-01&extended=true", + exception_cb=mock.ANY, + headers_cb=imds.headers_cb, + infinite=True, + log_req_resp=True, + timeout=30, + ), + mock.call( + "http://169.254.169.254/metadata/reprovisiondata?" 
+ "api-version=2019-06-01", + exception_cb=mock.ANY, + headers_cb=imds.headers_cb, + log_req_resp=False, + infinite=True, + timeout=30, + ), + mock.call( + "http://169.254.169.254/metadata/instance?" + "api-version=2021-08-01&extended=true", + exception_cb=mock.ANY, + headers_cb=imds.headers_cb, + infinite=True, + log_req_resp=True, + timeout=30, + ), + ] + + # Verify DHCP is setup twice. + assert self.mock_wrapping_setup_ephemeral_networking.mock_calls == [ + mock.call(timeout_minutes=20), + mock.call(timeout_minutes=5), + ] + assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [ + mock.call( + self.azure_ds.distro, + None, + dsaz.dhcp_log_cb, + ), + mock.call( + self.azure_ds.distro, + None, + dsaz.dhcp_log_cb, + ), + ] + assert self.azure_ds._wireserver_endpoint == "10.11.12.13" + assert self.azure_ds._is_ephemeral_networking_up() is False + + # Verify DMI usage. + assert self.mock_dmi_read_dmi_data.mock_calls == [ + mock.call("chassis-asset-tag"), + mock.call("system-uuid"), + ] + assert ( + self.azure_ds.metadata["instance-id"] + == "50109936-ef07-47fe-ac82-890c853f60d5" + ) + + # Verify IMDS metadata. + assert self.azure_ds.metadata["imds"] == self.imds_md + + # Verify reporting ready twice. + assert self.mock_azure_get_metadata_from_fabric.mock_calls == [ + mock.call( + endpoint="10.11.12.13", + distro=self.azure_ds.distro, + iso_dev="/dev/sr0", + pubkey_info=None, + ), + mock.call( + endpoint="10.11.12.13", + distro=self.azure_ds.distro, + iso_dev=None, + pubkey_info=None, + ), + ] + + # Verify netlink operations for Running PPS. + assert self.mock_netlink.mock_calls == [ + mock.call.create_bound_netlink_socket(), + mock.call.wait_for_media_disconnect_connect(mock.ANY, "ethBoot0"), + mock.call.create_bound_netlink_socket().close(), + ] + + # Verify reported_ready marker written and cleaned up. + assert self.wrapped_util_write_file.mock_calls[0] == mock.call( + self.patched_reported_ready_marker_path.as_posix(), mock.ANY + ) + assert self.patched_reported_ready_marker_path.exists() is False + + # Verify reports via KVP. + assert len(self.mock_kvp_report_failure_to_host.mock_calls) == 0 + assert len(self.mock_kvp_report_success_to_host.mock_calls) == 2 + + # Verify dmesg reported via KVP. + assert len(self.mock_report_dmesg_to_kvp.mock_calls) == 2 + + def test_running_pps_gpa(self): + self.mock_subp_subp.side_effect = [ + subp.SubpResult("Guest Proxy Agent running", ""), + ] + imds_md_source = copy.deepcopy(self.imds_md) + imds_md_source["extended"]["compute"]["ppsType"] = "Running" + + nl_sock = mock.MagicMock() + self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock + self.mock_readurl.side_effect = [ + mock.MagicMock(contents=json.dumps(imds_md_source).encode()), + mock.MagicMock( + contents=construct_ovf_env( + provision_guest_proxy_agent=True + ).encode() + ), mock.MagicMock(contents=json.dumps(self.imds_md).encode()), ] self.mock_azure_get_metadata_from_fabric.return_value = [] self.azure_ds._check_and_get_data() + assert self.mock_subp_subp.mock_calls == [ + mock.call( + ["azure-proxy-agent", "--status", "--wait", "120"], + ), + ] + assert self.mock_readurl.mock_calls == [ mock.call( "http://169.254.169.254/metadata/instance?" 
@@ -4209,13 +4342,155 @@ def test_savable_pps(self): ) self.mock_readurl.side_effect = [ mock.MagicMock(contents=json.dumps(imds_md_source).encode()), - mock.MagicMock(contents=construct_ovf_env().encode()), + mock.MagicMock( + contents=construct_ovf_env( + provision_guest_proxy_agent=False + ).encode() + ), mock.MagicMock(contents=json.dumps(self.imds_md).encode()), ] self.mock_azure_get_metadata_from_fabric.return_value = [] self.azure_ds._check_and_get_data() + assert self.mock_subp_subp.mock_calls == [] + + assert self.mock_readurl.mock_calls == [ + mock.call( + "http://169.254.169.254/metadata/instance?" + "api-version=2021-08-01&extended=true", + exception_cb=mock.ANY, + headers_cb=imds.headers_cb, + infinite=True, + log_req_resp=True, + timeout=30, + ), + mock.call( + "http://169.254.169.254/metadata/reprovisiondata?" + "api-version=2019-06-01", + exception_cb=mock.ANY, + headers_cb=imds.headers_cb, + log_req_resp=False, + infinite=True, + timeout=30, + ), + mock.call( + "http://169.254.169.254/metadata/instance?" + "api-version=2021-08-01&extended=true", + exception_cb=mock.ANY, + headers_cb=imds.headers_cb, + infinite=True, + log_req_resp=True, + timeout=30, + ), + ] + + # Verify DHCP is setup twice. + assert self.mock_wrapping_setup_ephemeral_networking.mock_calls == [ + mock.call(timeout_minutes=20), + mock.call( + iface="ethAttached1", + timeout_minutes=20, + report_failure_if_not_primary=False, + ), + ] + assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [ + mock.call( + self.azure_ds.distro, + None, + dsaz.dhcp_log_cb, + ), + mock.call( + self.azure_ds.distro, + "ethAttached1", + dsaz.dhcp_log_cb, + ), + ] + assert self.azure_ds._wireserver_endpoint == "10.11.12.13" + assert self.azure_ds._is_ephemeral_networking_up() is False + + # Verify DMI usage. + assert self.mock_dmi_read_dmi_data.mock_calls == [ + mock.call("chassis-asset-tag"), + mock.call("system-uuid"), + ] + assert ( + self.azure_ds.metadata["instance-id"] + == "50109936-ef07-47fe-ac82-890c853f60d5" + ) + + # Verify IMDS metadata. + assert self.azure_ds.metadata["imds"] == self.imds_md + + # Verify reporting ready twice. + assert self.mock_azure_get_metadata_from_fabric.mock_calls == [ + mock.call( + endpoint="10.11.12.13", + distro=self.azure_ds.distro, + iso_dev="/dev/sr0", + pubkey_info=None, + ), + mock.call( + endpoint="10.11.12.13", + distro=self.azure_ds.distro, + iso_dev=None, + pubkey_info=None, + ), + ] + + # Verify netlink operations for Savable PPS. + assert self.mock_netlink.mock_calls == [ + mock.call.create_bound_netlink_socket(), + mock.call.wait_for_nic_detach_event(nl_sock), + mock.call.wait_for_nic_attach_event(nl_sock, ["ethAttached1"]), + mock.call.create_bound_netlink_socket().close(), + ] + + # Verify reported_ready marker written and cleaned up. + assert self.wrapped_util_write_file.mock_calls[0] == mock.call( + self.patched_reported_ready_marker_path.as_posix(), mock.ANY + ) + assert self.patched_reported_ready_marker_path.exists() is False + + # Verify reports via KVP. + assert len(self.mock_kvp_report_failure_to_host.mock_calls) == 0 + assert len(self.mock_kvp_report_success_to_host.mock_calls) == 2 + + # Verify dmesg reported via KVP. 
+ assert len(self.mock_report_dmesg_to_kvp.mock_calls) == 2 + + def test_savable_pps_gpa(self): + self.mock_subp_subp.side_effect = [ + subp.SubpResult("Guest Proxy Agent running", ""), + ] + imds_md_source = copy.deepcopy(self.imds_md) + imds_md_source["extended"]["compute"]["ppsType"] = "Savable" + + nl_sock = mock.MagicMock() + self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock + self.mock_netlink.wait_for_nic_detach_event.return_value = "eth9" + self.mock_netlink.wait_for_nic_attach_event.return_value = ( + "ethAttached1" + ) + self.mock_readurl.side_effect = [ + mock.MagicMock(contents=json.dumps(imds_md_source).encode()), + mock.MagicMock( + contents=construct_ovf_env( + provision_guest_proxy_agent=True + ).encode() + ), + mock.MagicMock(contents=json.dumps(self.imds_md).encode()), + ] + self.mock_azure_get_metadata_from_fabric.return_value = [] + + self.azure_ds._check_and_get_data() + + assert self.mock_subp_subp.mock_calls == [ + mock.call( + ["azure-proxy-agent", "--status", "--wait", "120"], + ), + ] + assert self.mock_readurl.mock_calls == [ mock.call( "http://169.254.169.254/metadata/instance?" @@ -4728,7 +5003,10 @@ def test_check_azure_proxy_agent_status(self): subp.SubpResult("Guest Proxy Agent running", ""), ] self.azure_ds._check_azure_proxy_agent_status() - assert "Running azure-proxy-agent" in self.caplog.text + assert ( + "Executing ['azure-proxy-agent', '--status', '--wait', '120']" + in self.caplog.text + ) assert self.mock_wrapping_report_failure.mock_calls == [] def test_check_azure_proxy_agent_status_notfound(self): From bbdfe36630a36614d34c723a8f9c1f3a64c6aa6d Mon Sep 17 00:00:00 2001 From: Alberto Contreras Date: Tue, 27 Aug 2024 17:35:10 +0200 Subject: [PATCH 116/131] fix(sources/wsl): no error with empty .cloud-init dir (SC-1862) (#5633) Do not treat the emptiness of .cloud-init/ as an error in the logs if agent.yaml is present. Fixes GH-5632 --- cloudinit/sources/DataSourceWSL.py | 3 +- doc/rtd/reference/datasources/wsl.rst | 6 ++-- tests/unittests/sources/test_wsl.py | 47 +++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 3 deletions(-) diff --git a/cloudinit/sources/DataSourceWSL.py b/cloudinit/sources/DataSourceWSL.py index ddb31411681..5e146ecc177 100644 --- a/cloudinit/sources/DataSourceWSL.py +++ b/cloudinit/sources/DataSourceWSL.py @@ -405,7 +405,8 @@ def _get_data(self) -> bool: user_data = ConfigData(self.find_user_data_file(seed_dir)) except (ValueError, IOError) as err: - LOG.error( + log = LOG.info if agent_data else LOG.error + log( "Unable to load any user-data file in %s: %s", seed_dir, str(err), diff --git a/doc/rtd/reference/datasources/wsl.rst b/doc/rtd/reference/datasources/wsl.rst index c6970448b5c..8e1644e52f6 100644 --- a/doc/rtd/reference/datasources/wsl.rst +++ b/doc/rtd/reference/datasources/wsl.rst @@ -52,6 +52,8 @@ User data can be supplied in any cloud-config files or shell scripts. At runtime, the WSL datasource looks for user data in the following locations inside the Windows host filesystem, in the order specified below. +The WSL datasource will be enabled if cloud-init discovers at least one of the +applicable config files described below. First, configurations from Ubuntu Pro/Landscape are checked for in the following paths: @@ -71,8 +73,8 @@ following paths: used instead of the one provided by the ``agent.yaml``, which is treated as a default. 
-Then, if a file from (1) is not found, a user-provided configuration will be -looked for instead in the following order: +Then, if a file from (1) is not found, optional user-provided configuration +will be looked for in the following order: 1. ``%USERPROFILE%\.cloud-init\.user-data`` holds user data for a specific instance configuration. The datasource resolves the name attributed diff --git a/tests/unittests/sources/test_wsl.py index 1ba374e468f..2012cd90a3c 100644 --- a/tests/unittests/sources/test_wsl.py +++ b/tests/unittests/sources/test_wsl.py @@ -5,6 +5,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import logging import os +import re from copy import deepcopy from email.mime.multipart import MIMEMultipart from pathlib import PurePath @@ -460,6 +461,52 @@ def test_get_data_jinja(self, m_lsb_release, paths, tmpdir): cast(MIMEMultipart, ud), "text/cloud-config" ), "No cloud-config part should exist" + @pytest.mark.parametrize("with_agent_data", [True, False]) + @mock.patch("cloudinit.util.lsb_release") + def test_get_data_x( + self, m_lsb_release, with_agent_data, caplog, paths, tmpdir + ): + """ + Assert behavior of empty .cloud-init dir with and without agent data + """ + m_lsb_release.return_value = SAMPLE_LINUX_DISTRO + data_path = tmpdir.join(".cloud-init", f"{INSTANCE_NAME}.user-data") + data_path.dirpath().mkdir() + + if with_agent_data: + ubuntu_pro_tmp = tmpdir.join(".ubuntupro", ".cloud-init") + os.makedirs(ubuntu_pro_tmp, exist_ok=True) + agent_path = ubuntu_pro_tmp.join("agent.yaml") + agent_path.write(AGENT_SAMPLE) + + ds = wsl.DataSourceWSL( + sys_cfg=SAMPLE_CFG, + distro=_get_distro("ubuntu"), + paths=paths, + ) + + assert ds.get_data() is with_agent_data + if with_agent_data: + assert ds.userdata_raw == AGENT_SAMPLE + else: + assert ds.userdata_raw is None + + expected_log_level = logging.INFO if with_agent_data else logging.ERROR + regex = ( + "Unable to load any user-data file in /[^:]*/.cloud-init:" + " /.*/.cloud-init directory is empty" + ) + messages = [ + x.message + for x in caplog.records + if x.levelno == expected_log_level and re.match(regex, x.message) + ] + assert ( + len(messages) > 0 + ), "Expected log message matching '{}' with log level '{}'".format( + regex, expected_log_level + ) + @mock.patch("cloudinit.util.get_linux_distro") def test_data_precedence(self, m_get_linux_dist, tmpdir, paths): """Validates the precedence of user-data files.""" From 8bc3e42543d6be2624a439fe0b72b4546928667e Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 27 Aug 2024 14:26:42 -0600 Subject: [PATCH 117/131] feat: add automation for ubuntu/* branches asserting quilt patches apply (#5622) Perform the same steps that the cloud-init daily recipe builds perform to assert that packaging branch updates will not break daily builds due to quilt patch apply issues.
Steps of the daily build recipe reflected in this workflow: - checkout main - merge packaging branch topmost commit - quilt push -a - run unittests (via tox -e py3) - quilt pop -a --- .github/workflows/packaging-tests.yml | 47 +++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 .github/workflows/packaging-tests.yml diff --git a/.github/workflows/packaging-tests.yml new file mode 100644 index 00000000000..8517d862c7d --- /dev/null +++ b/.github/workflows/packaging-tests.yml @@ -0,0 +1,47 @@ +name: Packaging Tests + +on: + pull_request: + branches: + - 'ubuntu/**' + +concurrency: + group: 'ci-${{ github.workflow }}-${{ github.ref }}' + cancel-in-progress: true + +defaults: + run: + shell: sh -ex {0} + +env: + RELEASE: focal + +jobs: + check-patches: + runs-on: ubuntu-22.04 + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + # Fetch all branches for merging + fetch-depth: 0 + - name: Prepare dependencies + run: | + sudo DEBIAN_FRONTEND=noninteractive apt-get update + sudo DEBIAN_FRONTEND=noninteractive apt-get -y install tox quilt + - name: Setup quilt environment + run: | + echo 'QUILT_PATCHES=debian/patches' >> ~/.quiltrc + echo 'QUILT_SERIES=debian/patches/series' >> ~/.quiltrc + + - name: 'Daily recipe: quilt patches apply successfully and tests run' + run: | + git config user.name "GitHub Actions" + git config user.email "actions@github.com" + git remote add upstream https://git.launchpad.net/cloud-init + git fetch upstream main + git checkout upstream/main + git merge ${{ github.sha }} + quilt push -a + tox -e py3 + quilt pop -a From fa042b853a8353c631ea8c3bd92cf7012705faad Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 29 Aug 2024 10:04:58 -0600 Subject: [PATCH 118/131] tests: assert cloud-init user-data cert is the only root cert (#5641) Reintroduce a strict assert that cloud-init's cert in user-data is the only root cert defined on the platform. The Google guest agent was installing a secondary root cert in ca-certificates.crt for a period of time and this was determined to be less than ideal practice. Allow cloud-init's integration tests to retain strict validation of the cert checksum to provide a signal if other platforms or agents attempt to extend or alter the system-wide CA.
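Because the test below pins the sha256 of the entire installed bundle, any agent that appends an extra root cert changes the digest and fails the assertion. A minimal sketch for recomputing the pinned value (the helper name is illustrative; the path and expected digest come from the test itself):

    import hashlib

    def bundle_digest(path="/etc/ssl/certs/ca-certificates.crt") -> str:
        # Hash the whole installed CA bundle, byte for byte.
        with open(path, "rb") as f:
            return hashlib.sha256(f.read()).hexdigest()

    # On an instance provisioned with only the user-data cert, this should
    # print 78e875f18c73c1aab9167ae0bd323391e52222cc2dbcda42d129537219300062
    print(bundle_digest())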
--- tests/integration_tests/modules/test_ca_certs.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/integration_tests/modules/test_ca_certs.py index f50f089a556..0b84c9b9fe4 100644 --- a/tests/integration_tests/modules/test_ca_certs.py +++ b/tests/integration_tests/modules/test_ca_certs.py @@ -92,8 +92,13 @@ def test_certs_updated(self, class_client: IntegrationInstance): def test_cert_installed(self, class_client: IntegrationInstance): """Test that our specified cert has been installed""" - certs = class_client.execute("cat /etc/ssl/certs/ca-certificates.crt") - assert CERT_CONTENT in certs + checksum = class_client.execute( + "sha256sum /etc/ssl/certs/ca-certificates.crt" + ) + assert ( + "78e875f18c73c1aab9167ae0bd323391e52222cc2dbcda42d129537219300062" + in checksum + ) def test_clean_log(self, class_client: IntegrationInstance): """Verify no errors, no deprecations and correct inactive modules in From 6d644e612a2d924ab6f9b6a7734d7b4e42981d7e Mon Sep 17 00:00:00 2001 From: dermotbradley Date: Thu, 29 Aug 2024 21:45:33 +0100 Subject: [PATCH 119/131] fix: cc_user_groups incorrectly assumes "useradd" never locks password field (#5355) Currently cc_user_groups assumes that "useradd" never locks the password field of newly created users. This is an incorrect assumption. Change add_user (in both __init__.py and alpine.py) to explicitly call either lock_passwd or unlock_passwd at all times to achieve the desired final result. For existing users with empty or locked empty passwords, no password unlock will be performed and warnings will be issued. To support empty password validation, provide functionality to parse /etc/shadow and /var/lib/extrausers/shadow to assert existing users do not have empty passwords before unlocking. Additionally in this commit: - add NetworkBSD.ifs property to avoid subp side-effect in __init__ which calls ifconfig -a at every instance initialization Useradd background: From the useradd manpage: '-p, --password PASSWORD The encrypted password, as returned by crypt(3). The default is to disable the password.' That is, if cloud-init runs 'useradd' but does not pass it the "-p" option (with an encrypted password) then the new user's password field will be locked by "useradd". cloud-init only passes the "-p" option when calling "useradd" if user-data specifies the "passwd" option for a new user. For user-data that specifies either the "hashed_passwd" or "plain_text_passwd" option instead, cloud-init calls "useradd" without the "-p" option and so the password field of such a user will be locked by "useradd". For user-data that specifies "hashed_passwd" for a new user, "useradd" is called with no "-p" option, causing "useradd" to lock the password field; however, cloud-init then calls "chpasswd -e" to set the encrypted password, which also results in the password field being unlocked. For user-data that specifies "plain_text_passwd" for a new user, "useradd" is called with no "-p" option, causing "useradd" to lock the password. cloud-init then calls "chpasswd" to set the password, which also results in the password field being unlocked. For user-data that specifies no password at all for a new user, "useradd" is called with no "-p" option, causing "useradd" to lock the password. The password field is left locked. In all the above scenarios "passwd -l" may be called later by cloud-init to enforce "lock_passwd: true".
Conversely, where "lock_passwd: false" applies, the "useradd" behaviour
described above (for "hashed_passwd", "plain_text_passwd", or no
password) means that newly created users may have password fields locked
when they should be unlocked.

For Alpine, "adduser" does not support any form of password being passed
and it always locks the password field (the same point applies about the
password field being unlocked when/if "chpasswd" is called). Therefore in
some situations (e.g. no password specified in user-data) the password
needs to be unlocked if "lock_passwd: false".
---
 cloudinit/distros/__init__.py                 | 189 +++++-
 cloudinit/distros/alpine.py                   |  39 +-
 cloudinit/distros/bsd.py                      |   1 +
 cloudinit/distros/freebsd.py                  |  28 +-
 cloudinit/distros/netbsd.py                   |  21 +-
 cloudinit/distros/networking.py               |  11 +-
 cloudinit/distros/openbsd.py                  |  16 +-
 .../modules/test_users_groups.py              |  48 +-
 tests/unittests/distros/test_create_users.py  | 538 +++++++++++++++++-
 tests/unittests/distros/test_dragonflybsd.py  |  33 ++
 tests/unittests/distros/test_freebsd.py       |  14 +-
 tests/unittests/distros/test_netbsd.py        |  24 +
 tests/unittests/distros/test_openbsd.py       |  26 +
 13 files changed, 944 insertions(+), 44 deletions(-)
 create mode 100644 tests/unittests/distros/test_openbsd.py

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 1afef63de95..e65cbfb5d89 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -136,6 +136,10 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
    doas_fn = "/etc/doas.conf"
    ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users"
    hostname_conf_fn = "/etc/hostname"
+    shadow_fn = "/etc/shadow"
+    shadow_extrausers_fn = "/var/lib/extrausers/shadow"
+    # /etc/shadow match patterns indicating empty passwords
+    shadow_empty_locked_passwd_patterns = ["^{username}::", "^{username}:!:"]
    tz_zone_dir = "/usr/share/zoneinfo"
    default_owner = "root:root"
    init_cmd = ["service"]  # systemctl, service etc
@@ -655,19 +659,21 @@ def preferred_ntp_clients(self):
    def get_default_user(self):
        return self.get_option("default_user")

-    def add_user(self, name, **kwargs):
+    def add_user(self, name, **kwargs) -> bool:
        """
        Add a user to the system using standard GNU tools

        This should be overridden on distros where useradd is not desirable
        or not available.
+
+        Returns False if user already exists, otherwise True.
        """
        # XXX need to make add_user idempotent somehow as we
        # still want to add groups or modify SSH keys on pre-existing
        # users in the image.
        if util.is_user(name):
            LOG.info("User %s already exists, skipping.", name)
-            return
+            return False

        if "create_groups" in kwargs:
            create_groups = kwargs.pop("create_groups")
@@ -771,6 +777,9 @@ def add_user(self, name, **kwargs):
            util.logexc(LOG, "Failed to create user %s", name)
            raise e

+        # Indicate that a new user was created
+        return True
+
    def add_snap_user(self, name, **kwargs):
        """
        Add a snappy user to the system using snappy tools
@@ -798,6 +807,40 @@ def add_snap_user(self, name, **kwargs):

        return username

+    def _shadow_file_has_empty_user_password(self, username) -> bool:
+        """
+        Check whether username exists in shadow files with empty password.
+
+        Support reading /var/lib/extrausers/shadow on snappy systems.
+        """
+        if util.system_is_snappy():
+            shadow_files = [self.shadow_extrausers_fn, self.shadow_fn]
+        else:
+            shadow_files = [self.shadow_fn]
+        shadow_empty_passwd_re = "|".join(
+            [
+                pattern.format(username=username)
+                for pattern in self.shadow_empty_locked_passwd_patterns
+            ]
+        )
+        for shadow_file in shadow_files:
+            if not os.path.exists(shadow_file):
+                continue
+            shadow_content = util.load_text_file(shadow_file)
+            if not re.findall(rf"^{username}:", shadow_content, re.MULTILINE):
+                LOG.debug("User %s not found in %s", username, shadow_file)
+                continue
+            LOG.debug(
+                "User %s found in %s. Checking for empty password",
+                username,
+                shadow_file,
+            )
+            if re.findall(
+                shadow_empty_passwd_re, shadow_content, re.MULTILINE
+            ):
+                return True
+        return False
+
    def create_user(self, name, **kwargs):
        """
        Creates or partially updates the ``name`` user in the system.
@@ -824,20 +867,93 @@ def create_user(self, name, **kwargs):
            return self.add_snap_user(name, **kwargs)

        # Add the user
-        self.add_user(name, **kwargs)
-
-        # Set password if plain-text password provided and non-empty
-        if "plain_text_passwd" in kwargs and kwargs["plain_text_passwd"]:
-            self.set_passwd(name, kwargs["plain_text_passwd"])
-
-        # Set password if hashed password is provided and non-empty
-        if "hashed_passwd" in kwargs and kwargs["hashed_passwd"]:
-            self.set_passwd(name, kwargs["hashed_passwd"], hashed=True)
+        pre_existing_user = not self.add_user(name, **kwargs)
+
+        has_existing_password = False
+        ud_blank_password_specified = False
+        ud_password_specified = False
+        password_key = None
+
+        if "plain_text_passwd" in kwargs:
+            ud_password_specified = True
+            password_key = "plain_text_passwd"
+            if kwargs["plain_text_passwd"]:
+                # Set password if plain-text password provided and non-empty
+                self.set_passwd(name, kwargs["plain_text_passwd"])
+            else:
+                ud_blank_password_specified = True
+
+        if "hashed_passwd" in kwargs:
+            ud_password_specified = True
+            password_key = "hashed_passwd"
+            if kwargs["hashed_passwd"]:
+                # Set password if hashed password is provided and non-empty
+                self.set_passwd(name, kwargs["hashed_passwd"], hashed=True)
+            else:
+                ud_blank_password_specified = True
+
+        if pre_existing_user:
+            if not ud_password_specified:
+                if "passwd" in kwargs:
+                    password_key = "passwd"
+                    # Only "plain_text_passwd" and "hashed_passwd"
+                    # are valid for an existing user.
+                    LOG.warning(
+                        "'passwd' in user-data is ignored for existing "
+                        "user %s",
+                        name,
+                    )
-        # Default locking down the account. 'lock_passwd' defaults to True.
-        # lock account unless lock_password is False.
+                # As no password specified for the existing user in user-data
+                # then check if the existing user's hashed password value is
+                # empty (whether locked or not).
+                has_existing_password = not (
+                    self._shadow_file_has_empty_user_password(name)
+                )
+        else:
+            if "passwd" in kwargs:
+                ud_password_specified = True
+                password_key = "passwd"
+                if not kwargs["passwd"]:
+                    ud_blank_password_specified = True
+
+        # Default locking down the account. 'lock_passwd' defaults to True.
+        # Lock account unless lock_password is False in which case unlock
+        # account as long as a password (blank or otherwise) was specified.
        if kwargs.get("lock_passwd", True):
            self.lock_passwd(name)
+        elif has_existing_password or ud_password_specified:
+            # 'lock_passwd: False' and either existing account already with
+            # non-blank password or else existing/new account with password
+            # explicitly set in user-data.
+            if ud_blank_password_specified:
+                LOG.debug(
+                    "Allowing unlocking empty password for %s based on empty"
+                    " '%s' in user-data",
+                    name,
+                    password_key,
+                )
+
+            # Unlock the existing/new account
+            self.unlock_passwd(name)
+        elif pre_existing_user:
+            # Pre-existing user with no existing password and none
+            # explicitly set in user-data.
+            LOG.warning(
+                "Not unlocking blank password for existing user %s."
+                " 'lock_passwd: false' present in user-data but no existing"
+                " password set and no 'plain_text_passwd'/'hashed_passwd'"
+                " provided in user-data",
+                name,
+            )
+        else:
+            # No password (whether blank or otherwise) explicitly set
+            LOG.warning(
+                "Not unlocking password for user %s. 'lock_passwd: false'"
+                " present in user-data but no 'passwd'/'plain_text_passwd'/"
+                "'hashed_passwd' provided in user-data",
+                name,
+            )

        # Configure doas access
        if "doas" in kwargs:
@@ -914,6 +1030,50 @@ def lock_passwd(self, name):
            util.logexc(LOG, "Failed to disable password for user %s", name)
            raise e

+    def unlock_passwd(self, name: str):
+        """
+        Unlock the password of a user, i.e., enable password logins
+        """
+        # passwd must use short '-u' due to SLES11 lacking long form '--unlock'
+        unlock_tools = (["passwd", "-u", name], ["usermod", "--unlock", name])
+        try:
+            cmd = next(tool for tool in unlock_tools if subp.which(tool[0]))
+        except StopIteration as e:
+            raise RuntimeError(
+                "Unable to unlock user account '%s'. No tools available. "
+                " Tried: %s." % (name, [c[0] for c in unlock_tools])
+            ) from e
+        try:
+            _, err = subp.subp(cmd, rcs=[0, 3])
+        except Exception as e:
+            util.logexc(LOG, "Failed to enable password for user %s", name)
+            raise e
+        if err:
+            # if "passwd" or "usermod" are unable to unlock an account with
+            # an empty password then they display a message on stdout. In
+            # that case then instead set a blank password.
+            passwd_set_tools = (
+                ["passwd", "-d", name],
+                ["usermod", "--password", "''", name],
+            )
+            try:
+                cmd = next(
+                    tool for tool in passwd_set_tools if subp.which(tool[0])
+                )
+            except StopIteration as e:
+                raise RuntimeError(
+                    "Unable to set blank password for user account '%s'. "
+                    "No tools available. "
+                    " Tried: %s." % (name, [c[0] for c in passwd_set_tools])
+                ) from e
+            try:
+                subp.subp(cmd)
+            except Exception as e:
+                util.logexc(
+                    LOG, "Failed to set blank password for user %s", name
+                )
+                raise e
+
    def expire_passwd(self, user):
        try:
            subp.subp(["passwd", "--expire", user])
@@ -948,6 +1108,9 @@ def chpasswd(self, plist_in: list, hashed: bool):
            )
            + "\n"
        )
+        # Need to use the short option name '-e' instead of '--encrypted'
+        # (which would be more descriptive) since Busybox and SLES 11
+        # chpasswd don't know about long names.
        cmd = ["chpasswd"] + (["-e"] if hashed else [])
        subp.subp(cmd, data=payload)

diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py
index dae4b61564e..19912d3724f 100644
--- a/cloudinit/distros/alpine.py
+++ b/cloudinit/distros/alpine.py
@@ -205,16 +205,18 @@ def preferred_ntp_clients(self):

        return self._preferred_ntp_clients

-    def add_user(self, name, **kwargs):
+    def add_user(self, name, **kwargs) -> bool:
        """
        Add a user to the system using standard tools

        On Alpine this may use either 'useradd' or 'adduser' depending
        on whether the 'shadow' package is installed.
+
+        Returns False if user already exists, otherwise True.
        """
        if util.is_user(name):
            LOG.info("User %s already exists, skipping.", name)
-            return
+            return False

        if "selinux_user" in kwargs:
            LOG.warning("Ignoring selinux_user parameter for Alpine Linux")
@@ -418,6 +420,9 @@ def add_user(self, name, **kwargs):
                LOG, "Failed to update %s for user %s", shadow_file, name
            )

+        # Indicate that a new user was created
+        return True
+
    def lock_passwd(self, name):
        """
        Lock the password of a user, i.e., disable password logins
@@ -446,6 +451,36 @@ def lock_passwd(self, name):
            util.logexc(LOG, "Failed to disable password for user %s", name)
            raise e

+    def unlock_passwd(self, name: str):
+        """
+        Unlock the password of a user, i.e., enable password logins
+        """
+
+        # Check whether Shadow's or Busybox's version of 'passwd'.
+        # If Shadow's 'passwd' is available then use the generic
+        # lock_passwd function from __init__.py instead.
+        if not os.path.islink(
+            "/usr/bin/passwd"
+        ) or "bbsuid" not in os.readlink("/usr/bin/passwd"):
+            return super().unlock_passwd(name)
+
+        cmd = ["passwd", "-u", name]
+        # Busybox's 'passwd', unlike Shadow's 'passwd', errors
+        # if password is already unlocked:
+        #
+        #   "passwd: password for user2 is already unlocked"
+        #
+        # with exit code 1
+        #
+        # and also does *not* error if no password is set.
+        try:
+            _, err = subp.subp(cmd, rcs=[0, 1])
+            if re.search(r"is already unlocked", err):
+                return True
+        except subp.ProcessExecutionError as e:
+            util.logexc(LOG, "Failed to unlock password for user %s", name)
+            raise e
+
    def expire_passwd(self, user):
        # Check whether Shadow's or Busybox's version of 'passwd'.
        # If Shadow's 'passwd' is available then use the generic

diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py
index 15be9c36714..8433aac2aa8 100644
--- a/cloudinit/distros/bsd.py
+++ b/cloudinit/distros/bsd.py
@@ -15,6 +15,7 @@ class BSD(distros.Distro):
    networking_cls = BSDNetworking
    hostname_conf_fn = "/etc/rc.conf"
    rc_conf_fn = "/etc/rc.conf"
+    shadow_fn = "/etc/master.passwd"
    default_owner = "root:wheel"

    # This differs from the parent Distro class, which has -P for

diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index ba35b2e611f..fc1c38a424a 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -41,6 +41,17 @@ class Distro(cloudinit.distros.bsd.BSD):
    dhclient_lease_directory = "/var/db"
    dhclient_lease_file_regex = r"dhclient.leases.\w+"

+    # /etc/shadow match patterns indicating empty passwords
+    # For FreeBSD (from https://man.freebsd.org/cgi/man.cgi?passwd(5)) a
+    # password field of "" indicates no password, and a password
+    # field value of either "*" or "*LOCKED*" indicate differing forms of
+    # "locked" but with no password defined.
+    shadow_empty_locked_passwd_patterns = [
+        r"^{username}::",
+        r"^{username}:\*:",
+        r"^{username}:\*LOCKED\*:",
+    ]
+
    @classmethod
    def reload_init(cls, rcs=None):
        """
@@ -86,7 +97,12 @@ def manage_service(
    def _get_add_member_to_group_cmd(self, member_name, group_name):
        return ["pw", "usermod", "-n", member_name, "-G", group_name]

-    def add_user(self, name, **kwargs):
+    def add_user(self, name, **kwargs) -> bool:
+        """
+        Add a user to the system using standard tools
+
+        Returns False if user already exists, otherwise True.
+        """
        if util.is_user(name):
            LOG.info("User %s already exists, skipping.", name)
            return False
@@ -140,6 +156,9 @@ def add_user(self, name, **kwargs):
        if passwd_val is not None:
            self.set_passwd(name, passwd_val, hashed=True)

+        # Indicate that a new user was created
+        return True
+
    def expire_passwd(self, user):
        try:
            subp.subp(["pw", "usermod", user, "-p", "01-Jan-1970"])
@@ -170,6 +189,13 @@ def lock_passwd(self, name):
            util.logexc(LOG, "Failed to lock password login for user %s", name)
            raise

+    def unlock_passwd(self, name):
+        LOG.debug(
+            "Dragonfly BSD/FreeBSD password lock is not reversible, "
+            "ignoring unlock for user %s",
+            name,
+        )
+
    def apply_locale(self, locale, out_fn=None):
        # Adjust the locales value to the new value
        newconf = StringIO()

diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
index da8c1904028..157aba06924 100644
--- a/cloudinit/distros/netbsd.py
+++ b/cloudinit/distros/netbsd.py
@@ -49,6 +49,17 @@ class NetBSD(cloudinit.distros.bsd.BSD):
    ci_sudoers_fn = "/usr/pkg/etc/sudoers.d/90-cloud-init-users"
    group_add_cmd_prefix = ["groupadd"]

+    # For NetBSD (from https://man.netbsd.org/passwd.5) a password field
+    # value of either "" or "*************" (13 "*") indicates no password,
+    # a password field prefixed with "*LOCKED*" indicates a locked
+    # password, and a password field of "*LOCKED*" followed by 13 "*"
+    # indicates a locked and blank password.
+    shadow_empty_locked_passwd_patterns = [
+        r"^{username}::",
+        r"^{username}:\*\*\*\*\*\*\*\*\*\*\*\*\*:",
+        r"^{username}:\*LOCKED\*\*\*\*\*\*\*\*\*\*\*\*\*\*:",
+    ]
+
    def __init__(self, name, cfg, paths):
        super().__init__(name, cfg, paths)
        if os.path.exists("/usr/pkg/bin/pkgin"):
@@ -63,7 +74,12 @@ def __init__(self, name, cfg, paths):
    def _get_add_member_to_group_cmd(self, member_name, group_name):
        return ["usermod", "-G", group_name, member_name]

-    def add_user(self, name, **kwargs):
+    def add_user(self, name, **kwargs) -> bool:
+        """
+        Add a user to the system using standard tools
+
+        Returns False if user already exists, otherwise True.
+        """
        if util.is_user(name):
            LOG.info("User %s already exists, skipping.", name)
            return False
@@ -112,6 +128,9 @@ def add_user(self, name, **kwargs):
        if passwd_val is not None:
            self.set_passwd(name, passwd_val, hashed=True)

+        # Indicate that a new user was created
+        return True
+
    def set_passwd(self, user, passwd, hashed=False):
        if hashed:
            hashed_pw = passwd

diff --git a/cloudinit/distros/networking.py b/cloudinit/distros/networking.py
index af9584bdfca..67f10f4fbcf 100644
--- a/cloudinit/distros/networking.py
+++ b/cloudinit/distros/networking.py
@@ -179,16 +179,21 @@ class BSDNetworking(Networking):

    def __init__(self):
        self.ifc = ifconfig.Ifconfig()
-        self.ifs = {}
-        self._update_ifs()
+        self._ifs = {}
        super().__init__()

+    @property
+    def ifs(self) -> dict:
+        if not self._ifs:
+            self._update_ifs()
+        return self._ifs
+
    def _update_ifs(self):
        ifconf = subp.subp(["ifconfig", "-a"])
        # ``ifconfig -a`` always returns at least ``lo0``.
        # So this ``if`` is really just to make testing/mocking easier
        if ifconf[0]:
-            self.ifs = self.ifc.parse(ifconf[0])
+            self._ifs = self.ifc.parse(ifconf[0])

    def apply_network_config_names(self, netcfg: NetworkConfig) -> None:
        LOG.debug("Cannot rename network interface.")

diff --git a/cloudinit/distros/openbsd.py b/cloudinit/distros/openbsd.py
index a701580deb1..14cf3be2b8e 100644
--- a/cloudinit/distros/openbsd.py
+++ b/cloudinit/distros/openbsd.py
@@ -14,6 +14,16 @@ class Distro(cloudinit.distros.netbsd.NetBSD):
    hostname_conf_fn = "/etc/myname"
    init_cmd = ["rcctl"]

+    # For OpenBSD (from https://man.openbsd.org/passwd.5) a password field
+    # of "" indicates no password, and password field values of either
+    # "*" or "*************" (13 "*") indicate differing forms of "locked"
+    # but with no password defined.
+    shadow_empty_locked_passwd_patterns = [
+        r"^{username}::",
+        r"^{username}:\*:",
+        r"^{username}:\*\*\*\*\*\*\*\*\*\*\*\*\*:",
+    ]
+
    def _read_hostname(self, filename, default=None):
        return util.load_text_file(self.hostname_conf_fn)
@@ -53,7 +63,11 @@ def lock_passwd(self, name):
            raise

    def unlock_passwd(self, name):
-        pass
+        LOG.debug(
+            "OpenBSD password lock is not reversible, "
+            "ignoring unlock for user %s",
+            name,
+        )

    def _get_pkg_cmd_environ(self):
        """Return env vars used in OpenBSD package_command operations"""

diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py
index a904cd9f6f2..809d988f8cb 100644
--- a/tests/integration_tests/modules/test_users_groups.py
+++ b/tests/integration_tests/modules/test_users_groups.py
@@ -11,7 +11,7 @@
from tests.integration_tests.instances import IntegrationInstance
from tests.integration_tests.releases import CURRENT_RELEASE, IS_UBUNTU, JAMMY
-from tests.integration_tests.util import verify_clean_log
+from tests.integration_tests.util import verify_clean_boot

USER_DATA = """\
#cloud-config
@@ -36,6 +36,9 @@
      sudo: ALL=(ALL) NOPASSWD:ALL
      groups: [cloud-users, secret]
      lock_passwd: true
+    - name: nopassworduser
+      gecos: I do not like passwords
+      lock_passwd: false
    - name: cloudy
      gecos: Magic Cloud App Daemon User
      inactive: '0'
@@ -47,6 +50,10 @@
      uid: 1743
"""

+NEW_USER_EMPTY_PASSWD_WARNING = "Not unlocking password for user {username}. 'lock_passwd: false' present in user-data but no 'passwd'/'plain_text_passwd'/'hashed_passwd' provided in user-data"  # noqa: E501
+
+EXISTING_USER_EMPTY_PASSWD_WARNING = "Not unlocking blank password for existing user {username}. 'lock_passwd: false' present in user-data but no existing password set and no 'plain_text_passwd'/'hashed_passwd' provided in user-data"  # noqa: E501
+

@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
@@ -86,6 +93,11 @@ class TestUsersGroups:
        (["passwd", "eric"], r"eric:x:1742:"),
        # Test int uid
        (["passwd", "archivist"], r"archivist:x:1743:"),
+        # Test user created with lock_passwd: false and no password
+        (
+            ["passwd", "nopassworduser"],
+            r"nopassworduser:x:[0-9]{4}:[0-9]{4}:I do not like passwords",
+        ),
    ],
)
def test_users_groups(self, regex, getent_args, class_client):
@@ -100,13 +112,43 @@ def test_users_groups(self, regex, getent_args, class_client):

    def test_user_root_in_secret(self, class_client):
        """Test root user is in 'secret' group."""
-        log = class_client.read_from_file("/var/log/cloud-init.log")
-        verify_clean_log(log)
+        verify_clean_boot(
+            class_client,
+            require_warnings=[
+                NEW_USER_EMPTY_PASSWD_WARNING.format(username="nopassworduser")
+            ],
+        )
        output = class_client.execute("groups root").stdout
        _, groups_str = output.split(":", maxsplit=1)
        groups = groups_str.split()
        assert "secret" in groups

+    def test_nopassword_unlock_warnings(self, class_client):
+        """Verify warnings for empty passwords for new and existing users."""
+        verify_clean_boot(
+            class_client,
+            require_warnings=[
+                NEW_USER_EMPTY_PASSWD_WARNING.format(username="nopassworduser")
+            ],
+        )
+
+        # Fake an admin clearing the password, leaving user foobar with an
+        # empty unlocked password.
+        # This will generate additional warnings about not unlocking passwords
+        # for pre-existing users which have an existing empty password
+        class_client.execute("passwd -d foobar")
+        class_client.instance.clean()
+        class_client.restart()
+        verify_clean_boot(
+            class_client,
+            ignore_warnings=True,  # ignore warnings about existing groups
+            require_warnings=[
+                EXISTING_USER_EMPTY_PASSWD_WARNING.format(
+                    username="nopassworduser"
+                ),
+                EXISTING_USER_EMPTY_PASSWD_WARNING.format(username="foobar"),
+            ],
+        )
+

@pytest.mark.user_data(USER_DATA)
@pytest.mark.skipif(

diff --git a/tests/unittests/distros/test_create_users.py b/tests/unittests/distros/test_create_users.py
index ebbbb418e8a..819e2b9b006 100644
--- a/tests/unittests/distros/test_create_users.py
+++ b/tests/unittests/distros/test_create_users.py
@@ -1,10 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from pathlib import Path
from typing import List

import pytest

from cloudinit import distros, features, lifecycle, ssh_util
+from tests.unittests.distros import _get_distro
from tests.unittests.helpers import mock
from tests.unittests.util import abstract_to_concrete

@@ -16,6 +18,14 @@ def common_mocks(mocker):
    mocker.patch("cloudinit.distros.util.system_is_snappy", return_value=False)

+def _chpasswdmock(name: str, password: str, hashed: bool = False):
+    """Return a mock of chpasswd call based on args"""
+    cmd = ["chpasswd", "-e"] if hashed else ["chpasswd"]
+    return mock.call(
+        cmd, data=f"{name}:{password}", logstring=f"chpasswd for {name}"
+    )
+
+
def _useradd2call(args: List[str]):
    # return a mock call for the useradd command in args
    # with expected 'logstring'.
@@ -30,67 +40,420 @@ def _useradd2call(args: List[str]):
@mock.patch("cloudinit.distros.subp.subp")
class TestCreateUser:
    @pytest.fixture()
-    def dist(self):
-        return abstract_to_concrete(distros.Distro)(
+    def dist(self, tmpdir):
+        d = abstract_to_concrete(distros.Distro)(
            name="test", cfg=None, paths=None
        )
+        # Monkey patch /etc/shadow files to tmpdir
+        d.shadow_fn = tmpdir.join(d.shadow_fn).strpath
+        d.shadow_extrausers_fn = tmpdir.join(d.shadow_extrausers_fn).strpath
+        return d

    @pytest.mark.parametrize(
-        "create_kwargs,expected",
+        "create_kwargs,is_snappy,expected",
        [
            pytest.param(
                {},
+                False,
                [
                    _useradd2call([USER, "-m"]),
                    mock.call(["passwd", "-l", USER]),
                ],
                id="basic",
            ),
+            pytest.param(
+                {},
+                True,
+                [
+                    _useradd2call([USER, "--extrausers", "-m"]),
+                    mock.call(["passwd", "-l", USER]),
+                ],
+                id="basic_snappy",
+            ),
            pytest.param(
                {"no_create_home": True},
+                False,
                [
                    _useradd2call([USER, "-M"]),
                    mock.call(["passwd", "-l", USER]),
                ],
                id="no_home",
            ),
+            pytest.param(
+                {"no_create_home": True},
+                True,
+                [
+                    _useradd2call([USER, "--extrausers", "-M"]),
+                    mock.call(["passwd", "-l", USER]),
+                ],
+                id="no_home_snappy",
+            ),
            pytest.param(
                {"system": True},
+                False,
                [
                    _useradd2call([USER, "--system", "-M"]),
                    mock.call(["passwd", "-l", USER]),
                ],
                id="system_user",
            ),
+            pytest.param(
+                {"system": True},
+                True,
+                [
+                    _useradd2call([USER, "--extrausers", "--system", "-M"]),
+                    mock.call(["passwd", "-l", USER]),
+                ],
+                id="system_user_snappy",
+            ),
            pytest.param(
                {"create_no_home": False},
+                False,
                [
                    _useradd2call([USER, "-m"]),
                    mock.call(["passwd", "-l", USER]),
                ],
                id="explicit_no_home_false",
            ),
+            pytest.param(
+                {"create_no_home": False},
+                True,
+                [
+                    _useradd2call([USER, "--extrausers", "-m"]),
+                    mock.call(["passwd", "-l", USER]),
+                ],
+                id="explicit_no_home_false_snappy",
+            ),
            pytest.param(
                {"lock_passwd": False},
+                False,
                [_useradd2call([USER, "-m"])],
                id="unlocked",
            ),
            pytest.param(
-                {"passwd": "passfoo"},
+                {"lock_passwd": False},
+                True,
+                [_useradd2call([USER, "--extrausers", "-m"])],
+                id="unlocked_snappy",
+            ),
+            pytest.param(
+                {"passwd": "$6$rounds=..."},
+                False,
+                [
+                    _useradd2call([USER, "--password", "$6$rounds=...", "-m"]),
+                    mock.call(["passwd", "-l", USER]),
+                ],
+                id="set_implicit_encrypted_password",
+            ),
+            pytest.param(
+                {"passwd": "$6$rounds=..."},
+                True,
                [
-                    _useradd2call([USER, "--password", "passfoo", "-m"]),
+                    _useradd2call(
+                        [
+                            USER,
+                            "--extrausers",
+                            "--password",
+                            "$6$rounds=...",
+                            "-m",
+                        ]
+                    ),
                    mock.call(["passwd", "-l", USER]),
                ],
-                id="set_password",
+                id="set_implicit_encrypted_password_snappy",
+            ),
+            pytest.param(
+                {"passwd": ""},
+                False,
+                [
+                    _useradd2call([USER, "-m"]),
+                    mock.call(["passwd", "-l", USER]),
+                ],
+                id="set_empty_passwd_new_user",
+            ),
+            pytest.param(
+                {"passwd": ""},
+                True,
+                [
+                    _useradd2call([USER, "--extrausers", "-m"]),
+                    mock.call(["passwd", "-l", USER]),
+                ],
+                id="set_empty_passwd_new_user_snappy",
+            ),
+            pytest.param(
+                {"plain_text_passwd": "clearfoo"},
+                False,
+                [
+                    _useradd2call([USER, "-m"]),
+                    _chpasswdmock(USER, "clearfoo"),
+                    mock.call(["passwd", "-l", USER]),
+                ],
+                id="set_plain_text_password",
+            ),
+            pytest.param(
+                {"plain_text_passwd": "clearfoo"},
+                True,
+                [
+                    _useradd2call([USER, "--extrausers", "-m"]),
+                    _chpasswdmock(USER, "clearfoo"),
+                    mock.call(["passwd", "-l", USER]),
+                ],
+                id="set_plain_text_password_snappy",
+            ),
+            pytest.param(
+                {"hashed_passwd": "$6$rounds=..."},
+                False,
+                [
+                    _useradd2call([USER, "-m"]),
+                    _chpasswdmock(USER, "$6$rounds=...", hashed=True),
+                    mock.call(["passwd", "-l", USER]),
+                ],
+                id="set_explicitly_hashed_password",
+            ),
+            pytest.param(
+                {"hashed_passwd": "$6$rounds=..."},
+                True,
+                [
+                    _useradd2call([USER, "--extrausers", "-m"]),
+                    _chpasswdmock(USER, "$6$rounds=...", hashed=True),
+                    mock.call(["passwd", "-l", USER]),
+                ],
+                id="set_explicitly_hashed_password_snappy",
            ),
        ],
    )
-    def test_create_options(self, m_subp, dist, create_kwargs, expected):
+    @mock.patch("cloudinit.distros.util.is_user", return_value=False)
+    def test_create_options(
+        self,
+        m_is_user,
+        m_subp,
+        dist,
+        create_kwargs,
+        is_snappy,
+        expected,
+        mocker,
+    ):
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=is_snappy
+        )
        dist.create_user(name=USER, **create_kwargs)
        assert m_subp.call_args_list == expected

+    @pytest.mark.parametrize(
+        "shadow_content,distro_name,is_snappy,expected_logs",
+        (
+            pytest.param(
+                {"/etc/shadow": f"dnsmasq:!:\n{USER}:!:"},
+                "ubuntu",
+                False,
+                [
+                    "Not unlocking blank password for existing user "
+                    "foo_user. 'lock_passwd: false' present in user-data "
+                    "but no existing password set and no "
+                    "'plain_text_passwd'/'hashed_passwd' provided in "
+                    "user-data"
+                ],
+                id="no_unlock_on_locked_empty_user_passwd",
+            ),
+            pytest.param(
+                {"/var/lib/extrausers/shadow": f"dnsmasq::\n{USER}:!:"},
+                "ubuntu",
+                True,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_in_snappy_on_locked_empty_user_passwd_in_extrausers",
+            ),
+            pytest.param(
+                {"/etc/shadow": f"dnsmasq::\n{USER}::"},
+                "alpine",
+                False,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_on_empty_user_passwd_alpine",
+            ),
+            pytest.param(
+                {"/etc/master.passwd": f"dnsmasq::\n{USER}::"},
+                "dragonflybsd",
+                False,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_on_empty_user_passwd_dragonflybsd",
+            ),
+            pytest.param(
+                {"/etc/master.passwd": f"dnsmasq::\n{USER}:*:"},
+                "dragonflybsd",
+                False,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_on_locked_format1_empty_user_passwd_dragonflybsd",
+            ),
+            pytest.param(
+                {"/etc/master.passwd": f"dnsmasq::\n{USER}:*LOCKED*:"},
+                "dragonflybsd",
+                False,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_on_locked_format2_empty_user_passwd_dragonflybsd",
+            ),
+            pytest.param(
+                {"/etc/master.passwd": f"dnsmasq::\n{USER}::"},
+                "freebsd",
+                False,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_on_empty_user_passwd_freebsd",
+            ),
+            pytest.param(
+                {"/etc/master.passwd": f"dnsmasq::\n{USER}:*:"},
+                "freebsd",
+                False,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_on_locked_format1_empty_user_passwd_freebsd",
+            ),
+            pytest.param(
+                {"/etc/master.passwd": f"dnsmasq::\n{USER}:*LOCKED*:"},
+                "freebsd",
+                False,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_on_locked_format2_empty_user_passwd_freebsd",
+            ),
+            pytest.param(
+                {"/etc/master.passwd": f"dnsmasq::\n{USER}::"},
+                "netbsd",
+                False,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_on_empty_format1_user_passwd_netbsd",
+            ),
+            pytest.param(
+                {"/etc/master.passwd": f"dnsmasq::\n{USER}:*************:"},
+                "netbsd",
+                False,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_on_empty_format2_user_passwd_netbsd",
+            ),
+            pytest.param(
+                {
+                    "/etc/master.passwd": f"dnsmasq::\n{USER}:*LOCKED**************:"  # noqa: E501
+                },
+                "netbsd",
+                False,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_on_locked_empty_user_passwd_netbsd",
+            ),
+            pytest.param(
+                {"/etc/master.passwd": f"dnsmasq::\n{USER}::"},
+                "openbsd",
+                False,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_on_empty_user_passwd_openbsd",
+            ),
+            pytest.param(
+                {"/etc/master.passwd": f"dnsmasq::\n{USER}:*:"},
+                "openbsd",
+                False,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_on_locked_format1_empty_user_passwd_openbsd",
+            ),
+            pytest.param(
+                {"/etc/master.passwd": f"dnsmasq::\n{USER}:*************:"},
+                "openbsd",
+                False,
+                ["Not unlocking blank password for existing user foo_user."],
+                id="no_unlock_on_locked_format2_empty_user_passwd_openbsd",
+            ),
+        ),
+    )
+    def test_avoid_unlock_preexisting_user_empty_password(
+        self,
+        m_subp,
+        shadow_content,
+        distro_name,
+        is_snappy,
+        expected_logs,
+        caplog,
+        mocker,
+        tmpdir,
+    ):
+        dist = _get_distro(distro_name)
+        dist.shadow_fn = tmpdir.join(dist.shadow_fn).strpath
+        dist.shadow_extrausers_fn = tmpdir.join(
+            dist.shadow_extrausers_fn
+        ).strpath
+
+        mocker.patch("cloudinit.distros.util.is_user", return_value=True)
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=is_snappy
+        )
+        for filename, content in shadow_content.items():
+            if dist.shadow_fn == tmpdir.join(filename).strpath:
+                shadow_file = Path(dist.shadow_fn)
+                shadow_file.parent.mkdir(parents=True, exist_ok=True)
+            elif dist.shadow_extrausers_fn == tmpdir.join(filename).strpath:
+                shadow_file = Path(dist.shadow_extrausers_fn)
+                shadow_file.parent.mkdir(parents=True, exist_ok=True)
+            else:
+                raise AssertionError(
+                    f"Shadow file path {filename} not defined for distro"
+                    f" {dist.name}"
+                )
+            shadow_file.write_text(content)
+        unlock_passwd = mocker.patch.object(dist, "unlock_passwd")
+        dist.create_user(name=USER, lock_passwd=False)
+        for log in expected_logs:
+            assert log in caplog.text
+        unlock_passwd.assert_not_called()
+        assert m_subp.call_args_list == []
+
+    @pytest.mark.parametrize(
+        "create_kwargs,expected,expected_logs",
+        [
+            pytest.param(
+                {"passwd": "$6$rounds=..."},
+                [mock.call(["passwd", "-l", USER])],
+                [
+                    "'passwd' in user-data is ignored for existing user "
+                    "foo_user"
+                ],
+                id="skip_passwd_set_on_existing_user",
+            ),
+            pytest.param(
+                {"plain_text_passwd": "clearfoo"},
+                [
+                    _chpasswdmock(USER, "clearfoo"),
+                    mock.call(["passwd", "-l", USER]),
+                ],
+                [],
+                id="set_plain_text_password_on_existing_user",
+            ),
+            pytest.param(
+                {"hashed_passwd": "$6$rounds=..."},
+                [
+                    _chpasswdmock(USER, "$6$rounds=...", hashed=True),
+                    mock.call(["passwd", "-l", USER]),
+                ],
+                [],
+                id="set_explicitly_hashed_password",
+            ),
+        ],
+    )
+    @mock.patch("cloudinit.distros.util.is_user", return_value=True)
+    def test_create_passwd_existing_user(
+        self,
+        m_is_user,
+        m_subp,
+        create_kwargs,
+        expected,
+        expected_logs,
+        dist,
+        caplog,
+        tmpdir,
+        mocker,
+    ):
+        """When user exists, don't unlock on empty or locked passwords."""
+        dist.create_user(name=USER, **create_kwargs)
+        for log in expected_logs:
+            assert log in caplog.text
+        assert m_subp.call_args_list == expected
+
    @mock.patch("cloudinit.distros.util.is_group")
-    def test_group_added(self, m_is_group, m_subp, dist):
+    def test_group_added(self, m_is_group, m_subp, dist, mocker):
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=False
+        )
        m_is_group.return_value = False
        dist.create_user(USER, groups=["group1"])
        expected = [
@@ -101,7 +464,24 @@ def test_group_added(self, m_is_group, m_subp, dist):
        assert m_subp.call_args_list == expected

    @mock.patch("cloudinit.distros.util.is_group")
-    def test_only_new_group_added(self, m_is_group, m_subp, dist):
+    def test_snappy_group_added(self, m_is_group, m_subp, dist, mocker):
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=True
+        )
+        m_is_group.return_value = False
+        dist.create_user(USER, groups=["group1"])
+        expected = [
+            mock.call(["groupadd", "group1", "--extrausers"]),
+            _useradd2call([USER, "--extrausers", "--groups", "group1", "-m"]),
+            mock.call(["passwd", "-l", USER]),
+        ]
+        assert m_subp.call_args_list == expected
+
+    @mock.patch("cloudinit.distros.util.is_group")
+    def test_only_new_group_added(self, m_is_group, m_subp, dist, mocker):
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=False
+        )
        ex_groups = ["existing_group"]
        groups = ["group1", ex_groups[0]]
        m_is_group.side_effect = lambda m: m in ex_groups
@@ -113,11 +493,34 @@ def test_only_new_group_added(self, m_is_group, m_subp, dist):
        ]
        assert m_subp.call_args_list == expected

+    @mock.patch("cloudinit.distros.util.is_group")
+    def test_snappy_only_new_group_added(
+        self, m_is_group, m_subp, dist, mocker
+    ):
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=True
+        )
+        ex_groups = ["existing_group"]
+        groups = ["group1", ex_groups[0]]
+        m_is_group.side_effect = lambda m: m in ex_groups
+        dist.create_user(USER, groups=groups)
+        expected = [
+            mock.call(["groupadd", "group1", "--extrausers"]),
+            _useradd2call(
+                [USER, "--extrausers", "--groups", ",".join(groups), "-m"]
+            ),
+            mock.call(["passwd", "-l", USER]),
+        ]
+        assert m_subp.call_args_list == expected
+
    @mock.patch("cloudinit.distros.util.is_group")
    def test_create_groups_with_whitespace_string(
-        self, m_is_group, m_subp, dist
+        self, m_is_group, m_subp, dist, mocker
    ):
        # groups supported as a comma delimeted string even with white space
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=False
+        )
        m_is_group.return_value = False
        dist.create_user(USER, groups="group1, group2")
        expected = [
@@ -128,11 +531,34 @@ def test_create_groups_with_whitespace_string(
        ]
        assert m_subp.call_args_list == expected

+    @mock.patch("cloudinit.distros.util.is_group")
+    def test_snappy_create_groups_with_whitespace_string(
+        self, m_is_group, m_subp, dist, mocker
+    ):
+        # groups supported as a comma delimeted string even with white space
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=True
+        )
+        m_is_group.return_value = False
+        dist.create_user(USER, groups="group1, group2")
+        expected = [
+            mock.call(["groupadd", "group1", "--extrausers"]),
+            mock.call(["groupadd", "group2", "--extrausers"]),
+            _useradd2call(
+                [USER, "--extrausers", "--groups", "group1,group2", "-m"]
+            ),
+            mock.call(["passwd", "-l", USER]),
+        ]
+        assert m_subp.call_args_list == expected
+
    @mock.patch("cloudinit.distros.util.is_group", return_value=False)
    def test_create_groups_with_dict_deprecated(
-        self, m_is_group, m_subp, dist, caplog
+        self, m_is_group, m_subp, dist, caplog, mocker
    ):
        """users.groups supports a dict value, but emit deprecation log."""
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=False
+        )
        dist.create_user(USER, groups={"group1": None, "group2": None})
        expected = [
            mock.call(["groupadd", "group1"]),
@@ -157,8 +583,13 @@ def test_create_groups_with_dict_deprecated(
        assert "Use a comma-delimited" in caplog.records[0].message

    @mock.patch("cloudinit.distros.util.is_group", return_value=False)
-    def test_create_groups_with_list(self, m_is_group, m_subp, dist, caplog):
+    def test_create_groups_with_list(
+        self, m_is_group, m_subp, dist, caplog, mocker
+    ):
        """users.groups supports a list value."""
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=False
+        )
        dist.create_user(USER, groups=["group1", "group2"])
        expected = [
            mock.call(["groupadd", "group1"]),
@@ -170,7 +601,31 @@ def test_create_groups_with_list(self, m_is_group, m_subp, dist, caplog):
        assert "WARNING" not in caplog.text
        assert "DEPRECATED" not in caplog.text

-    def test_explicit_sudo_false(self, m_subp, dist, caplog):
+    @mock.patch("cloudinit.distros.util.is_group", return_value=False)
+    def test_snappy_create_groups_with_list(
+        self, m_is_group, m_subp, dist, caplog, mocker
+    ):
+        """users.groups supports a list value."""
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=True
+        )
+        dist.create_user(USER, groups=["group1", "group2"])
+        expected = [
+            mock.call(["groupadd", "group1", "--extrausers"]),
+            mock.call(["groupadd", "group2", "--extrausers"]),
+            _useradd2call(
+                [USER, "--extrausers", "--groups", "group1,group2", "-m"]
+            ),
+            mock.call(["passwd", "-l", USER]),
+        ]
+        assert m_subp.call_args_list == expected
+        assert "WARNING" not in caplog.text
+        assert "DEPRECATED" not in caplog.text
+
+    def test_explicit_sudo_false(self, m_subp, dist, caplog, mocker):
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=False
+        )
        dist.create_user(USER, sudo=False)
        assert m_subp.call_args_list == [
            _useradd2call([USER, "-m"]),
@@ -191,7 +646,10 @@ def test_explicit_sudo_false(self, m_subp, dist, caplog):
            " in 27.2. Use 'null' instead."
        ) in caplog.text

-    def test_explicit_sudo_none(self, m_subp, dist, caplog):
+    def test_explicit_sudo_none(self, m_subp, dist, caplog, mocker):
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=False
+        )
        dist.create_user(USER, sudo=None)
        assert m_subp.call_args_list == [
            _useradd2call([USER, "-m"]),
@@ -200,11 +658,26 @@ def test_explicit_sudo_none(self, m_subp, dist, caplog):
        assert "WARNING" not in caplog.text
        assert "DEPRECATED" not in caplog.text

+    def test_snappy_explicit_sudo_none(self, m_subp, dist, caplog, mocker):
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=True
+        )
+        dist.create_user(USER, sudo=None)
+        assert m_subp.call_args_list == [
+            _useradd2call([USER, "--extrausers", "-m"]),
+            mock.call(["passwd", "-l", USER]),
+        ]
+        assert "WARNING" not in caplog.text
+        assert "DEPRECATED" not in caplog.text
+
    @mock.patch("cloudinit.ssh_util.setup_user_keys")
    def test_setup_ssh_authorized_keys_with_string(
-        self, m_setup_user_keys, m_subp, dist
+        self, m_setup_user_keys, m_subp, dist, mocker
    ):
        """ssh_authorized_keys allows string and calls setup_user_keys."""
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=False
+        )
        dist.create_user(USER, ssh_authorized_keys="mykey")
        assert m_subp.call_args_list == [
            _useradd2call([USER, "-m"]),
@@ -212,11 +685,29 @@ def test_setup_ssh_authorized_keys_with_string(
        ]
        m_setup_user_keys.assert_called_once_with({"mykey"}, USER)

+    @mock.patch("cloudinit.ssh_util.setup_user_keys")
+    def test_snappy_setup_ssh_authorized_keys_with_string(
+        self, m_setup_user_keys, m_subp, dist, mocker
+    ):
+        """ssh_authorized_keys allows string and calls setup_user_keys."""
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=True
+        )
+        dist.create_user(USER, ssh_authorized_keys="mykey")
+        assert m_subp.call_args_list == [
+            _useradd2call([USER, "--extrausers", "-m"]),
+            mock.call(["passwd", "-l", USER]),
+        ]
+        m_setup_user_keys.assert_called_once_with({"mykey"}, USER)
+
    @mock.patch("cloudinit.ssh_util.setup_user_keys")
    def test_setup_ssh_authorized_keys_with_list(
-        self, m_setup_user_keys, m_subp, dist
+        self, m_setup_user_keys, m_subp, dist, mocker
    ):
        """ssh_authorized_keys allows lists and calls setup_user_keys."""
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=False
+        )
        dist.create_user(USER, ssh_authorized_keys=["key1", "key2"])
        assert m_subp.call_args_list == [
            _useradd2call([USER, "-m"]),
@@ -224,6 +715,21 @@ def test_setup_ssh_authorized_keys_with_list(
        ]
        m_setup_user_keys.assert_called_once_with({"key1", "key2"}, USER)

+    @mock.patch("cloudinit.ssh_util.setup_user_keys")
+    def test_snappy_setup_ssh_authorized_keys_with_list(
+        self, m_setup_user_keys, m_subp, dist, mocker
+    ):
+        """ssh_authorized_keys allows lists and calls setup_user_keys."""
+        mocker.patch(
+            "cloudinit.distros.util.system_is_snappy", return_value=True
+        )
+        dist.create_user(USER, ssh_authorized_keys=["key1", "key2"])
+        assert m_subp.call_args_list == [
+            _useradd2call([USER, "--extrausers", "-m"]),
+            mock.call(["passwd", "-l", USER]),
+        ]
+        m_setup_user_keys.assert_called_once_with({"key1", "key2"}, USER)
+
    @mock.patch("cloudinit.ssh_util.setup_user_keys")
    def test_setup_ssh_authorized_keys_with_integer(
        self, m_setup_user_keys, m_subp, dist, caplog

diff --git a/tests/unittests/distros/test_dragonflybsd.py b/tests/unittests/distros/test_dragonflybsd.py
index 8a240ea5fa9..5419eeeafd4 100644
--- a/tests/unittests/distros/test_dragonflybsd.py
+++ b/tests/unittests/distros/test_dragonflybsd.py
@@ -1,8 +1,41 @@
# This file is part of cloud-init. See LICENSE file for license information.

import cloudinit.util
+from tests.unittests.distros import _get_distro
from tests.unittests.helpers import mock

+M_PATH = "cloudinit.distros."
+ + +class TestDragonFlyBSD: + @mock.patch(M_PATH + "subp.subp") + def test_add_user(self, m_subp): + distro = _get_distro("dragonflybsd") + assert True is distro.add_user("me2", uid=1234, default=False) + assert [ + mock.call( + [ + "pw", + "useradd", + "-n", + "me2", + "-u", + "1234", + "-d/home/me2", + "-m", + ], + logstring=["pw", "useradd", "-n", "me2", "-d/home/me2", "-m"], + ) + ] == m_subp.call_args_list + + def test_unlock_passwd(self, caplog): + distro = _get_distro("dragonflybsd") + distro.unlock_passwd("me2") + assert ( + "Dragonfly BSD/FreeBSD password lock is not reversible, " + "ignoring unlock for user me2" in caplog.text + ) + def test_find_dragonflybsd_part(): assert cloudinit.util.find_freebsd_part("/dev/vbd0s3") == "vbd0s3" diff --git a/tests/unittests/distros/test_freebsd.py b/tests/unittests/distros/test_freebsd.py index c4c067ead71..50fb8e9ffc0 100644 --- a/tests/unittests/distros/test_freebsd.py +++ b/tests/unittests/distros/test_freebsd.py @@ -2,7 +2,6 @@ import os -from cloudinit.distros.freebsd import Distro, FreeBSDNetworking from cloudinit.util import find_freebsd_part, get_path_dev_freebsd from tests.unittests.distros import _get_distro from tests.unittests.helpers import CiTestCase, mock @@ -12,10 +11,9 @@ class TestFreeBSD: @mock.patch(M_PATH + "subp.subp") - def test_add_user(self, m_subp, mocker): - mocker.patch.object(Distro, "networking_cls", spec=FreeBSDNetworking) + def test_add_user(self, m_subp): distro = _get_distro("freebsd") - distro.add_user("me2", uid=1234, default=False) + assert True is distro.add_user("me2", uid=1234, default=False) assert [ mock.call( [ @@ -39,6 +37,14 @@ def test_add_user(self, m_subp, mocker): ) ] == m_subp.call_args_list + def test_unlock_passwd(self, caplog): + distro = _get_distro("freebsd") + distro.unlock_passwd("me2") + assert ( + "Dragonfly BSD/FreeBSD password lock is not reversible, " + "ignoring unlock for user me2" in caplog.text + ) + class TestDeviceLookUp(CiTestCase): @mock.patch("cloudinit.subp.subp") diff --git a/tests/unittests/distros/test_netbsd.py b/tests/unittests/distros/test_netbsd.py index 2abe5ef1441..c4cb9a55122 100644 --- a/tests/unittests/distros/test_netbsd.py +++ b/tests/unittests/distros/test_netbsd.py @@ -2,6 +2,8 @@ import pytest +from tests.unittests.distros import _get_distro + try: # Blowfish not available in < 3.7, so this has never worked. Ignore failure # to import with AttributeError. We need this module imported prior to @@ -10,6 +12,28 @@ except AttributeError: pass +M_PATH = "cloudinit.distros.netbsd." + + +class TestNetBSD: + @mock.patch(M_PATH + "subp.subp") + def test_add_user(self, m_subp): + distro = _get_distro("netbsd") + assert True is distro.add_user("me2", uid=1234, default=False) + assert [ + mock.call( + ["useradd", "-m", "me2"], logstring=["useradd", "-m", "me2"] + ) + ] == m_subp.call_args_list + + @mock.patch(M_PATH + "subp.subp") + def test_unlock_passwd(self, m_subp, caplog): + distro = _get_distro("netbsd") + distro.unlock_passwd("me2") + assert [ + mock.call(["usermod", "-C", "no", "me2"]) + ] == m_subp.call_args_list + @pytest.mark.parametrize("with_pkgin", (True, False)) @mock.patch("cloudinit.distros.netbsd.os") diff --git a/tests/unittests/distros/test_openbsd.py b/tests/unittests/distros/test_openbsd.py new file mode 100644 index 00000000000..2bab0d3bd14 --- /dev/null +++ b/tests/unittests/distros/test_openbsd.py @@ -0,0 +1,26 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from tests.unittests.distros import _get_distro +from tests.unittests.helpers import mock + +M_PATH = "cloudinit.distros.openbsd." + + +class TestOpenBSD: + @mock.patch(M_PATH + "subp.subp") + def test_add_user(self, m_subp): + distro = _get_distro("openbsd") + assert True is distro.add_user("me2", uid=1234, default=False) + assert [ + mock.call( + ["useradd", "-m", "me2"], logstring=["useradd", "-m", "me2"] + ) + ] == m_subp.call_args_list + + def test_unlock_passwd(self, caplog): + distro = _get_distro("openbsd") + distro.unlock_passwd("me2") + assert ( + "OpenBSD password lock is not reversible, " + "ignoring unlock for user me2" in caplog.text + ) From 93f30bbfcb073fd8213c18c2e7eb7f857234fc8a Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 29 Aug 2024 18:22:23 -0400 Subject: [PATCH 120/131] fix: properly handle blank lines in fstab (#5643) --- cloudinit/config/cc_mounts.py | 5 +++-- tests/unittests/config/test_cc_mounts.py | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 1d9f821bbd0..20e23296a4f 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -374,8 +374,9 @@ def parse_fstab() -> Tuple[List[str], Dict[str, str], List[str]]: fstab_removed.append(line) continue toks = line.split() - fstab_devs[toks[0]] = line - fstab_lines.append(line) + if toks: + fstab_devs[toks[0]] = line + fstab_lines.append(line) return fstab_lines, fstab_devs, fstab_removed diff --git a/tests/unittests/config/test_cc_mounts.py b/tests/unittests/config/test_cc_mounts.py index 7e85987b744..0e6d8379379 100644 --- a/tests/unittests/config/test_cc_mounts.py +++ b/tests/unittests/config/test_cc_mounts.py @@ -533,6 +533,7 @@ def test_fstab_mounts_combinations(self): "LABEL=keepme none ext4 defaults 0 0\n" "/dev/sda1 /a auto defaults,comment=cloudconfig 0 2\n" "LABEL=UEFI\n" + "\n" "/dev/sda2 /b auto defaults,comment=cloudconfig 0 2\n" ) with open(cc_mounts.FSTAB_PATH, "w") as fd: From 5ff1a4a2b5232a4a466f72c46c580957e7961969 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 29 Aug 2024 22:07:31 -0400 Subject: [PATCH 121/131] docs: Clarify v2 set-name behavior (#5639) --- doc/rtd/reference/network-config-format-v2.rst | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/doc/rtd/reference/network-config-format-v2.rst b/doc/rtd/reference/network-config-format-v2.rst index 90a3ba73924..615122cf73c 100644 --- a/doc/rtd/reference/network-config-format-v2.rst +++ b/doc/rtd/reference/network-config-format-v2.rst @@ -151,14 +151,20 @@ Example: :: ``set-name: <(scalar)>`` ------------------------ -When matching on unique properties such as path or MAC, or with additional -assumptions such as "there will only ever be one wifi device", match rules -can be written so that they only match one device. Then this property can be +When matching on unique properties such as MAC, match rules +can be written so that they match only one device. Then this property can be used to give that device a more specific/desirable/nicer name than the default from udev’s ``ifnames``. Any additional device that satisfies the match rules will then fail to get renamed and keep the original kernel name (and dmesg will show an error). +While multiple properties can be used in a match, ``macaddress`` is +**required** for cloud-init to perform the rename. + +.. note:: + On a netplan-based system, cloud-init will perform the rename + independently and prior to netplan. 
+ ``wakeonlan: <(bool)>`` ----------------------- From 10449cb12ac3f70f5243ea205d569c3929528ac7 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 30 Aug 2024 11:30:20 -0600 Subject: [PATCH 122/131] Release 24.3 Bump the version in cloudinit/version.py to 24.3 and update ChangeLog. --- ChangeLog | 136 +++++++++++++++++++++++++++++++++++++++++++ cloudinit/version.py | 2 +- 2 files changed, 137 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 260c460df46..bd735d36125 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,139 @@ +24.3 + - docs: Clarify v2 set-name behavior (#5639) + - fix: properly handle blank lines in fstab (#5643) + - fix: cc_user_groups incorrectly assumes "useradd" never locks password + field (#5355) [dermotbradley] + - tests: assert cloud-init user-data cert is the only root cert (#5641) + - feat: add automation for ubuntu/* branches asserting quilt patches apply + (#5622) + - fix(sources/wsl): no error with empty .cloud-init dir (SC-1862) (#5633) + - feat(azure): add PPS support for azure-proxy-agent (#5601) + [Ksenija Stanojevic] + - fix(tests): use instance.clean/restart instead of clean --reboot (#5636) + - test: fix cmd/test_schema int test (#5629) + - test: fix test_honor_cloud_dir int test (#5627) + - docs: alphabetize dsname lookup table. update comment to create the csv + (#5624) + - docs: new datasources should update reference/ds_dsname_map (#5624) + - test: fix ca_certs int test (#5626) + - chore: update schema docs to use RST bold for config key names (#5562) + - fix(doc): italics around deprecation prefix, description bolds key names + (#5562) + - feat(doc): add env vars to debug config module doc builds (#5562) + - fix(doc): doc of nested objects under JSON schema items.oneOf (#5562) + - fix(doc): object type check if patternProperties or properties (#5562) + - doc(schema): schema descriptions should end with trailing stop (#5562) + - fix(wsl): Properly assemble multipart data (#5538) [Carlos Nihelton] + - feat: collect-logs improvements (#5619) + - tests: fix test_ca_certs.py for gcp (#5621) + - fix(nm): Ensure bond property name formatting matches schema definition + (#5383) [Curt Moore] + - Update behavior of base bond interface with NetworkManager (#5385) + [Curt Moore] + - ci: Drop Python 3.6 and 3.7 (#5607) + - chore(black): Bump version (#5607) + - chore(mypy): Fix failures on newer versions of mypy (#5607) + - chore(tox.ini): Simplify configuration, fix minor bugs (#5607) + - chore(mypy): Lint log module (#5607) + - fix(systemd): Correct location of installed drop-in files(#5615) + [Noah Meyerhans] + - fix(btrfs): Version parsing (#5618) + - docs: Remove unnecessary section, add feature flag page (#5617) + - docs: Drop Python 3.6 and 3.7 support (#5617) + - chore: explain other use of oauth (#5616) + - chore(actions): add doc label for any doc related subdir file matches + (#5602) + - doc: Add misc links, improve wording (#5595) + - doc(boot): Make first boot a dedicated page (#5595) + - doc: Describe all stages in a single process (#5595) + - chore: Deprecate old commands in help output (#5595) + - chore: add comment explaining the NetworkManager may-fail setting + (#5598) [Ani Sinha] + - Revert "fix(vmware): Set IPv6 to dhcp when there is no IPv6 addr + (#5471)" (#5596) [PengpengSun] + - fix: read_optional_seed to set network-config when present (#5593) + - feat(snap): avoid refresh on package_upgrade: true and refresh.hold + (#5426) + - fix: Fix tests which have outdated strings (#5585) + - fix: Fix ftp failures (#5585) + - 
doc: improve integration testing configuration instructions (#5556) + [Alec Warren] + - azure: check azure-proxy-agent status (#5138) [Ksenija Stanojevic] + - refactor: refactor and fix mypy in DataSourceIBMCloud.py (#5509) + [Alec Warren] + - fix: Update default LXD meta-data with user meta-data (#5584) + - chore: Fix log message in url_helper.py (#5583) + - fix: nocloud no fail when network-config absent (#5580) + - feat: Single process optimization (#5489) + - chore: Add helper, refactor utilities into separate module (#5573) + - refactor: update handle function of cc_mounts (#5498) + - fix: Integration tests (#5576) + - fix(NoCloudNet): Add network-config support (#5566) + - feat: Eliminate redundant configuration reads (#5536) + - fix(actions): correct typo in cloudinit/config/schemas/ match (#5570) + - fix: add host template for AOSC (#5557) [Yuanhang Sun] + - chore(debian): Remove vestigial postinst and preinst code (#5569) + - fix(actions): doc labeler needs all clause instead of default any (#5568) + - docs: Overhaul user data formats documentation (#5551) + - chore: Deprecate ENI as an input configuration format (#5561) + - doc: improve drop-in custom modules (#5548) + - doc(NoCloud): Categorize the different configuration types (#5521) + - doc(autoinstall): Remove incorrect statements, be more direct (#5545) + - chore: remove unneeded doc-lint tox env config (#5547) + - fix(doc-spelling): config spelling_word_list_filename (#5547) + - doc(modules): add section to wrap modules' doc (#5550) + - doc: Update docs on boothooks (#5546) + - fix: doc auto label to consider schema json changes as doc PRs (#5543) + - feat(schema): add chef_license schema enum (#5543) + - doc: add diagram with boot stages (#5539) + - docs: improve qemu command line (#5540) [Christian Ehrhardt] + - fix: auto label doc PRs (#5542) + - fix(wsl): Put back the "path" argument to wsl_path in ds-identify + (#5537) [Carlos Nihelton] + - test: fix test_kernel_command_line_match (#5529) + - test: fix no ds cache tests (#5529) + - fix(azurelinux): Change default usr_lib_exec path (#5526) [Minghe Ren] + - feat: Support URI sources in `write_files` module (#5505) + [Lucas Ritzdorf] + - add openeuler to distros in cc_spacewalk.py (#5530) [sxt1001] + - feat(wsl): Special handling Landscape client config tags (#5460) + [Carlos Nihelton] + - chore: Deprecate partially supported system config (#5515) + - chore: Improve detection logging for user clarity (#5515) + - fix(ds-identify): Detect nocloud when seedfrom url exists (#5515) + - refactor: logs.py add typing and small misc refactors (#5414) + - refactor: logs.py pathlib changes (#5414) + - refactor: replace verbosity with log levels in logs.py (#5414) + - feat: Add trace-level logger (#5414) + - chore(formatting): fix squashed commit test formatting (#5524) + - fix: Clean cache if no datasource fallback (#5499) + - Support setting mirrorlist in yum repository config (#5522) [Ani Sinha] + - doc(OFV): Document how to configure cloud-init (#5519) + - fix: Update DNS behavior for NetworkManager interfaces (#5496) + [Curt Moore] + - Fix configuration of DNS servers via OpenStack (#5384) [Curt Moore] + - test: Unconditionally skip test_multi_nic_hotplug_vpc (#5503) + - tests: revert expectation of exit 2 from cloud-init init --local (#5504) + - fix(test): Fix ip printer for non-lxd (#5488) + - feat(systemd): convert warning level message to deprecation (#5209) + - test: allow verify_clean_boot to ignore all or specific tracebacks + (#5209) + - test: Don't fail tests which 
call cloud-init as a command (#5209) + - feat(systemd): Warn user of unexpected run mode (#5209) + - fix: add schema rules for 'baseurl' and 'metalink' in yum repo config + (#5501) [Ani Sinha] + - Set MTU for bond parent interface (#5495) [Curt Moore] + - refactor: util.mounts to handle errors (#5490) + - refactor: util.get_proc_env to work with strs (#5490) + - typing: fix check_untyped_defs in cloudinit.util (#5490) + - test: Add missing assert to test_status.py (#5494) + - test: Ensure mkcert executable in ftp tests (#5493) + - test: pytestify and cleanup test_cc_mounts.py (#5459) + - fix(vmware): Set IPv6 to dhcp when there is no IPv6 addr (#5471) + [PengpengSun] + - fix(openbsd): fix mtu on newline in hostname files (#5412) [Tobias Urdin] + - feat(aosc): Add 'AOSC OS' support (#5310) [Yuanhang Sun] + 24.2 - test: Fix no default user in test_status.py (#5478) - fix: correct deprecated_version=22.2 for users.sudo diff --git a/cloudinit/version.py b/cloudinit/version.py index b6bc8227d66..2c92f35d59c 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "24.2" +__VERSION__ = "24.3" _PACKAGED_VERSION = "@@PACKAGED_VERSION@@" FEATURES = [ From 6963dcc7cae628bc9928839dd407507880508458 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Fri, 30 Aug 2024 18:22:51 -0600 Subject: [PATCH 123/131] fix(netops): fix ip addr flush command (#5651) Drop unnecessary environment variable. Fixes GH-5648 --- cloudinit/net/netops/iproute2.py | 3 +-- tests/unittests/net/test_init.py | 5 ----- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/cloudinit/net/netops/iproute2.py b/cloudinit/net/netops/iproute2.py index 46633b6d5c0..e40e5dc9813 100644 --- a/cloudinit/net/netops/iproute2.py +++ b/cloudinit/net/netops/iproute2.py @@ -130,7 +130,6 @@ def add_addr( "dev", interface, ], - update_env={"LANG": "C"}, ) @staticmethod @@ -141,4 +140,4 @@ def del_addr(interface: str, address: str): @staticmethod def flush_addr(interface: str): - subp.subp(["ip", "flush", "dev", interface]) + subp.subp(["ip", "addr", "flush", "dev", interface]) diff --git a/tests/unittests/net/test_init.py b/tests/unittests/net/test_init.py index 6fa5dc828d6..140161a1977 100644 --- a/tests/unittests/net/test_init.py +++ b/tests/unittests/net/test_init.py @@ -836,7 +836,6 @@ def test_ephemeral_ipv4_network_performs_teardown(self, m_subp): "dev", "eth0", ], - update_env={"LANG": "C"}, ), ] expected_teardown_calls = [ @@ -973,7 +972,6 @@ def test_ephemeral_ipv4_network_with_prefix(self, m_subp): "dev", "eth0", ], - update_env={"LANG": "C"}, ) ] ) @@ -992,7 +990,6 @@ def test_ephemeral_ipv4_network_with_prefix(self, m_subp): "dev", "eth0", ], - update_env={"LANG": "C"}, ) ] ) @@ -1023,7 +1020,6 @@ def test_ephemeral_ipv4_network_with_new_default_route(self, m_subp): "dev", "eth0", ], - update_env={"LANG": "C"}, ), mock.call(["ip", "route", "show", "0.0.0.0/0"]), mock.call( @@ -1104,7 +1100,6 @@ def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp): "dev", "eth0", ], - update_env={"LANG": "C"}, ), mock.call( [ From 92e380e855fd729d6657a0a331b40bcdd277c422 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Tue, 3 Sep 2024 09:50:42 -0600 Subject: [PATCH 124/131] test: add test coverage for iproute2 commands (#5651) --- tests/unittests/net/netops/test_iproute2.py | 203 ++++++++++++++++++++ 1 file changed, 203 insertions(+) create mode 100644 tests/unittests/net/netops/test_iproute2.py diff --git 
From 92e380e855fd729d6657a0a331b40bcdd277c422 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Tue, 3 Sep 2024 09:50:42 -0600
Subject: [PATCH 124/131] test: add test coverage for iproute2 commands
 (#5651)

---
 tests/unittests/net/netops/test_iproute2.py | 203 ++++++++++++++++++++
 1 file changed, 203 insertions(+)
 create mode 100644 tests/unittests/net/netops/test_iproute2.py

diff --git a/tests/unittests/net/netops/test_iproute2.py b/tests/unittests/net/netops/test_iproute2.py
new file mode 100644
index 00000000000..e2b326c4b77
--- /dev/null
+++ b/tests/unittests/net/netops/test_iproute2.py
@@ -0,0 +1,203 @@
+from unittest import mock
+
+from cloudinit.net.netops import iproute2
+from cloudinit.subp import SubpResult
+
+
+class TestOps:
+    @mock.patch.object(iproute2.subp, "subp")
+    def test_link_up(self, m_subp):
+        iproute2.Iproute2.link_up("eth0")
+        iproute2.Iproute2.link_up("eth0", "inet6")
+        assert m_subp.call_args_list == [
+            mock.call(["ip", "link", "set", "dev", "eth0", "up"]),
+            mock.call(
+                ["ip", "-family", "inet6", "link", "set", "dev", "eth0", "up"]
+            ),
+        ]
+
+    @mock.patch.object(iproute2.subp, "subp")
+    def test_link_down(self, m_subp):
+        iproute2.Iproute2.link_down("enp24s0")
+        iproute2.Iproute2.link_down("eno1", "inet6")
+        assert m_subp.call_args_list == [
+            mock.call(["ip", "link", "set", "dev", "enp24s0", "down"]),
+            mock.call(
+                [
+                    "ip",
+                    "-family",
+                    "inet6",
+                    "link",
+                    "set",
+                    "dev",
+                    "eno1",
+                    "down",
+                ]
+            ),
+        ]
+
+    @mock.patch.object(iproute2.subp, "subp")
+    def test_link_rename(self, m_subp):
+        iproute2.Iproute2.link_rename("ens1", "ego1")
+        assert m_subp.call_args_list == [
+            mock.call(["ip", "link", "set", "ens1", "name", "ego1"])
+        ]
+
+    @mock.patch.object(iproute2.subp, "subp")
+    def test_add_route(self, m_subp):
+        iproute2.Iproute2.add_route("wlan0", "102.42.42.0/24")
+        iproute2.Iproute2.add_route(
+            "ens2",
+            "102.42.0.0/16",
+            gateway="192.168.2.254",
+            source_address="192.168.2.1",
+        )
+        assert m_subp.call_args_list == [
+            mock.call(
+                [
+                    "ip",
+                    "-4",
+                    "route",
+                    "replace",
+                    "102.42.42.0/24",
+                    "dev",
+                    "wlan0",
+                ]
+            ),
+            mock.call(
+                [
+                    "ip",
+                    "-4",
+                    "route",
+                    "replace",
+                    "102.42.0.0/16",
+                    "via",
+                    "192.168.2.254",
+                    "dev",
+                    "ens2",
+                    "src",
+                    "192.168.2.1",
+                ]
+            ),
+        ]
+
+    @mock.patch.object(iproute2.subp, "subp")
+    def test_del_route(self, m_subp):
+        iproute2.Iproute2.del_route("wlan0", "102.42.42.0/24")
+        iproute2.Iproute2.del_route(
+            "ens2",
+            "102.42.0.0/16",
+            gateway="192.168.2.254",
+            source_address="192.168.2.1",
+        )
+        assert m_subp.call_args_list == [
+            mock.call(
+                ["ip", "-4", "route", "del", "102.42.42.0/24", "dev", "wlan0"]
+            ),
+            mock.call(
+                [
+                    "ip",
+                    "-4",
+                    "route",
+                    "del",
+                    "102.42.0.0/16",
+                    "via",
+                    "192.168.2.254",
+                    "dev",
+                    "ens2",
+                    "src",
+                    "192.168.2.1",
+                ]
+            ),
+        ]
+
+    @mock.patch.object(iproute2.subp, "subp")
+    def test_append_route(self, m_subp):
+        iproute2.Iproute2.append_route("wlan0", "102.42.42.0/24", "10.0.4.254")
+        assert m_subp.call_args_list == [
+            mock.call(
+                [
+                    "ip",
+                    "-4",
+                    "route",
+                    "append",
+                    "102.42.42.0/24",
+                    "via",
+                    "10.0.4.254",
+                    "dev",
+                    "wlan0",
+                ]
+            )
+        ]
+
+    @mock.patch.object(iproute2.subp, "subp")
+    def test_add_addr(self, m_subp):
+        iproute2.Iproute2.add_addr("wlan0", "10.0.17.0", "10.0.17.255")
+        assert m_subp.call_args_list == [
+            mock.call(
+                [
+                    "ip",
+                    "-family",
+                    "inet",
+                    "addr",
+                    "add",
+                    "10.0.17.0",
+                    "broadcast",
+                    "10.0.17.255",
+                    "dev",
+                    "wlan0",
+                ],
+            ),
+        ]
+
+    @mock.patch.object(iproute2.subp, "subp")
+    def test_del_addr(self, m_subp):
+        iproute2.Iproute2.del_addr("eth0", "10.0.8.3")
+        assert m_subp.call_args_list == [
+            mock.call(
+                [
+                    "ip",
+                    "-family",
+                    "inet",
+                    "addr",
+                    "del",
+                    "10.0.8.3",
+                    "dev",
+                    "eth0",
+                ],
+            ),
+        ]
+
+    @mock.patch.object(iproute2.subp, "subp")
+    def test_flush_addr(self, m_subp):
+        iproute2.Iproute2.flush_addr("eth0")
+        assert m_subp.call_args_list == [
+            mock.call(
"eth0"], + ), + ] + + @mock.patch.object( + iproute2.subp, + "subp", + return_value=SubpResult( + "default via 192.168.0.1 dev enp2s0 proto dhcp src 192.168.0.104" + " metric 100", + "", + ), + ) + def test_add_default_route(self, m_subp): + assert iproute2.Iproute2.get_default_route() == ( + "default via 192.168.0.1 dev enp2s0 proto dhcp src" + " 192.168.0.104 metric 100" + ) + assert m_subp.call_args_list == [ + mock.call( + [ + "ip", + "route", + "show", + "0.0.0.0/0", + ], + ), + ] From 337c6514b1608e59ecf3bf3aa5b59b0504613ee2 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 4 Sep 2024 12:53:39 -0600 Subject: [PATCH 125/131] Release 24.3.1 (#5375) Bump the version in cloudinit/version.py to 24.3.1 and update ChangeLog. --- ChangeLog | 4 ++++ cloudinit/version.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index bd735d36125..4953084c093 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +24.3.1 + - test: add test coverage for iproute2 commands (#5651) + - fix(netops): fix ip addr flush command (#5651) (GH: 5648) + 24.3 - docs: Clarify v2 set-name behavior (#5639) - fix: properly handle blank lines in fstab (#5643) diff --git a/cloudinit/version.py b/cloudinit/version.py index 2c92f35d59c..03fbfc365f5 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "24.3" +__VERSION__ = "24.3.1" _PACKAGED_VERSION = "@@PACKAGED_VERSION@@" FEATURES = [ From b0001254160cf4d4cfae6ca79a32c6bbb68fe441 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 5 Sep 2024 12:20:44 -0600 Subject: [PATCH 126/131] fix(no-nocloud-network.patch): avoid reading network-config from read_seeded (#5670) --- debian/patches/no-nocloud-network.patch | 97 +++++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/debian/patches/no-nocloud-network.patch b/debian/patches/no-nocloud-network.patch index 5d3269a4ed4..10afabcd12b 100644 --- a/debian/patches/no-nocloud-network.patch +++ b/debian/patches/no-nocloud-network.patch @@ -24,3 +24,100 @@ Last-Update: 2024-08-02 found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults +--- a/cloudinit/util.py ++++ b/cloudinit/util.py +@@ -1059,7 +1059,6 @@ + ud_url = base.replace("%s", "user-data" + ext) + vd_url = base.replace("%s", "vendor-data" + ext) + md_url = base.replace("%s", "meta-data" + ext) +- network_url = base.replace("%s", "network-config" + ext) + else: + if features.NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH: + if base[-1] != "/" and parse.urlparse(base).query == "": +@@ -1068,17 +1067,7 @@ + ud_url = "%s%s%s" % (base, "user-data", ext) + vd_url = "%s%s%s" % (base, "vendor-data", ext) + md_url = "%s%s%s" % (base, "meta-data", ext) +- network_url = "%s%s%s" % (base, "network-config", ext) + network = None +- try: +- network_resp = url_helper.read_file_or_url( +- network_url, timeout=timeout, retries=retries +- ) +- except url_helper.UrlError as e: +- LOG.debug("No network config provided: %s", e) +- else: +- if network_resp.ok(): +- network = load_yaml(network_resp.contents) + md_resp = url_helper.read_file_or_url( + md_url, timeout=timeout, retries=retries + ) +--- a/tests/unittests/test_util.py ++++ b/tests/unittests/test_util.py +@@ -2476,7 +2476,7 @@ + assert found_md == {"key1": "val1"} + assert found_ud == ud + assert found_vd == vd +- assert found_network == {"test": "true"} ++ assert found_network is None + + @pytest.mark.parametrize( + "base, feature_flag, 
req_urls", +@@ -2485,7 +2485,6 @@ + "http://10.0.0.1/%s?qs=1", + True, + [ +- "http://10.0.0.1/network-config?qs=1", + "http://10.0.0.1/meta-data?qs=1", + "http://10.0.0.1/user-data?qs=1", + "http://10.0.0.1/vendor-data?qs=1", +@@ -2496,7 +2495,6 @@ + "https://10.0.0.1:8008/", + True, + [ +- "https://10.0.0.1:8008/network-config", + "https://10.0.0.1:8008/meta-data", + "https://10.0.0.1:8008/user-data", + "https://10.0.0.1:8008/vendor-data", +@@ -2507,7 +2505,6 @@ + "https://10.0.0.1:8008", + True, + [ +- "https://10.0.0.1:8008/network-config", + "https://10.0.0.1:8008/meta-data", + "https://10.0.0.1:8008/user-data", + "https://10.0.0.1:8008/vendor-data", +@@ -2518,7 +2515,6 @@ + "https://10.0.0.1:8008", + False, + [ +- "https://10.0.0.1:8008network-config", + "https://10.0.0.1:8008meta-data", + "https://10.0.0.1:8008user-data", + "https://10.0.0.1:8008vendor-data", +@@ -2529,7 +2525,6 @@ + "https://10.0.0.1:8008?qs=", + True, + [ +- "https://10.0.0.1:8008?qs=network-config", + "https://10.0.0.1:8008?qs=meta-data", + "https://10.0.0.1:8008?qs=user-data", + "https://10.0.0.1:8008?qs=vendor-data", +@@ -2568,7 +2563,7 @@ + # user-data, vendor-data read raw. It could be scripts or other format + assert found_ud == "/user-data: 1" + assert found_vd == "/vendor-data: 1" +- assert found_network == {"/network-config": 1} ++ assert found_network is None + assert [ + mock.call(req_url, timeout=5, retries=10) for req_url in req_urls + ] == m_read.call_args_list +@@ -2598,7 +2593,7 @@ + self.assertEqual(found_md, {"key1": "val1"}) + self.assertEqual(found_ud, ud) + self.assertEqual(found_vd, vd) +- self.assertEqual(found_network, {"test": "true"}) ++ self.assertIsNone(found_network) + + + class TestEncode(helpers.TestCase): From 6915279ea9cfd6a612a397014cc4a261ffd680b7 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 6 Sep 2024 12:17:57 -0600 Subject: [PATCH 127/131] refresh patches against 24.3.1 patches: debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch debian/patches/drop-unsupported-systemd-condition-environment.patch debian/patches/netplan99-cannot-use-default.patch debian/patches/no-nocloud-network.patch debian/patches/no-single-process.patch debian/patches/revert-551f560d-cloud-config-after-snap-seeding.patch --- ...retain-file-argument-as-main-cmd-arg.patch | 2 +- ...ported-systemd-condition-environment.patch | 10 ++++---- .../netplan99-cannot-use-default.patch | 2 +- debian/patches/no-nocloud-network.patch | 24 +++++++++---------- debian/patches/no-single-process.patch | 8 +++---- ...560d-cloud-config-after-snap-seeding.patch | 2 +- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch b/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch index 62ae666bec4..b86c2664e06 100644 --- a/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch +++ b/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch @@ -21,7 +21,7 @@ Last-Update: 2024-04-30 Date: Fri, 6 Sep 2024 13:50:39 -0600 Subject: [PATCH 128/131] refresh no-nocloud-network.patch tests against snapshot --- debian/patches/no-nocloud-network.patch | 33 ++++++++++++++++--------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/debian/patches/no-nocloud-network.patch b/debian/patches/no-nocloud-network.patch index d235473292b..69c0308d46b 100644 --- a/debian/patches/no-nocloud-network.patch +++ b/debian/patches/no-nocloud-network.patch @@ -7,7 +7,7 @@ Last-Update: 2024-08-02 --- a/cloudinit/sources/DataSourceNoCloud.py +++ 
From 6915279ea9cfd6a612a397014cc4a261ffd680b7 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Fri, 6 Sep 2024 12:17:57 -0600
Subject: [PATCH 127/131] refresh patches against 24.3.1

patches:
debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch
debian/patches/drop-unsupported-systemd-condition-environment.patch
debian/patches/netplan99-cannot-use-default.patch
debian/patches/no-nocloud-network.patch
debian/patches/no-single-process.patch
debian/patches/revert-551f560d-cloud-config-after-snap-seeding.patch
---
 ...retain-file-argument-as-main-cmd-arg.patch |  2 +-
 ...ported-systemd-condition-environment.patch | 10 ++++----
 .../netplan99-cannot-use-default.patch        |  2 +-
 debian/patches/no-nocloud-network.patch       | 24 +++++++++----------
 debian/patches/no-single-process.patch        |  8 +++----
 ...560d-cloud-config-after-snap-seeding.patch |  2 +-
 6 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch b/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch
index 62ae666bec4..b86c2664e06 100644
--- a/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch
+++ b/debian/patches/cli-retain-file-argument-as-main-cmd-arg.patch
@@ -21,7 +21,7 @@ Last-Update: 2024-04-30
Date: Fri, 6 Sep 2024 13:50:39 -0600
Subject: [PATCH 128/131] refresh no-nocloud-network.patch tests against
 snapshot

---
 debian/patches/no-nocloud-network.patch | 33 ++++++++++++++++---------
 1 file changed, 21 insertions(+), 12 deletions(-)

diff --git a/debian/patches/no-nocloud-network.patch b/debian/patches/no-nocloud-network.patch
index d235473292b..69c0308d46b 100644
--- a/debian/patches/no-nocloud-network.patch
+++ b/debian/patches/no-nocloud-network.patch
@@ -7,7 +7,7 @@ Last-Update: 2024-08-02
 
 --- a/cloudinit/sources/DataSourceNoCloud.py
 +++ b/cloudinit/sources/DataSourceNoCloud.py
-@@ -190,7 +190,7 @@ class DataSourceNoCloud(sources.DataSour
+@@ -190,7 +190,7 @@
          # This could throw errors, but the user told us to do it
          # so if errors are raised, let them raise
 
@@ -16,7 +16,7 @@ Last-Update: 2024-08-02
          LOG.debug("Using seeded cache data from %s", seedfrom)
 
          # Values in the command line override those from the seed
-@@ -199,7 +199,6 @@ class DataSourceNoCloud(sources.DataSour
+@@ -199,7 +199,6 @@
          )
          mydata["user-data"] = ud
          mydata["vendor-data"] = vd
@@ -26,7 +26,7 @@ Last-Update: 2024-08-02
 
  # Now that we have exhausted any other places merge in the defaults
 --- a/cloudinit/util.py
 +++ b/cloudinit/util.py
-@@ -1059,7 +1059,6 @@ def read_seeded(base="", ext="", timeout
+@@ -1059,7 +1059,6 @@
          ud_url = base.replace("%s", "user-data" + ext)
          vd_url = base.replace("%s", "vendor-data" + ext)
          md_url = base.replace("%s", "meta-data" + ext)
-@@ -1068,17 +1067,7 @@ def read_seeded(base="", ext="", timeout
+@@ -1068,17 +1067,7 @@
          ud_url = "%s%s%s" % (base, "user-data", ext)
          vd_url = "%s%s%s" % (base, "vendor-data", ext)
          md_url = "%s%s%s" % (base, "meta-data", ext)
@@ -54,7 +54,16 @@ Last-Update: 2024-08-02
      )
 --- a/tests/unittests/test_util.py
 +++ b/tests/unittests/test_util.py
+@@ -2481,7 +2481,7 @@
+     {
+         "meta-data": {"md": "val"},
+         "user-data": b"ud",
+-        "network-config": {"net": "cfg"},
++        "network-config": None,
+         "vendor-data": None,
+     },
+     True,
-@@ -2536,7 +2536,7 @@ class TestReadSeeded:
+@@ -2536,7 +2536,7 @@
      assert found_md == {"key1": "val1"}
      assert found_ud == ud
      assert found_vd == vd
-@@ -2545,7 +2545,6 @@ class TestReadSeeded:
+@@ -2545,7 +2545,6 @@
          "http://10.0.0.1/%s?qs=1",
          True,
          [
-            "http://10.0.0.1/network-config?qs=1",
             "http://10.0.0.1/meta-data?qs=1",
             "http://10.0.0.1/user-data?qs=1",
             "http://10.0.0.1/vendor-data?qs=1",
-@@ -2556,7 +2555,6 @@ class TestReadSeeded:
+@@ -2556,7 +2555,6 @@
          "https://10.0.0.1:8008/",
          True,
          [
-            "https://10.0.0.1:8008/network-config",
             "https://10.0.0.1:8008/meta-data",
             "https://10.0.0.1:8008/user-data",
             "https://10.0.0.1:8008/vendor-data",
-@@ -2567,7 +2565,6 @@ class TestReadSeeded:
+@@ -2567,7 +2565,6 @@
          "https://10.0.0.1:8008",
          True,
          [
-            "https://10.0.0.1:8008/network-config",
             "https://10.0.0.1:8008/meta-data",
             "https://10.0.0.1:8008/user-data",
             "https://10.0.0.1:8008/vendor-data",
-@@ -2578,7 +2575,6 @@ class TestReadSeeded:
+@@ -2578,7 +2575,6 @@
          "https://10.0.0.1:8008",
          False,
          [
-            "https://10.0.0.1:8008network-config",
             "https://10.0.0.1:8008meta-data",
             "https://10.0.0.1:8008user-data",
             "https://10.0.0.1:8008vendor-data",
-@@ -2589,7 +2585,6 @@ class TestReadSeeded:
+@@ -2589,7 +2585,6 @@
          "https://10.0.0.1:8008?qs=",
          True,
          [
-            "https://10.0.0.1:8008?qs=network-config",
             "https://10.0.0.1:8008?qs=meta-data",
             "https://10.0.0.1:8008?qs=user-data",
             "https://10.0.0.1:8008?qs=vendor-data",
-@@ -2628,7 +2623,7 @@ class TestReadSeeded:
+@@ -2628,7 +2623,7 @@
      # user-data, vendor-data read raw. It could be scripts or other format
      assert found_ud == "/user-data: 1"
      assert found_vd == "/vendor-data: 1"
-@@ -2658,7 +2653,7 @@ class TestReadSeededWithoutVendorData(he
+@@ -2658,7 +2653,7 @@
      self.assertEqual(found_md, {"key1": "val1"})
      self.assertEqual(found_ud, ud)
      self.assertEqual(found_vd, vd)

From 18a645e1128cbb7e06f090dca84924c4c5c5bb78 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Fri, 6 Sep 2024 13:45:54 -0600
Subject: [PATCH 129/131] fix(no-single-process.patch): revert references to
 cloud-init-network

Revert remaining functional references to the cloud-init-network service,
which will not exist on stable releases.
---
 debian/patches/no-single-process.patch | 76 ++++++++++++++++++++++++--
 1 file changed, 71 insertions(+), 5 deletions(-)

diff --git a/debian/patches/no-single-process.patch b/debian/patches/no-single-process.patch
index 317b6584513..8a1d78387b0 100644
--- a/debian/patches/no-single-process.patch
+++ b/debian/patches/no-single-process.patch
@@ -6,7 +6,7 @@ Last-Update: 2024-08-02
 
 --- a/systemd/cloud-config.service.tmpl
 +++ b/systemd/cloud-config.service.tmpl
-@@ -9,14 +9,7 @@ ConditionKernelCommandLine=!cloud-init=d
+@@ -10,14 +10,7 @@
  [Service]
  Type=oneshot
@@ -24,7 +24,7 @@ Last-Update: 2024-08-02
 --- a/systemd/cloud-final.service.tmpl
 +++ b/systemd/cloud-final.service.tmpl
-@@ -14,16 +14,10 @@ ConditionKernelCommandLine=!cloud-init=d
+@@ -15,16 +15,10 @@
  [Service]
  Type=oneshot
@@ -45,7 +45,7 @@ Last-Update: 2024-08-02
  ExecStartPost=/bin/sh -c 'u=NetworkManager.service; \
 --- a/systemd/cloud-init-local.service.tmpl
 +++ b/systemd/cloud-init-local.service.tmpl
-@@ -7,6 +7,7 @@ DefaultDependencies=no
+@@ -7,6 +7,7 @@
  {% endif %}
  Wants=network-pre.target
  After=hv_kvp_daemon.service
-@@ -36,14 +37,7 @@ ExecStartPre=/bin/mkdir -p /run/cloud-in
+@@ -37,14 +38,7 @@
  ExecStartPre=/sbin/restorecon /run/cloud-init
  ExecStartPre=/usr/bin/touch /run/cloud-init/enabled
  {% endif %}
@@ -193,7 +193,7 @@ Last-Update: 2024-08-02
 -WantedBy=cloud-init.target
 --- /dev/null
 +++ b/systemd/cloud-init.service.tmpl
-@@ -0,0 +1,56 @@
+@@ -0,0 +1,57 @@
 +## template:jinja
 +[Unit]
 +# https://cloudinit.readthedocs.io/en/latest/explanation/boot.html
...
 +{% endif %}
 +ConditionPathExists=!/etc/cloud/cloud-init.disabled
 +ConditionKernelCommandLine=!cloud-init=disabled
++ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled
 +
 +[Service]
 +Type=oneshot
...
 +
 +[Install]
 +WantedBy=cloud-init.target
+--- a/cloudinit/cmd/status.py
++++ b/cloudinit/cmd/status.py
+@@ -318,9 +318,8 @@
+     for service in [
+         "cloud-final.service",
+         "cloud-config.service",
+-        "cloud-init-network.service",
++        "cloud-init.service",
+         "cloud-init-local.service",
+-        "cloud-init-main.service",
+     ]:
+         try:
+             stdout = query_systemctl(
+--- a/cloudinit/config/cc_mounts.py
++++ b/cloudinit/config/cc_mounts.py
+@@ -525,7 +525,7 @@
+     # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno
+     uses_systemd = cloud.distro.uses_systemd()
+     default_mount_options = (
+-        "defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev"
++        "defaults,nofail,x-systemd.after=cloud-init.service,_netdev"
+         if uses_systemd
+         else "defaults,nobootwait"
+     )
+--- a/cloudinit/config/schemas/schema-cloud-config-v1.json
++++ b/cloudinit/config/schemas/schema-cloud-config-v1.json
+@@ -2022,12 +2022,12 @@
+     },
+     "mount_default_fields": {
+       "type": "array",
+-      "description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev``.",
++      "description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.after=cloud-init.service,_netdev``.",
+       "default": [
+         null,
+         null,
+         "auto",
+-        "defaults,nofail,x-systemd.after=cloud-init-network.service",
++        "defaults,nofail,x-systemd.after=cloud-init.service",
+         "0",
+         "2"
+       ],
+--- a/systemd/cloud-config.target
++++ b/systemd/cloud-config.target
+@@ -14,5 +14,5 @@
+ 
+ [Unit]
+ Description=Cloud-config availability
+-Wants=cloud-init-local.service cloud-init-network.service
+-After=cloud-init-local.service cloud-init-network.service
++Wants=cloud-init-local.service cloud-init.service
++After=cloud-init-local.service cloud-init.service
+--- a/tests/unittests/config/test_cc_mounts.py
++++ b/tests/unittests/config/test_cc_mounts.py
+@@ -566,9 +566,9 @@
+         LABEL=keepme none ext4 defaults 0 0
+         LABEL=UEFI
+         /dev/sda4 /mnt2 auto nofail,comment=cloudconfig 1 2
+-        /dev/sda5 /mnt3 auto defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev,comment=cloudconfig 0 2
++        /dev/sda5 /mnt3 auto defaults,nofail,x-systemd.after=cloud-init.service,_netdev,comment=cloudconfig 0 2
+         /dev/sda1 /mnt xfs auto,comment=cloudconfig 0 2
+-        /dev/sda3 /mnt4 btrfs defaults,nofail,x-systemd.after=cloud-init-network.service,_netdev,comment=cloudconfig 0 2
++        /dev/sda3 /mnt4 btrfs defaults,nofail,x-systemd.after=cloud-init.service,_netdev,comment=cloudconfig 0 2
+         /dev/sdb1 none swap sw,comment=cloudconfig 0 0
+         """ # noqa: E501
+     ).strip()
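
[Illustration, not part of the patch series: a sketch of the default fstab
options cc_mounts computes once this revert is applied, mirroring the
expected test output above; assumes a systemd-based distro.]

uses_systemd = True
default_mount_options = (
    "defaults,nofail,x-systemd.after=cloud-init.service,_netdev"
    if uses_systemd
    else "defaults,nobootwait"
)
# Renders fstab entries such as the ones asserted in test_cc_mounts.py:
print("/dev/sda5 /mnt3 auto %s,comment=cloudconfig 0 2" % default_mount_options)
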
From bd7f0ee222a09b9dd3e9e89212013e0200cf6f09 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Fri, 6 Sep 2024 12:18:16 -0600
Subject: [PATCH 130/131] update changelog (new upstream snapshot)

---
 debian/changelog | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/debian/changelog b/debian/changelog
index 0d7a323ccf8..646e41fbe56 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,4 +1,4 @@
-cloud-init (24.2-0ubuntu1~20.04.2) UNRELEASED; urgency=medium
+cloud-init (24.3.1-0ubuntu0~20.04.1) UNRELEASED; urgency=medium
 
   * d/p/no-nocloud-network.patch: Remove nocloud network feature
   * d/p/no-single-process.patch: Remove single process optimization
@@ -7,8 +7,12 @@ cloud-init (24.2-0ubuntu1~20.04.2) UNRELEASED; urgency=medium
     - d/p/cli-retain-file-argument-as-main-cmd-arg.patch
     - d/p/drop-unsupported-systemd-condition-environment.patch
     - d/p/revert-551f560d-cloud-config-after-snap-seeding.patch
+    - d/p/netplan99-cannot-use-default.patch
+  * Upstream snapshot based on 24.3.1. (LP: #2079224).
+    List of changes from upstream can be found at
+    https://raw.githubusercontent.com/canonical/cloud-init/24.3.1/ChangeLog
 
- -- James Falcon  Tue, 06 Aug 2024 09:04:37 -0500
+ -- Chad Smith  Fri, 06 Sep 2024 12:18:16 -0600
 
 cloud-init (24.2-0ubuntu1~20.04.1) focal; urgency=medium

From 29ed3835650aa6ab100759a006c5cc2c7b4a73ea Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Fri, 6 Sep 2024 12:18:21 -0600
Subject: [PATCH 131/131] releasing cloud-init version 24.3.1-0ubuntu0~20.04.1

---
 debian/changelog | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/debian/changelog b/debian/changelog
index 646e41fbe56..7449a3375d4 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,4 +1,4 @@
-cloud-init (24.3.1-0ubuntu0~20.04.1) UNRELEASED; urgency=medium
+cloud-init (24.3.1-0ubuntu0~20.04.1) focal; urgency=medium
 
   * d/p/no-nocloud-network.patch: Remove nocloud network feature
   * d/p/no-single-process.patch: Remove single process optimization
@@ -12,7 +12,7 @@ cloud-init (24.3.1-0ubuntu0~20.04.1) UNRELEASED; urgency=medium
     List of changes from upstream can be found at
     https://raw.githubusercontent.com/canonical/cloud-init/24.3.1/ChangeLog
 
- -- Chad Smith  Fri, 06 Sep 2024 12:18:16 -0600
+ -- Chad Smith  Fri, 06 Sep 2024 12:18:20 -0600
 
 cloud-init (24.2-0ubuntu1~20.04.1) focal; urgency=medium
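
[Illustration, not part of the patch series: the iproute2 unit tests added in
PATCH 124 can be exercised from a source checkout, assuming pytest and the
test dependencies are installed.]

import pytest

# Equivalent to: python3 -m pytest -v tests/unittests/net/netops/test_iproute2.py
raise SystemExit(pytest.main(["-v", "tests/unittests/net/netops/test_iproute2.py"]))
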