From 0f30e723ef52f4cce31afa15e777ab488471c6dc Mon Sep 17 00:00:00 2001 From: Jeff Kala <48843785+jeffkala@users.noreply.github.com> Date: Fri, 5 Aug 2022 15:25:39 -0500 Subject: [PATCH] 1.2.0 release (#144) * Updated lib mapping docs (#113) * Updated lib mapping docs * Updated per review suggestions * Pinned mistune for m2r2 (#114) * Switch to poetry-core * Update banner parsing for EOS (#121) Update banner parsing for EOS * Fixes encrypt type7 (#122) * fixes encrypt type7 * Document support on duplicate lines (#128) * Few updates to PR of 125 (#129) * initial pass at type hinting and getting mypy to run * update tasks and ci to do mypy as well * rm unused import * fix contributing end-line number * fix contributing end-line number * Add py.typed file to signify availability of type hints to third parties. * Updates to type hinting. * Add EOS _build_banner type hints. * Address feedback. * Linting. * Remove .idea from commit and ignore in .gitignore. * Remove remnants of merge conflict. * PR feedback. * PR comments. * Ignore pylint errors for abstract-method. * Black. * add examples and better exception messaging Co-authored-by: Leo Kirchner * Adding nxos_ssh to Napalm mapper (#138) * Update CODEOWNERS (#134) adding codeowner * prep for 1.2.0 release (#143) * prep for 1.2.0 release * Update CHANGELOG.md Co-authored-by: Jeff Kala <48843785+jeffkala@users.noreply.github.com> Co-authored-by: Jeff Kala <48843785+jeffkala@users.noreply.github.com> Co-authored-by: Adam Byczkowski <38091261+qduk@users.noreply.github.com> Co-authored-by: Fabian Affolter Co-authored-by: Ken Celenza Co-authored-by: Dr. X Co-authored-by: Leo Kirchner Co-authored-by: Joe Wesch <10467633+joewesch@users.noreply.github.com> Co-authored-by: Andrew Bates --- .github/CODEOWNERS | 2 +- .github/workflows/ci.yml | 14 + .gitignore | 78 +--- CHANGELOG.md | 23 +- README.md | 7 +- docs/requirements.txt | 1 + docs/source/conf.py | 32 +- docs/source/contributing/index.rst | 2 +- docs/source/netutils/configs/index.rst | 22 ++ .../lib_mapping/ANSIBLE_reverse_table.rst | 78 ++++ .../netutils/lib_mapping/ANSIBLE_table.rst | 78 ++++ .../lib_mapping/NAPALM_reverse_table.rst | 42 ++ .../netutils/lib_mapping/NAPALM_table.rst | 45 +++ .../NTCTEMPLATES_reverse_table.rst | 339 ++++++++++++++++ .../lib_mapping/NTCTEMPLATES_table.rst | 339 ++++++++++++++++ .../lib_mapping/PYATS_reverse_table.rst | 36 ++ .../netutils/lib_mapping/PYATS_table.rst | 39 ++ .../lib_mapping/PYNTC_reverse_table.rst | 27 ++ .../netutils/lib_mapping/PYNTC_table.rst | 27 ++ .../lib_mapping/SCRAPLI_reverse_table.rst | 21 + .../netutils/lib_mapping/SCRAPLI_table.rst | 21 + docs/source/netutils/lib_mapping/index.rst | 108 ++---- docs/source/table_template.j2 | 9 + netutils/__init__.py | 2 +- netutils/asn.py | 10 +- netutils/bandwidth.py | 39 +- netutils/banner.py | 19 +- netutils/config/clean.py | 18 +- netutils/config/compliance.py | 111 +++--- netutils/config/parser.py | 362 +++++++++++------- netutils/dns.py | 8 +- netutils/interface.py | 152 +++++--- netutils/ip.py | 116 +++--- netutils/lib_mapper.py | 4 +- netutils/mac.py | 42 +- netutils/password.py | 198 +++++----- netutils/ping.py | 10 +- netutils/protocol_mapper.py | 6 +- netutils/py.typed | 0 netutils/route.py | 13 +- netutils/time.py | 21 +- netutils/utils.py | 5 +- netutils/vlan.py | 44 ++- poetry.lock | 46 ++- pyproject.toml | 31 +- tasks.py | 14 + .../arista_eos/eos_bad_banner_backup.txt | 7 + .../arista_eos/eos_bad_banner_feature.py | 3 + .../arista_eos/eos_bad_banner_intended.txt | 7 + 
.../arista_eos/eos_bad_banner_received.json | 12 + .../arista_eos/eos_basic_backup.txt | 14 + .../arista_eos/eos_basic_feature.py | 1 + .../arista_eos/eos_basic_intended.txt | 14 + .../arista_eos/eos_basic_received.json | 12 +- .../parser/arista_eos/eos_full_received.py | 5 + .../parser/arista_eos/eos_full_sent.txt | 14 + tests/unit/test_password.py | 12 +- 57 files changed, 2080 insertions(+), 682 deletions(-) create mode 100755 docs/source/netutils/lib_mapping/ANSIBLE_reverse_table.rst create mode 100755 docs/source/netutils/lib_mapping/ANSIBLE_table.rst create mode 100755 docs/source/netutils/lib_mapping/NAPALM_reverse_table.rst create mode 100755 docs/source/netutils/lib_mapping/NAPALM_table.rst create mode 100755 docs/source/netutils/lib_mapping/NTCTEMPLATES_reverse_table.rst create mode 100755 docs/source/netutils/lib_mapping/NTCTEMPLATES_table.rst create mode 100755 docs/source/netutils/lib_mapping/PYATS_reverse_table.rst create mode 100755 docs/source/netutils/lib_mapping/PYATS_table.rst create mode 100755 docs/source/netutils/lib_mapping/PYNTC_reverse_table.rst create mode 100755 docs/source/netutils/lib_mapping/PYNTC_table.rst create mode 100755 docs/source/netutils/lib_mapping/SCRAPLI_reverse_table.rst create mode 100755 docs/source/netutils/lib_mapping/SCRAPLI_table.rst create mode 100644 docs/source/table_template.j2 create mode 100644 netutils/py.typed create mode 100644 tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_backup.txt create mode 100644 tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_feature.py create mode 100644 tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_intended.txt create mode 100644 tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_received.json diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index fc02243e..0bc8f89d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,2 @@ # Default owner(s) of all files in this repository -* @itdependsnetworks @jeffkala @qduk +* @itdependsnetworks @jeffkala @qduk @abates diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 88d06660..021f53d0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,6 +32,19 @@ jobs: run: "poetry run invoke bandit" needs: - "black" + mypy: + runs-on: "ubuntu-20.04" + env: + INVOKE_LOCAL: "True" + steps: + - name: "Check out repository code" + uses: "actions/checkout@v2" + - name: "Setup environment" + uses: "networktocode/gh-action-setup-poetry-environment@v2" + - name: "Type-Hints: mypy" + run: "poetry run invoke mypy" + needs: + - "black" pydocstyle: runs-on: "ubuntu-20.04" env: @@ -103,6 +116,7 @@ jobs: PYTHON_VER=${{ env.PYTHON_VER }} needs: - "bandit" + - "mypy" - "pydocstyle" - "flake8" - "yamllint" diff --git a/.gitignore b/.gitignore index f539d853..68137b43 100644 --- a/.gitignore +++ b/.gitignore @@ -192,48 +192,11 @@ $RECYCLE.BIN/ # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 -# User-specific stuff -.idea/**/workspace.xml -.idea/**/tasks.xml -.idea/**/usage.statistics.xml -.idea/**/dictionaries -.idea/**/shelf - -# Generated files -.idea/**/contentModel.xml - -# Sensitive or high-churn files -.idea/**/dataSources/ -.idea/**/dataSources.ids -.idea/**/dataSources.local.xml -.idea/**/sqlDataSources.xml -.idea/**/dynamic.xml -.idea/**/uiDesigner.xml -.idea/**/dbnavigator.xml - -# Gradle 
-.idea/**/gradle.xml -.idea/**/libraries - -# Gradle and Maven with auto-import -# When using Gradle or Maven with auto-import, you should exclude module files, -# since they will be recreated, and may cause churn. Uncomment if using -# auto-import. -# .idea/artifacts -# .idea/compiler.xml -# .idea/jarRepositories.xml -# .idea/modules.xml -# .idea/*.iml -# .idea/modules -# *.iml -# *.ipr +.idea/ # CMake cmake-build-*/ -# Mongo Explorer plugin -.idea/**/mongoSettings.xml - # File-based project format *.iws @@ -246,51 +209,12 @@ out/ # JIRA plugin atlassian-ide-plugin.xml -# Cursive Clojure plugin -.idea/replstate.xml - # Crashlytics plugin (for Android Studio and IntelliJ) com_crashlytics_export_strings.xml crashlytics.properties crashlytics-build.properties fabric.properties -# Editor-based Rest Client -.idea/httpRequests - -# Android studio 3.1+ serialized cache file -.idea/caches/build_file_checksums.ser - -### PyCharm Patch ### -# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 - -# *.iml -# modules.xml -# .idea/misc.xml -# *.ipr - -# Sonarlint plugin -# https://plugins.jetbrains.com/plugin/7973-sonarlint -.idea/**/sonarlint/ - -# SonarQube Plugin -# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin -.idea/**/sonarIssues.xml - -# Markdown Navigator plugin -# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced -.idea/**/markdown-navigator.xml -.idea/**/markdown-navigator-enh.xml -.idea/**/markdown-navigator/ - -# Cache file creation bug -# See https://youtrack.jetbrains.com/issue/JBR-2257 -.idea/$CACHE_FILE$ - -# CodeStream plugin -# https://plugins.jetbrains.com/plugin/12206-codestream -.idea/codestream.xml - ### vscode ### .vscode/* *.code-workspace diff --git a/CHANGELOG.md b/CHANGELOG.md index 264aa3e1..56903bfb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## v1.2.0 - 2022-08 + +### Added + +- #128 Documentation for Nokia SROS duplicate lines/duplicate line detection +- #138 Added nxos_ssh to Napalm mapper + +### Changed + +- #113 Updated lib mapping docs +- #115 Switched build backend to poetry-core +- #121 Update banner parsing for EOS +- #129 Add type hints to the whole project and mypy testing setup and CI +- #134 Updated CODEOWNERS + +### Fixed + +- #122 Fixed encrypt type7 + ## v1.1.0 - 2022-04 ### Added @@ -23,7 +42,7 @@ ### Added -- #69 Normalise banner demiliter for IOS to ^C & support parsing delimiter ^ +- #69 Normalise banner delimiter for IOS to ^C & support parsing delimiter ^ ### Fixed @@ -113,7 +132,7 @@ ### Fixed -- Enable docsting tests +- Enable docstring tests - Fix docstring tests - Fix wording and links on README diff --git a/README.md b/README.md index 5688826a..b9c9e667 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ Functions are grouped with like functions, such as IP or MAC address based funct * Compliance - Provides the ability to compare two configurations to sanely understand the differences. * Parsing - Provides the ability to parse configuration for the minor differences that are there. * DNS - Provides the ability to work with DNS, such as validating that a FQDN is resolvable. -* Interface - Provides the ability to work with interface names, expanding, abbreviating, and spliting the names. +* Interface - Provides the ability to work with interface names, expanding, abbreviating, and splitting the names. * IP Address - Provides the ability to work with IP addresses, primarily exposing Python `ipaddress` functionality. 
* Library Mapper - Provides mappings in expected vendor names between Netmiko, NAPALM, pyntc, ntc-templates, pyats, and scrapli. * MAC Address - Provides the ability to work with MAC addresses such as validating or converting to integer. @@ -152,7 +152,7 @@ Except for unit tests, testing is only supported on Python 3.7. The project is packaged with a light development environment based on `docker-compose` to help with the local development of the project and to run tests within TravisCI. The project is following Network to Code software development guidelines and are leveraging the following: -- Black, Pylint, Bandit, flake8, and pydocstyle for Python linting and formatting. +- Black, Pylint, Bandit, Mypy, flake8, and pydocstyle for Python linting and formatting. - pytest, coverage, and unittest for unit tests. There are a number of things that are required in order to have a successful PR. @@ -160,7 +160,7 @@ There are a number of things that are required in order to have a successful PR. - All new functions must contain at least 1 example in their docstrings. - Docstrings must conform to the google docstring [convention](https://google.github.io/styleguide/pyguide.html#381-docstrings). - Unit test for newly added functions are required. -- If applicable, tests related to config parsing and compliuance must be added. +- If applicable, tests related to config parsing and compliance must be added. - Update the jinja2 filter (netutils.utils.jinja2_convenience_function) for any new functions (see below for details). - If you create a new file in the `netutils` folder, you must create a new folder and `index.rst` in the docs folder (see below for details). - Your PR must not introduce any required dependencies. You can introduce optional or development dependencies. @@ -219,6 +219,7 @@ Each command can be executed with `invoke `. Each command also has its black Run black to check that Python files adhere to its style standards. coverage Run the coverage report against pytest. flake8 Run flake8 to check that Python files adhere to its style standards. + mypy Run mypy to validate typing-hints. pylint Run pylint code analysis. pydocstyle Run pydocstyle to validate docstring formatting adheres to NTC defined standards. pytest Run pytest for the specified name and Python version. diff --git a/docs/requirements.txt b/docs/requirements.txt index 7b89a7ac..9b0a1811 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,5 @@ m2r2==0.2.7 +mistune==0.8.4 Sphinx toml sphinx-rtd-theme \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index e8ba9e1e..8660f462 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -13,12 +13,16 @@ import os import sys import toml +from jinja2 import Environment, FileSystemLoader + sys.path.insert(0, os.path.abspath("../..")) +from netutils import lib_mapper # noqa: E402 + +DIR_PATH = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.abspath("sphinxext")) toml_dict = toml.load("../../pyproject.toml") - # -- Project information ----------------------------------------------------- project = toml_dict["tool"]["poetry"]["name"] @@ -63,3 +67,29 @@ # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ["_static"] + + +def build_mapping_tables(app): + """Build the library mappings tables.""" + env = Environment(loader=FileSystemLoader(f"{DIR_PATH}")) + template_file = env.get_template("table_template.j2") + + LIST_OF_MAP_DICTS = [] + for attr in dir(lib_mapper): + if (attr.endswith("MAPPER_REVERSE") or attr.endswith("_MAPPER")) and not ( + attr.startswith("_") or attr.startswith("NETMIKO") or attr.startswith("MAIN") + ): + LIST_OF_MAP_DICTS.append(attr) + + for dict_name in LIST_OF_MAP_DICTS: + lib_name = dict_name.split("_")[0] + filename = f"{lib_name}_reverse" if "REVERSE" in dict_name else lib_name + headers = ["NORMALIZED", lib_name] if "REVERSE" in dict_name else [lib_name, "NORMALIZED"] + rendered_template = template_file.render(lib_names=headers, mappings=getattr(lib_mapper, dict_name)) + with open(f"{DIR_PATH}/netutils/lib_mapping/{filename}_table.rst", "w") as table_file: + table_file.write(rendered_template) + + +def setup(app): + """Call methods during builder initiated.""" + app.connect("builder-inited", build_mapping_tables) diff --git a/docs/source/contributing/index.rst b/docs/source/contributing/index.rst index 5c88e01d..3aec60de 100644 --- a/docs/source/contributing/index.rst +++ b/docs/source/contributing/index.rst @@ -4,5 +4,5 @@ Contributing .. mdinclude:: ../../../README.md :start-line: 148 - :end-line: 233 + :end-line: 234 diff --git a/docs/source/netutils/configs/index.rst b/docs/source/netutils/configs/index.rst index 8074fa07..3c49cc5b 100644 --- a/docs/source/netutils/configs/index.rst +++ b/docs/source/netutils/configs/index.rst @@ -25,3 +25,25 @@ F5 Parser Nokia SROS Parser ----------------- - The section banners have been simplified to extract the section header itself. This means that `echo "System Configuration"` will be converted to just "System Configuration". + +Duplicate Line Detection +-------------------------- +In some circumstances replacing lines, such as secrets without uniqueness in the replacement, will result in duplicated lines that are invalid configuration, such as:: + + snmp-server community <> RO SNMP_ACL_RO + snmp-server community <> RO SNMP_ACL_RO + +There are some known use cases, such as the below that are considered:: + + router bgp 6500 + bgp router-id 10.0.0.11 + ! + address-family ipv4 unicast + redistribute connected + exit-address-family <--- duplicated hierarchy + ! + address-family l2vpn evpn + neighbor underlay activate + exit-address-family <--- duplicated hierarchy + +Documented use cases that are actual configuration on a network device are considered valid and should be opened for bug fixes. However, configuration that does not actually exist on the running config of network devices are out of scope for the parser. diff --git a/docs/source/netutils/lib_mapping/ANSIBLE_reverse_table.rst b/docs/source/netutils/lib_mapping/ANSIBLE_reverse_table.rst new file mode 100755 index 00000000..01b2482c --- /dev/null +++ b/docs/source/netutils/lib_mapping/ANSIBLE_reverse_table.rst @@ -0,0 +1,78 @@ +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NORMALIZED | ANSIBLE
arista_eos | arista.eos.eos
ciena_saos | ciena.saos6.saos6
cisco_asa | cisco.asa.asa
cisco_ios | cisco.ios.ios
cisco_xr | cisco.iosxr.iosxr
cisco_nxos | cisco.nxos.nxos
huawei | community.network.ce
dell_os6 | dellemc.os6.os6
dell_os9 | dellemc.os9.os9
dell_os10 | dellemc.os10.0s10
ericsson_ipos | community.network.eric_eccli
extreme_exos | community.network.exos
extreme_netiron | community.network.ironware
extreme_nos | community.network.nos
extreme_slx | community.network.slxos
extreme_vsp | community.network.voss
juniper_junos | junipernetworks.junos.junos
lenovo_cnos | community.network.cnos
lenovo_enos | community.network.enos
mikrotik_routeros | community.network.routeros
nokia_sros | community.network.sros
pluribus | community.network.netvisor
ruckus_icx | community.network.icx
vyos | vyos.vyos.vyos
\ No newline at end of file diff --git a/docs/source/netutils/lib_mapping/ANSIBLE_table.rst b/docs/source/netutils/lib_mapping/ANSIBLE_table.rst new file mode 100755 index 00000000..d5fb3069 --- /dev/null +++ b/docs/source/netutils/lib_mapping/ANSIBLE_table.rst @@ -0,0 +1,78 @@ +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ANSIBLE | NORMALIZED
arista.eos.eos | arista_eos
ciena.saos6.saos6 | ciena_saos
cisco.asa.asa | cisco_asa
cisco.ios.ios | cisco_ios
cisco.iosxr.iosxr | cisco_xr
cisco.nxos.nxos | cisco_nxos
community.network.ce | huawei
dellemc.os6.os6 | dell_os6
dellemc.os9.os9 | dell_os9
dellemc.os10.0s10 | dell_os10
community.network.eric_eccli | ericsson_ipos
community.network.exos | extreme_exos
community.network.ironware | extreme_netiron
community.network.nos | extreme_nos
community.network.slxos | extreme_slx
community.network.voss | extreme_vsp
junipernetworks.junos.junos | juniper_junos
community.network.cnos | lenovo_cnos
community.network.enos | lenovo_enos
community.network.routeros | mikrotik_routeros
community.network.netvisor | pluribus
community.network.icx | ruckus_icx
community.network.sros | nokia_sros
vyos.vyos.vyos | vyos
\ No newline at end of file diff --git a/docs/source/netutils/lib_mapping/NAPALM_reverse_table.rst b/docs/source/netutils/lib_mapping/NAPALM_reverse_table.rst new file mode 100755 index 00000000..b5513e2c --- /dev/null +++ b/docs/source/netutils/lib_mapping/NAPALM_reverse_table.rst @@ -0,0 +1,42 @@ +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NORMALIZED | NAPALM
arista_eos | eos
brocade_vyos | vyos
cisco_asa | asa
cisco_ios | ios
cisco_nxos | nxos
cisco_xr | iosxr
cisco_wlc | cisco_wlc_ssh
fortinet | fortios
huawei_vrp | huawei
juniper_junos | junos
paloalto_panos | panos
nokia_sros | sros
\ No newline at end of file diff --git a/docs/source/netutils/lib_mapping/NAPALM_table.rst b/docs/source/netutils/lib_mapping/NAPALM_table.rst new file mode 100755 index 00000000..a693ce4d --- /dev/null +++ b/docs/source/netutils/lib_mapping/NAPALM_table.rst @@ -0,0 +1,45 @@ +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NAPALM | NORMALIZED
asa | cisco_asa
cisco_wlc_ssh | cisco_wlc
eos | arista_eos
fortios | fortinet
huawei | huawei_vrp
ios | cisco_ios
nxos_ssh | cisco_nxos
nxos | cisco_nxos
iosxr | cisco_xr
junos | juniper_junos
panos | paloalto_panos
sros | nokia_sros
vyos | brocade_vyos
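
Taken together, the Ansible and NAPALM tables let a driver name be translated from one library to another through the normalized name, which is the use case described on the lib_mapping index page. A minimal doctest-style sketch, using the ``ANSIBLE_LIB_MAPPER`` and ``NAPALM_LIB_MAPPER_REVERSE`` dictionaries from ``netutils.lib_mapper``::

    >>> from netutils.lib_mapper import ANSIBLE_LIB_MAPPER, NAPALM_LIB_MAPPER_REVERSE
    >>> # Ansible network_os -> normalized name -> NAPALM driver
    >>> normalized = ANSIBLE_LIB_MAPPER["cisco.ios.ios"]
    >>> normalized
    'cisco_ios'
    >>> NAPALM_LIB_MAPPER_REVERSE[normalized]
    'ios'
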
\ No newline at end of file diff --git a/docs/source/netutils/lib_mapping/NTCTEMPLATES_reverse_table.rst b/docs/source/netutils/lib_mapping/NTCTEMPLATES_reverse_table.rst new file mode 100755 index 00000000..7da16de3 --- /dev/null +++ b/docs/source/netutils/lib_mapping/NTCTEMPLATES_reverse_table.rst @@ -0,0 +1,339 @@ +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NORMALIZED | NTCTEMPLATES
a10 | a10
accedian | accedian
adtran_os | adtran_os
alcatel_aos | alcatel_aos
alcatel_sros | alcatel_sros
apresia_aeos | apresia_aeos
arista_eos | arista_eos
aruba_os | aruba_os
aruba_osswitch | aruba_osswitch
aruba_procurve | aruba_procurve
avaya_ers | avaya_ers
avaya_vsp | avaya_vsp
allied_telesis_awplus | allied_telesis_awplus
broadcom_icos | broadcom_icos
brocade_fos | brocade_fos
brocade_fastiron | brocade_fastiron
brocade_netiron | brocade_netiron
brocade_nos | brocade_nos
brocade_vdx | brocade_vdx
brocade_vyos | brocade_vyos
checkpoint_gaia | checkpoint_gaia
calix_b6 | calix_b6
centec_os | centec_os
ciena_saos | ciena_saos
cisco_asa | cisco_asa
cisco_ftd | cisco_ftd
cisco_ios | cisco_ios
cisco_nxos | cisco_nxos
cisco_s300 | cisco_s300
cisco_tp | cisco_tp
cisco_wlc | cisco_wlc
cisco_xe | cisco_xe
cisco_xr | cisco_xr
cloudgenix_ion | cloudgenix_ion
coriant | coriant
dell_dnos9 | dell_dnos9
dell_force10 | dell_force10
dell_os6 | dell_os6
dell_os9 | dell_os9
dell_os10 | dell_os10
dell_powerconnect | dell_powerconnect
dell_isilon | dell_isilon
dlink_ds | dlink_ds
endace | endace
eltex | eltex
eltex_esr | eltex_esr
enterasys | enterasys
ericsson_ipos | ericsson_ipos
extreme | extreme
extreme_ers | extreme_ers
extreme_exos | extreme_exos
extreme_netiron | extreme_netiron
extreme_nos | extreme_nos
extreme_slx | extreme_slx
extreme_vdx | extreme_vdx
extreme_vsp | extreme_vsp
extreme_wing | extreme_wing
f5_ltm | f5_ltm
f5_tmsh | f5_tmsh
f5_linux | f5_linux
flexvnf | flexvnf
fortinet | fortinet
generic | generic
generic_termserver | generic_termserver
hp_comware | hp_comware
hp_procurve | hp_procurve
huawei | huawei
huawei_smartax | huawei_smartax
huawei_olt | huawei_olt
huawei_vrpv8 | huawei_vrpv8
ipinfusion_ocnos | ipinfusion_ocnos
juniper | juniper
juniper_junos | juniper_junos
juniper_screenos | juniper_screenos
keymile | keymile
keymile_nos | keymile_nos
linux | linux
mikrotik_routeros | mikrotik_routeros
mikrotik_switchos | mikrotik_switchos
mellanox | mellanox
mellanox_mlnxos | mellanox_mlnxos
mrv_lx | mrv_lx
mrv_optiswitch | mrv_optiswitch
netapp_cdot | netapp_cdot
netgear_prosafe | netgear_prosafe
netscaler | netscaler
nokia_sros | nokia_sros
oneaccess_oneos | oneaccess_oneos
ovs_linux | ovs_linux
paloalto_panos | paloalto_panos
pluribus | pluribus
quanta_mesh | quanta_mesh
rad_etx | rad_etx
raisecom_roap | raisecom_roap
ruckus_fastiron | ruckus_fastiron
ruijie_os | ruijie_os
sixwind_os | sixwind_os
sophos_sfos | sophos_sfos
tplink_jetstream | tplink_jetstream
ubiquiti_edge | ubiquiti_edge
ubiquiti_edgerouter | ubiquiti_edgerouter
ubiquiti_edgeswitch | ubiquiti_edgeswitch
ubiquiti_unifiswitch | ubiquiti_unifiswitch
vyatta_vyos | vyatta_vyos
vyos | vyos
watchguard_fireware | watchguard_fireware
zte_zxros | zte_zxros
yamaha | yamaha
watchguard_firebox | watchguard_firebox
huawei_vrp | huawei_vrp
vmware_nsxv | vmware_nsxv
\ No newline at end of file diff --git a/docs/source/netutils/lib_mapping/NTCTEMPLATES_table.rst b/docs/source/netutils/lib_mapping/NTCTEMPLATES_table.rst new file mode 100755 index 00000000..35ab51ff --- /dev/null +++ b/docs/source/netutils/lib_mapping/NTCTEMPLATES_table.rst @@ -0,0 +1,339 @@ +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NTCTEMPLATES | NORMALIZED
a10 | a10
accedian | accedian
adtran_os | adtran_os
alcatel_aos | alcatel_aos
alcatel_sros | alcatel_sros
apresia_aeos | apresia_aeos
arista_eos | arista_eos
aruba_os | aruba_os
aruba_osswitch | aruba_osswitch
aruba_procurve | aruba_procurve
avaya_ers | avaya_ers
avaya_vsp | avaya_vsp
allied_telesis_awplus | allied_telesis_awplus
broadcom_icos | broadcom_icos
brocade_fos | brocade_fos
brocade_fastiron | brocade_fastiron
brocade_netiron | brocade_netiron
brocade_nos | brocade_nos
brocade_vdx | brocade_vdx
brocade_vyos | brocade_vyos
checkpoint_gaia | checkpoint_gaia
calix_b6 | calix_b6
centec_os | centec_os
ciena_saos | ciena_saos
cisco_asa | cisco_asa
cisco_ftd | cisco_ftd
cisco_ios | cisco_ios
cisco_nxos | cisco_nxos
cisco_s300 | cisco_s300
cisco_tp | cisco_tp
cisco_wlc | cisco_wlc
cisco_xe | cisco_xe
cisco_xr | cisco_xr
cloudgenix_ion | cloudgenix_ion
coriant | coriant
dell_dnos9 | dell_dnos9
dell_force10 | dell_force10
dell_os6 | dell_os6
dell_os9 | dell_os9
dell_os10 | dell_os10
dell_powerconnect | dell_powerconnect
dell_isilon | dell_isilon
dlink_ds | dlink_ds
endace | endace
eltex | eltex
eltex_esr | eltex_esr
enterasys | enterasys
ericsson_ipos | ericsson_ipos
extreme | extreme
extreme_ers | extreme_ers
extreme_exos | extreme_exos
extreme_netiron | extreme_netiron
extreme_nos | extreme_nos
extreme_slx | extreme_slx
extreme_vdx | extreme_vdx
extreme_vsp | extreme_vsp
extreme_wing | extreme_wing
f5_ltm | f5_ltm
f5_tmsh | f5_tmsh
f5_linux | f5_linux
flexvnf | flexvnf
fortinet | fortinet
generic | generic
generic_termserver | generic_termserver
hp_comware | hp_comware
hp_procurve | hp_procurve
huawei | huawei
huawei_smartax | huawei_smartax
huawei_olt | huawei_olt
huawei_vrpv8 | huawei_vrpv8
ipinfusion_ocnos | ipinfusion_ocnos
juniper | juniper
juniper_junos | juniper_junos
juniper_screenos | juniper_screenos
keymile | keymile
keymile_nos | keymile_nos
linux | linux
mikrotik_routeros | mikrotik_routeros
mikrotik_switchos | mikrotik_switchos
mellanox | mellanox
mellanox_mlnxos | mellanox_mlnxos
mrv_lx | mrv_lx
mrv_optiswitch | mrv_optiswitch
netapp_cdot | netapp_cdot
netgear_prosafe | netgear_prosafe
netscaler | netscaler
nokia_sros | nokia_sros
oneaccess_oneos | oneaccess_oneos
ovs_linux | ovs_linux
paloalto_panos | paloalto_panos
pluribus | pluribus
quanta_mesh | quanta_mesh
rad_etx | rad_etx
raisecom_roap | raisecom_roap
ruckus_fastiron | ruckus_fastiron
ruijie_os | ruijie_os
sixwind_os | sixwind_os
sophos_sfos | sophos_sfos
tplink_jetstream | tplink_jetstream
ubiquiti_edge | ubiquiti_edge
ubiquiti_edgerouter | ubiquiti_edgerouter
ubiquiti_edgeswitch | ubiquiti_edgeswitch
ubiquiti_unifiswitch | ubiquiti_unifiswitch
vyatta_vyos | vyatta_vyos
vyos | vyos
watchguard_fireware | watchguard_fireware
zte_zxros | zte_zxros
yamaha | yamaha
watchguard_firebox | watchguard_firebox
huawei_vrp | huawei_vrp
vmware_nsxv | vmware_nsxv
\ No newline at end of file diff --git a/docs/source/netutils/lib_mapping/PYATS_reverse_table.rst b/docs/source/netutils/lib_mapping/PYATS_reverse_table.rst new file mode 100755 index 00000000..405e1a43 --- /dev/null +++ b/docs/source/netutils/lib_mapping/PYATS_reverse_table.rst @@ -0,0 +1,36 @@ +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NORMALIZED | PYATS
cisco_asa | asa
f5_tmsh | bigip
cisco_dnac | dnac
cisco_ios | iosxe
cisco_xr | iosxr
juniper_junos | junos
linux | linux
cisco_nxos | nxos
nokia_sros | sros
cisco_viptella | viptela
\ No newline at end of file diff --git a/docs/source/netutils/lib_mapping/PYATS_table.rst b/docs/source/netutils/lib_mapping/PYATS_table.rst new file mode 100755 index 00000000..1ffc20c7 --- /dev/null +++ b/docs/source/netutils/lib_mapping/PYATS_table.rst @@ -0,0 +1,39 @@ +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PYATS | NORMALIZED
asa | cisco_asa
bigip | f5_tmsh
dnac | cisco_dnac
ios | cisco_ios
iosxe | cisco_ios
iosxr | cisco_xr
junos | juniper_junos
linux | linux
nxos | cisco_nxos
sros | nokia_sros
viptela | cisco_viptella
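
Note that the pyATS mapping is not one-to-one: as the table above shows, both the ``ios`` and ``iosxe`` platform names normalize to ``cisco_ios``. A minimal doctest-style sketch::

    >>> from netutils.lib_mapper import PYATS_LIB_MAPPER
    >>> PYATS_LIB_MAPPER["ios"]
    'cisco_ios'
    >>> PYATS_LIB_MAPPER["iosxe"]
    'cisco_ios'
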
\ No newline at end of file diff --git a/docs/source/netutils/lib_mapping/PYNTC_reverse_table.rst b/docs/source/netutils/lib_mapping/PYNTC_reverse_table.rst new file mode 100755 index 00000000..c6ce781d --- /dev/null +++ b/docs/source/netutils/lib_mapping/PYNTC_reverse_table.rst @@ -0,0 +1,27 @@ +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + +
NORMALIZED | PYNTC
cisco_asa | cisco_asa_ssh
arista_eos | arista_eos_eapi
f5_tmsh | f5_tmos_icontrol
cisco_ios | cisco_ios_ssh
juniper_junos | juniper_junos_netconf
cisco_nxos | cisco_nxos_nxapi
cisco_wlc | cisco_aireos_ssh
\ No newline at end of file diff --git a/docs/source/netutils/lib_mapping/PYNTC_table.rst b/docs/source/netutils/lib_mapping/PYNTC_table.rst new file mode 100755 index 00000000..53470f21 --- /dev/null +++ b/docs/source/netutils/lib_mapping/PYNTC_table.rst @@ -0,0 +1,27 @@ +.. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + +
PYNTC | NORMALIZED
cisco_asa_ssh | cisco_asa
arista_eos_eapi | arista_eos
f5_tmos_icontrol | f5_tmsh
cisco_ios_ssh | cisco_ios
juniper_junos_netconf | juniper_junos
cisco_nxos_nxapi | cisco_nxos
cisco_aireos_ssh | cisco_wlc
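
With ``nxos_ssh`` added to the NAPALM mapper in this release (#138), a NAPALM ``nxos_ssh`` driver name can now be carried through to its pyntc equivalent via the normalized name. A minimal doctest-style sketch::

    >>> from netutils.lib_mapper import NAPALM_LIB_MAPPER, PYNTC_LIB_MAPPER_REVERSE
    >>> normalized = NAPALM_LIB_MAPPER["nxos_ssh"]
    >>> normalized
    'cisco_nxos'
    >>> PYNTC_LIB_MAPPER_REVERSE[normalized]
    'cisco_nxos_nxapi'
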
\ No newline at end of file diff --git a/docs/source/netutils/lib_mapping/SCRAPLI_reverse_table.rst b/docs/source/netutils/lib_mapping/SCRAPLI_reverse_table.rst new file mode 100755 index 00000000..cdd7f74f --- /dev/null +++ b/docs/source/netutils/lib_mapping/SCRAPLI_reverse_table.rst @@ -0,0 +1,21 @@ +.. raw:: html + + + + + + + + + + + + + + + + + + + +
NORMALIZED | SCRAPLI
cisco_ios | cisco_iosxe
cisco_xr | cisco_iosxr
cisco_nxos | cisco_nxos
arista_eos | arista_eos
juniper_junos | juniper_junos
\ No newline at end of file diff --git a/docs/source/netutils/lib_mapping/SCRAPLI_table.rst b/docs/source/netutils/lib_mapping/SCRAPLI_table.rst new file mode 100755 index 00000000..f420334b --- /dev/null +++ b/docs/source/netutils/lib_mapping/SCRAPLI_table.rst @@ -0,0 +1,21 @@ +.. raw:: html + + + + + + + + + + + + + + + + + + + +
SCRAPLI | NORMALIZED
cisco_iosxe | cisco_ios
cisco_iosxr | cisco_xr
cisco_nxos | cisco_nxos
arista_eos | arista_eos
juniper_junos | juniper_junos
\ No newline at end of file diff --git a/docs/source/netutils/lib_mapping/index.rst b/docs/source/netutils/lib_mapping/index.rst index 4953717e..52af9f92 100644 --- a/docs/source/netutils/lib_mapping/index.rst +++ b/docs/source/netutils/lib_mapping/index.rst @@ -41,111 +41,51 @@ a python script to leverage the backup capabilities of pyntc? Here's an example Another use case could be using an example like the above in an Ansible filter. That would allow you to write a filter utilizing whichever automation library you needed without having to store the driver for each one in your Source of Truth. Napalm Mapper -=================== -.. exec:: - import json - from netutils.lib_mapper import NAPALM_LIB_MAPPER - json_obj = json.dumps(NAPALM_LIB_MAPPER, sort_keys=True, indent=4) - json_obj = json_obj[:-1] + " }" - print(f".. code-block:: JavaScript\n\n {json_obj}\n\n") +============================== +.. include:: NAPALM_table.rst Reverse Napalm Mapper -===================== -.. exec:: - import json - from netutils.lib_mapper import NAPALM_LIB_MAPPER_REVERSE - json_obj = json.dumps(NAPALM_LIB_MAPPER_REVERSE, sort_keys=True, indent=4) - json_obj = json_obj[:-1] + " }" - print(f".. code-block:: JavaScript\n\n {json_obj}\n\n") +============================== +.. include:: NAPALM_reverse_table.rst PyNTC Mapper -============== -.. exec:: - import json - from netutils.lib_mapper import PYNTC_LIB_MAPPER - json_obj = json.dumps(PYNTC_LIB_MAPPER, sort_keys=True, indent=4) - json_obj = json_obj[:-1] + " }" - print(f".. code-block:: JavaScript\n\n {json_obj}\n\n") +============================== +.. include:: PYNTC_table.rst Reverse PyNTC Mapper -==================== -.. exec:: - import json - from netutils.lib_mapper import PYNTC_LIB_MAPPER_REVERSE - json_obj = json.dumps(PYNTC_LIB_MAPPER_REVERSE, sort_keys=True, indent=4) - json_obj = json_obj[:-1] + " }" - print(f".. code-block:: JavaScript\n\n {json_obj}\n\n") +============================== +.. include:: PYNTC_reverse_table.rst Ansible Mapper -============== -.. exec:: - import json - from netutils.lib_mapper import ANSIBLE_LIB_MAPPER - json_obj = json.dumps(ANSIBLE_LIB_MAPPER, sort_keys=True, indent=4) - json_obj = json_obj[:-1] + " }" - print(f".. code-block:: JavaScript\n\n {json_obj}\n\n") +============================== +.. include:: ANSIBLE_table.rst Reverse Ansible Mapper -====================== -.. exec:: - import json - from netutils.lib_mapper import ANSIBLE_LIB_MAPPER_REVERSE - json_obj = json.dumps(ANSIBLE_LIB_MAPPER_REVERSE, sort_keys=True, indent=4) - json_obj = json_obj[:-1] + " }" - print(f".. code-block:: JavaScript\n\n {json_obj}\n\n") +============================== +.. include:: ANSIBLE_reverse_table.rst PyATS Mapper -============== -.. exec:: - import json - from netutils.lib_mapper import PYATS_LIB_MAPPER - json_obj = json.dumps(PYATS_LIB_MAPPER, sort_keys=True, indent=4) - json_obj = json_obj[:-1] + " }" - print(f".. code-block:: JavaScript\n\n {json_obj}\n\n") +============================== +.. include:: PYATS_table.rst Reverse PyATS Mapper -==================== -.. exec:: - import json - from netutils.lib_mapper import PYATS_LIB_MAPPER_REVERSE - json_obj = json.dumps(PYATS_LIB_MAPPER_REVERSE, sort_keys=True, indent=4) - json_obj = json_obj[:-1] + " }" - print(f".. code-block:: JavaScript\n\n {json_obj}\n\n") +============================== +.. include:: PYATS_reverse_table.rst Scrapli Mapper -============== -.. 
exec:: - import json - from netutils.lib_mapper import SCRAPLI_LIB_MAPPER - json_obj = json.dumps(SCRAPLI_LIB_MAPPER, sort_keys=True, indent=4) - json_obj = json_obj[:-1] + " }" - print(f".. code-block:: JavaScript\n\n {json_obj}\n\n") +============================== +.. include:: SCRAPLI_table.rst Reverse Scrapli Mapper -====================== -.. exec:: - import json - from netutils.lib_mapper import SCRAPLI_LIB_MAPPER_REVERSE - json_obj = json.dumps(SCRAPLI_LIB_MAPPER_REVERSE, sort_keys=True, indent=4) - json_obj = json_obj[:-1] + " }" - print(f".. code-block:: JavaScript\n\n {json_obj}\n\n") +============================== +.. include:: SCRAPLI_reverse_table.rst NTC Templates Mapper -==================== -.. exec:: - import json - from netutils.lib_mapper import NTCTEMPLATES_LIB_MAPPER - json_obj = json.dumps(NTCTEMPLATES_LIB_MAPPER, sort_keys=True, indent=4) - json_obj = json_obj[:-1] + " }" - print(f".. code-block:: JavaScript\n\n {json_obj}\n\n") +============================== +.. include:: NTCTEMPLATES_table.rst Reverse NTC Templates Mapper -============================ -.. exec:: - import json - from netutils.lib_mapper import NTCTEMPLATES_LIB_MAPPER_REVERSE - json_obj = json.dumps(NTCTEMPLATES_LIB_MAPPER_REVERSE, sort_keys=True, indent=4) - json_obj = json_obj[:-1] + " }" - print(f".. code-block:: JavaScript\n\n {json_obj}\n\n") +============================== +.. include:: NTCTEMPLATES_reverse_table.rst diff --git a/docs/source/table_template.j2 b/docs/source/table_template.j2 new file mode 100644 index 00000000..b9394d03 --- /dev/null +++ b/docs/source/table_template.j2 @@ -0,0 +1,9 @@ +.. raw:: html + + + {% for lib_name in lib_names %}{% endfor %} + {% for specific_driver, normalized_driver in mappings.items() %} + + + {% endfor %} +
{{lib_name|upper}}
{{ specific_driver }} | {{ normalized_driver }}
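
This template is rendered by the ``build_mapping_tables`` hook added to ``docs/source/conf.py`` earlier in this diff, once per mapper dictionary in ``netutils.lib_mapper``. A minimal standalone sketch of the same rendering; the ``docs/source`` loader path assumes the snippet is run from the repository root::

    >>> from jinja2 import Environment, FileSystemLoader
    >>> from netutils import lib_mapper
    >>> env = Environment(loader=FileSystemLoader("docs/source"))  # path is an assumption
    >>> template = env.get_template("table_template.j2")
    >>> # Header cells come from lib_names, one table row per mapping pair.
    >>> rst = template.render(lib_names=["NAPALM", "NORMALIZED"], mappings=lib_mapper.NAPALM_LIB_MAPPER)
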
\ No newline at end of file diff --git a/netutils/__init__.py b/netutils/__init__.py index 6fae4e2c..7fc179f7 100644 --- a/netutils/__init__.py +++ b/netutils/__init__.py @@ -1,3 +1,3 @@ """Initialization file for library.""" -__version__ = "1.1.0" +__version__ = "1.2.0" diff --git a/netutils/asn.py b/netutils/asn.py index 522e616e..10d1dd53 100644 --- a/netutils/asn.py +++ b/netutils/asn.py @@ -1,14 +1,14 @@ """Functions for working with BGP ASNs.""" -def asn_to_int(asplain): +def asn_to_int(asplain: str) -> int: """Convert AS Number to standardized asplain notation as an integer. Args: - asplain (str): An `asplain` notated BGP ASN with community. + asplain: An `asplain` notated BGP ASN with community. Returns: - int: Integer value within of the given asplain value provided. + Integer value within of the given asplain value provided. Example: >>> from netutils.asn import asn_to_int @@ -20,7 +20,7 @@ def asn_to_int(asplain): """ # ASN is in asdot notation if "." in asplain: - asn = asplain.split(".") - asn = int(f"{int(asn[0]):016b}{int(asn[1]):016b}", 2) + asn_list = asplain.split(".") + asn = int(f"{int(asn_list[0]):016b}{int(asn_list[1]):016b}", 2) return asn return int(asplain) diff --git a/netutils/bandwidth.py b/netutils/bandwidth.py index 13eb6a67..84fd9b45 100644 --- a/netutils/bandwidth.py +++ b/netutils/bandwidth.py @@ -1,8 +1,9 @@ """Functions for performing bandwidth calculations.""" import re +import typing as t -def _get_bits_mapping(): +def _get_bits_mapping() -> t.Dict[str, t.Dict[str, int]]: bits_value = 0 bits_mapping = {} for _bit in ["bps", "Kbps", "Mbps", "Gbps", "Tbps", "Pbps", "Ebps", "Zbps"]: @@ -18,7 +19,7 @@ def _get_bits_mapping(): BITS_MAPPING = _get_bits_mapping() -def _get_bytes_mapping(): +def _get_bytes_mapping() -> t.Dict[str, t.Dict[str, int]]: bytes_value = 0 bytes_mapping = {} for _byte in ["Bps", "KBps", "MBps", "GBps", "TBps", "PBps", "EBps", "ZBps"]: @@ -38,10 +39,10 @@ def name_to_bits(speed: str) -> int: """Method to convert a short bandwidth name to int value in bps. Args: - speed (str): Bandwidth to be converted like `100Gbps` to bps. + speed: Bandwidth to be converted like `100Gbps` to bps. Returns: - int: value of bandwidth to be converted to bps + value of bandwidth to be converted to bps Example: >>> from netutils.bandwidth import name_to_bits @@ -70,10 +71,10 @@ def name_to_bytes(speed: str) -> float: """Method to convert a short bandwidth name to float value in Bps. Args: - speed (str): Bandwidth to be converted like `100GBps` to Bps. + speed: Bandwidth to be converted like `100GBps` to Bps. Returns: - float: value of bandwidth to be converted to Bps + value of bandwidth to be converted to Bps Example: >>> from netutils.bandwidth import name_to_bytes @@ -99,16 +100,16 @@ def name_to_bytes(speed: str) -> float: def bits_to_name( # pylint: disable=too-many-branches,too-many-return-statements - speed: int, nbr_decimal: int = 0 + speed: int, nbr_decimal: t.Optional[int] = 0 ) -> str: """Method to convert an int value for speed int bits to the name value. Args: - speed (int): Speed in bits to be converted. - nbr_decimal (int, optional): Precision of end result, ie number of decimal points to round to. Defaults to 0. + speed: Speed in bits to be converted. + nbr_decimal: Precision of end result, ie number of decimal points to round to. Defaults to 0. 
Returns: - str: Name value for speed in bits + Name value for speed in bits Example: >>> from netutils.bandwidth import bits_to_name @@ -119,11 +120,11 @@ def bits_to_name( # pylint: disable=too-many-branches,too-many-return-statement """ if not isinstance(speed, int): raise ValueError(f"Speed of {speed} was not a valid speed integer.") + if nbr_decimal == 0: + nbr_decimal = None for bit_type, val in BITS_MAPPING.items(): if val["low"] <= speed < val["high"]: - if nbr_decimal == 0: - nbr_decimal = None if val["low"] == 0: return f"{round(speed, nbr_decimal)}{bit_type}" return f"{round(speed / val['low'], nbr_decimal)}{bit_type}" @@ -134,11 +135,11 @@ def bytes_to_name(speed: float, nbr_decimal: int = 0) -> str: """Method to convert an int value for speed in bytes to the name value. Args: - speed (int): Speed in bytes to be converted. - nbr_decimal (int, optional): Precision of end result, ie number of decimal points to round to. Defaults to 0. + speed: Speed in bytes to be converted. + nbr_decimal: Precision of end result, ie number of decimal points to round to. Defaults to 0. Returns: - str: Name value for speed in bytes + Name value for speed in bytes Example: >>> from netutils.bandwidth import bytes_to_name @@ -164,12 +165,12 @@ def name_to_name(speed: str, speed_type: str, nbr_decimal: int = 0) -> str: """Method to convert a short bandwidth name to another bandwdth name. Args: - speed (str): Bandwidth to be converted like `100GBps`. - speed_type (str): Name to convert the bandwdth to like `MBps`. - nbr_decimal (int, optional): Precision of end result, ie number of decimal points to round to. Defaults to 0. + speed: Bandwidth to be converted like `100GBps`. + speed_type: Name to convert the bandwdth to like `MBps`. + nbr_decimal: Precision of end result, ie number of decimal points to round to. Defaults to 0. Returns: - str: The named value which user wishes to return to. + The named value which user wishes to return to. Example: >>> from netutils.bandwidth import name_to_name diff --git a/netutils/banner.py b/netutils/banner.py index 3e2ee213..be9df8c6 100644 --- a/netutils/banner.py +++ b/netutils/banner.py @@ -1,18 +1,19 @@ """Functions for working with the banner configuration.""" import re + from netutils.constants import CARET_C -def delimiter_change(config, from_delimiter, to_delimiter): +def delimiter_change(config: str, from_delimiter: str, to_delimiter: str) -> str: r"""Change the banner delimiter. Args: - config (str): Configuration line containing banner delimiter. - from_delimiter (str): Delimiter to replace in the banner. - to_delimiter (str): Delimiter to include in the config. + config: Configuration line containing banner delimiter. + from_delimiter: Delimiter to replace in the banner. + to_delimiter: Delimiter to include in the config. Returns: - str: Configuration with delimiter replaced. + Configuration with delimiter replaced. Example: >>> from netutils.banner import delimiter_change @@ -29,15 +30,15 @@ def delimiter_change(config, from_delimiter, to_delimiter): return config_line -def normalise_delimiter_caret_c(delimiter, config): +def normalise_delimiter_caret_c(delimiter: str, config: str) -> str: r"""Normalise delimiter to ^C. Args: - delimiter (str): Banner delimiter. - config (str): Configuration line containing banner delimiter. + delimiter: Banner delimiter. + config: Configuration line containing banner delimiter. Returns: - str: Configuration with delimiter normalised to ^C. + Configuration with delimiter normalised to ^C. 
Example: >>> from netutils.banner import normalise_delimiter_caret_c diff --git a/netutils/config/clean.py b/netutils/config/clean.py index 8112ab5c..71a4a636 100644 --- a/netutils/config/clean.py +++ b/netutils/config/clean.py @@ -1,19 +1,19 @@ """Functions for working with configuration to clean the config.""" # pylint: disable=anomalous-backslash-in-string - import re +import typing as t -def clean_config(config, filters): +def clean_config(config: str, filters: t.List[t.Dict[str, str]]) -> str: r"""Given a list of regex patterns, delete those lines that match. Args: - config (str): A string representation of a device configuration. - filters (list): A list of regex patterns used to delete remove configuration. + config: A string representation of a device configuration. + filters: A list of regex patterns used to delete remove configuration. Returns: - str: Stripped down configuration. + Stripped down configuration. Example: >>> from netutils.config.clean import clean_config @@ -52,12 +52,12 @@ def clean_config(config, filters): return config -def sanitize_config(config, filters): +def sanitize_config(config: str, filters: t.Optional[t.List[t.Dict[str, str]]] = None) -> str: r"""Given a dictionary of filters, remove sensitive data from the provided config. Args: - config (str): A string representation of a device configuration. - filters (dict, optional): A dictionary of regex patterns used to sanitize configuration, namely secrets. Defaults to empty dictionary. + config: A string representation of a device configuration. + filters: A list of dictionaries of regex patterns used to sanitize configuration, namely secrets. Defaults to an empty list. Returns: str: Sanitized configuration. @@ -75,6 +75,8 @@ def sanitize_config(config, filters): 'enable secret 5 ' >>> """ + if not filters: + filters = [] for item in filters: config = re.sub(item["regex"], item["replace"], config, flags=re.MULTILINE) return config diff --git a/netutils/config/compliance.py b/netutils/config/compliance.py index 7f6a6fe4..07f4b21f 100644 --- a/netutils/config/compliance.py +++ b/netutils/config/compliance.py @@ -1,8 +1,10 @@ """Filter Plugins for compliance checks.""" +import typing as t + from . import parser # pylint: disable=relative-beyond-top-level -parser_map = { +parser_map: t.Dict[str, t.Type[parser.BaseConfigParser]] = { "arista_eos": parser.EOSConfigParser, "cisco_ios": parser.IOSConfigParser, "cisco_nxos": parser.NXOSConfigParser, @@ -15,7 +17,9 @@ "nokia_sros": parser.NokiaConfigParser, } -default_feature = { +# TODO: Once support for 3.7 is dropped, there should be a typing.TypedDict for this which should then also be used +# as the return type for a bunch of the following methods. +default_feature: t.Dict[str, t.Union[str, bool, None]] = { "compliant": None, "missing": None, "extra": None, @@ -27,16 +31,16 @@ } -def _check_configs_differences(intended_cfg, actual_cfg, network_os): +def _check_configs_differences(intended_cfg: str, actual_cfg: str, network_os: str) -> t.Dict[str, t.Union[str, bool]]: r"""Find differences between intended and actual config lines. Args: - intended_cfg (str): Feature intended configuration. - actual_cfg: (str): Feature actual configuration. - network_os (str): Device network operating system that is in parser_map keys. + intended_cfg: Feature intended configuration. + actual_cfg: Feature actual configuration. + network_os: Device network operating system that is in parser_map keys. Returns: - dict: Config fragments that are missing, extra or unordered_compliant. 
+ Config fragments that are missing, extra or unordered_compliant. Example: >>> from netutils.config.compliance import _check_configs_differences @@ -69,12 +73,12 @@ def _check_configs_differences(intended_cfg, actual_cfg, network_os): } -def _is_feature_ordered_compliant(feature_intended_cfg, feature_actual_cfg): +def _is_feature_ordered_compliant(feature_intended_cfg: str, feature_actual_cfg: str) -> bool: """Check if feature intended cfg is compliant with feature actual cfg. Args: - feature_intended_cfg (str): Feature intended configuration. - feature_actual_cfg: (str): Feature actual configuration. + feature_intended_cfg: Feature intended configuration. + feature_actual_cfg: Feature actual configuration. Returns: bool @@ -97,25 +101,30 @@ def _is_feature_ordered_compliant(feature_intended_cfg, feature_actual_cfg): return False -def _open_file_config(cfg_path): +def _open_file_config(cfg_path: str) -> str: """Open config file from local disk.""" - try: - with open(cfg_path, encoding="utf-8") as filehandler: - device_cfg = filehandler.read() - except IOError: - return False + # This might fail, raising an IOError + with open(cfg_path, encoding="utf-8") as filehandler: + device_cfg = filehandler.read() + return device_cfg.strip() -def compliance(features, backup, intended, network_os, cfg_type="file"): +def compliance( + features: t.List[t.Dict[str, t.Union[str, bool, t.List[str]]]], + backup: str, + intended: str, + network_os: str, + cfg_type: str = "file", +) -> t.Dict[str, t.Dict[str, t.Union[str, bool]]]: r"""Report compliance for all features provided as input. Args: - features (list): List of features for particular network os. - backup (path): running config or config backup file to compare against intended. - intended (path): intended config to compare against backup. - network_os (str): Device network operating system that is in parser_map keys. - cfg_type (str, optional): A string that is effectively a choice between `file` and `string`. Defaults to `file`. + features: List of features for particular network os. + backup: running config or config backup file to compare against intended. + intended: intended config to compare against backup. + network_os: Device network operating system that is in parser_map keys. + cfg_type: A string that is effectively a choice between `file` and `string`. Defaults to `file`. Returns: dict: Compliance information per feature. @@ -176,19 +185,21 @@ def compliance(features, backup, intended, network_os, cfg_type="file"): intended_str = section_config(feature, intended_cfg, network_os) compliance_results.update({feature["name"]: feature_compliance(feature, backup_str, intended_str, network_os)}) - return compliance_results + return compliance_results # type: ignore -def config_section_not_parsed(features, device_cfg, network_os): +def config_section_not_parsed( + features: t.List[t.Dict[str, t.Union[str, bool, t.List[str]]]], device_cfg: str, network_os: str +) -> t.Dict[str, t.Union[str, t.List[str]]]: r"""Return device config section that is not checked by compliance. Args: - features (list): List of features for particular network os. - device_cfg (str): Device configuration. - network_os (str): Device network operating system that is in parser_map keys. + features: List of features for particular network os. + device_cfg: Device configuration. + network_os: Device network operating system that is in parser_map keys. Returns: - dict: Config that was not parsed or section not found. + Config that was not parsed or section not found. 
Example: >>> features = [{ @@ -216,20 +227,20 @@ def config_section_not_parsed(features, device_cfg, network_os): remaining_cfg = remaining_cfg.replace(feature_cfg, "") return { "remaining_cfg": remaining_cfg.strip(), - "section_not_found": section_not_found, + "section_not_found": section_not_found, # type: ignore } -def diff_network_config(compare_config, base_config, network_os): +def diff_network_config(compare_config: str, base_config: str, network_os: str) -> str: """Identify which lines in compare_config are not in base_config. Args: - compare_config (str): The config to evaluate against base_config. - base_config (str): The config to compare compare_config against. - network_os (str): Device network operating system that is in parser_map keys. + compare_config: The config to evaluate against base_config. + base_config: The config to compare compare_config against. + network_os: Device network operating system that is in parser_map keys. Returns: - base_config (str): The string of additional commands in compare_config separated by a newline. + base_config: The string of additional commands in compare_config separated by a newline. Example: >>> compare_config = '''router bgp 100 @@ -270,14 +281,16 @@ def diff_network_config(compare_config, base_config, network_os): return "\n".join(needed_lines) -def feature_compliance(feature, backup_cfg, intended_cfg, network_os): +def feature_compliance( + feature: t.Dict[str, t.Union[str, bool, t.List[str]]], backup_cfg: str, intended_cfg: str, network_os: str +) -> t.Dict[str, t.Union[str, bool]]: r"""Report compliance for all features provided as input. Args: - feature (dict): A dictionary with the attributes of the feature check - backup_cfg (str): running config or config backup of a specific feature to compare. - intended_cfg (str): intended config of a specific feature to compare. - network_os (str): Device network operating system that is in parser_map keys. + feature: A dictionary with the attributes of the feature check + backup_cfg: running config or config backup of a specific feature to compare. + intended_cfg: intended config of a specific feature to compare. + network_os: Device network operating system that is in parser_map keys. Returns: dict: Compliance information of a single feature. @@ -328,15 +341,15 @@ def feature_compliance(feature, backup_cfg, intended_cfg, network_os): else: raise # pylint: disable=misplaced-bare-raise - return feature_data + return feature_data # type: ignore -def find_unordered_cfg_lines(intended_cfg, actual_cfg): +def find_unordered_cfg_lines(intended_cfg: str, actual_cfg: str) -> t.Tuple[bool, t.List[t.Tuple[str, str]]]: """Check if config lines are miss-ordered, i.e in ACL-s. Args: - intended_cfg (str): Feature intended configuration. - actual_cfg: (str): Feature actual configuration. + intended_cfg: Feature intended configuration. + actual_cfg: Feature actual configuration. Returns: list: List of tuples with unordered_compliant cfg lines. @@ -367,19 +380,19 @@ def find_unordered_cfg_lines(intended_cfg, actual_cfg): return (False, unordered_lines) -def section_config(feature, device_cfg, network_os): +def section_config(feature: t.Dict[str, t.Union[str, bool, t.List[str]]], device_cfg: str, network_os: str) -> str: """Parse feature section config from device cfg. In case section attribute of the the feature is not provided entire content of the device_cfg is returned. Args: - feature (dict): Feature name and cfg lines that should be parsed. - device_cfg (str): Device configuration. 
- network_os (str): Device network operating system that is in parser_map keys. + feature: Feature name and cfg lines that should be parsed. + device_cfg: Device configuration. + network_os : Device network operating system that is in parser_map keys. Returns: - list: The hash report data mapping file hashes to report data. + The hash report data mapping file hashes to report data. Example: >>> feature = { @@ -418,7 +431,7 @@ def section_config(feature, device_cfg, network_os): continue else: match = False - for line_start in section_starts_with: + for line_start in section_starts_with: # type: ignore if not match and line.config_line.startswith(line_start): section_config_list.append(line.config_line) match = True diff --git a/netutils/config/parser.py b/netutils/config/parser.py index 2c9f7e3b..0f3227e1 100644 --- a/netutils/config/parser.py +++ b/netutils/config/parser.py @@ -2,6 +2,7 @@ # pylint: disable=no-member,super-with-arguments,invalid-overridden-method,raise-missing-from,invalid-overridden-method,inconsistent-return-statements,super-with-arguments,redefined-argument-from-local,no-else-break,useless-super-delegation,too-many-lines import re +import typing as t from collections import namedtuple from netutils.banner import normalise_delimiter_caret_c @@ -9,37 +10,53 @@ ConfigLine = namedtuple("ConfigLine", "config_line,parents") -class BaseConfigParser: # pylint: disable=too-few-public-methods +class BaseConfigParser: """Base class for parsers.""" + # pylint: disable=abstract-method + # The pylint disable on the previous line can be removed once support for Python 3.7 is dropped. + comment_chars = ["!"] banner_start = ["banner", "vacant-message"] - def __init__(self, config): + def __init__(self, config: str): """Create ConfigParser Object. Args: - config (str): The config text to parse. + config: The config text to parse. """ self.config = config - self._config = None - self._current_parents = () + self._config: t.Optional[str] = None + self._current_parents: t.Tuple[str, ...] = () self.generator_config = (line for line in self.config_lines_only.splitlines()) - self.config_lines = [] + self.config_lines: t.List[ConfigLine] = [] self.build_config_relationship() - def config_lines_only(self): + @property + def config_lines_only(self) -> str: """Remove lines not related to config.""" - raise NotImplementedError + raise NotImplementedError() + + @property + def banner_end(self) -> str: + """Demarcate End of Banner char(s).""" + raise NotImplementedError() + + def build_config_relationship(self) -> t.List[ConfigLine]: + """Parse text tree of config lines and their parents.""" + raise NotImplementedError() class BaseSpaceConfigParser(BaseConfigParser): """Base parser class for config syntax that demarcates using spaces/indentation.""" + # pylint: disable=abstract-method + # The pylint disable on the previous line can be removed once support for Python 3.7 is dropped. + comment_chars = ["!"] banner_start = ["banner", "vacant-message"] - def __init__(self, config): + def __init__(self, config: str): """Create ConfigParser Object. 
Args: @@ -49,49 +66,49 @@ def __init__(self, config): super(BaseSpaceConfigParser, self).__init__(config) @property - def indent_level(self): + def indent_level(self) -> int: """Count the number of spaces a config line is indented.""" return self._indent_level @indent_level.setter - def indent_level(self, value): + def indent_level(self, value: int) -> None: self._indent_level = value - def is_banner_end(self, line): + def is_banner_end(self, line: str) -> bool: """Determine if line ends the banner config. Args: - line (str): The current config line in iteration. + line: The current config line in iteration. Returns: - bool: True if line ends banner, else False. + True if line ends banner, else False. """ if self.banner_end in line: return True return False - def is_banner_start(self, line): + def is_banner_start(self, line: str) -> bool: """Determine if the line starts a banner config. Args: - line (str): The current config line in iteration. + line: The current config line in iteration. Returns: - bool: True if line starts banner, else False. + True if line starts banner, else False. """ for banner_start in self.banner_start: if line.lstrip().startswith(banner_start): return True return False - def is_comment(self, line): + def is_comment(self, line: str) -> bool: """Determine if line is a comment. Args: - line (str): A config line from the device. + line: A config line from the device. Returns: - bool: True if line is a comment, else False. + True if line is a comment, else False. Example: >>> BaseSpaceConfigParser("interface Ethernet1/1").is_comment("interface Ethernet1/1") @@ -106,11 +123,11 @@ def is_comment(self, line): return False @property - def config_lines_only(self): + def config_lines_only(self) -> str: """Remove spaces and comments from config lines. Returns: - str: The non-space and non-comment lines from ``config``. + The non-space and non-comment lines from ``config``. Example: >>> config = '''! @@ -137,14 +154,14 @@ def config_lines_only(self): return self._config @staticmethod - def get_leading_space_count(config_line): + def get_leading_space_count(config_line: str) -> int: r"""Determine how many spaces the ``config_line`` is indented. Args: - config_line (str): A line of text in the config. + config_line: A line of text in the config. Returns: - int: The number of leading spaces. + The number of leading spaces. Example: >>> config = '''interface GigabitEthernet1\n description link to ISP''' @@ -156,14 +173,14 @@ def get_leading_space_count(config_line): """ return len(config_line) - len(config_line.lstrip()) - def _remove_parents(self, line, current_spaces): + def _remove_parents(self, line: str, current_spaces: int) -> t.Tuple[str, ...]: """Remove parents from ``self._curent_parents`` based on indent levels. Args: - config_line (str): A line of text in the config. + config_line: A line of text in the config. Returns: - tuple: The config lines parent config lines. + The config lines parent config lines. """ deindent_level = 1 try: @@ -178,18 +195,17 @@ def _remove_parents(self, line, current_spaces): parents = self._current_parents[:-deindent_level] or (self._current_parents[0],) return parents - def _build_banner(self, config_line): + def _build_banner(self, config_line: str) -> t.Optional[str]: """Handle banner config lines. Args: - config_line (str): The start of the banner config. + config_line: The start of the banner config. Returns: - str: The next configuration line in the configuration text. - None: When banner end is the end of the config text. 
+ The next configuration line in the configuration text or None Raises: - ValueError: When the parser is unable to identify the End of the Banner. + ValueError: When the parser is unable to identify the end of the Banner. """ self._update_config_lines(config_line) self._current_parents += (config_line,) @@ -213,15 +229,15 @@ def _build_banner(self, config_line): raise ValueError("Unable to parse banner end.") - def _build_nested_config(self, line): + def _build_nested_config(self, line: str) -> t.Optional[str]: """Handle building child config sections. Args: - line (str): A configuration line from the configuration text. + line: A configuration line from the configuration text. Returns: - str: The next top-level configuration line in the configuration text. - None: When the last line of configuration text is a nested configuration line. + The next top-level configuration line in the configuration text or None when the last line of configuration + text is a nested configuration line. Raises: IndexError: When the number of parents does not match the expected deindent level. @@ -246,19 +262,21 @@ def _build_nested_config(self, line): self.indent_level = spaces if self.is_banner_start(line): - line = self._build_banner(line) - if line is None or not line[0].isspace(): + banner_line = self._build_banner(line) + if banner_line is None or not banner_line[0].isspace(): self._current_parents = () self.indent_level = 0 - return line + return banner_line + line = banner_line self._update_config_lines(line) + return None - def _update_config_lines(self, config_line): + def _update_config_lines(self, config_line: str) -> None: """Add a ``ConfigLine`` object to ``self.config_lines``. Args: - config_line (str): The current config line being evaluated. + config_line: The current config line being evaluated. Returns: None @@ -266,7 +284,7 @@ def _update_config_lines(self, config_line): entry = ConfigLine(config_line, self._current_parents) self.config_lines.append(entry) - def build_config_relationship(self): + def build_config_relationship(self) -> t.List[ConfigLine]: r"""Parse text tree of config lines and their parents. Example: @@ -291,24 +309,24 @@ def build_config_relationship(self): if not line[0].isspace(): self._current_parents = () if self.is_banner_start(line): - line = self._build_banner(line) + line = self._build_banner(line) # type: ignore else: previous_config = self.config_lines[-1] self._current_parents = (previous_config.config_line,) self.indent_level = self.get_leading_space_count(line) if not self.is_banner_start(line): - line = self._build_nested_config(line) + line = self._build_nested_config(line) # type: ignore else: - line = self._build_banner(line) + line = self._build_banner(line) # type: ignore if line is not None and line[0].isspace(): - line = self._build_nested_config(line) + line = self._build_nested_config(line) # type: ignore else: self._current_parents = () if line is None: break elif self.is_banner_start(line): - line = self._build_banner(line) + line = self._build_banner(line) # type: ignore self._update_config_lines(line) return self.config_lines @@ -317,9 +335,12 @@ def build_config_relationship(self): class BaseBraceConfigParser(BaseConfigParser): """Base parser class for config syntax that demarcates using braces.""" - multiline_delimiters = [] + # pylint: disable=abstract-method + # The pylint disable on the previous line can be removed once support for Python 3.7 is dropped. 
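As a quick illustration of what build_config_relationship produces for an indentation-based config, a hedged usage sketch follows; the exact stored strings can vary slightly by platform, but the parent/child shape is the point.

from netutils.config.parser import IOSConfigParser

config = (
    "interface GigabitEthernet1\n"
    " description uplink\n"
    " ip address 10.0.0.1 255.255.255.0\n"
)
parser = IOSConfigParser(config)
for entry in parser.config_lines:
    # Each entry is a ConfigLine namedtuple: (config_line, parents).
    # Child lines keep their leading whitespace and carry
    # ("interface GigabitEthernet1",) as their parents tuple.
    print(entry.config_line, entry.parents)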
+ + multiline_delimiters: t.List[str] = [] - def __init__(self, config): + def __init__(self, config: str): """Create ConfigParser Object. Args: @@ -328,16 +349,16 @@ def __init__(self, config): super(BaseBraceConfigParser, self).__init__(config) @property - def config_lines_only(self): + def config_lines_only(self) -> str: """Remove trailing spaces and empty lines from config lines. Returns: - str: The non-space lines from ``config``. + The non-space lines from ``config``. """ config_lines = [line.rstrip() for line in self.config.splitlines() if line and not line.isspace()] return "\n".join(config_lines) - def build_config_relationship(self): + def build_config_relationship(self) -> t.List[ConfigLine]: r"""Parse text tree of config lines and their parents. Example: @@ -375,14 +396,14 @@ def build_config_relationship(self): return self.config_lines - def _build_multiline_config(self, delimiter): + def _build_multiline_config(self, delimiter: str) -> t.Optional[ConfigLine]: r"""Build config sections between characters demarcating multiline strings. Args: - delimiter (str): The text to look for to end multiline config. + delimiter: The text to look for to end multiline config. Returns: - ConfigLine: The multiline string text that was added to ``self.config_lines``. + The multiline string text that was added to ``self.config_lines``. Example: >>> config = ( @@ -414,6 +435,7 @@ def _build_multiline_config(self, delimiter): self.config_lines.append(multiline_entry) self._current_parents = self._current_parents[:-1] return multiline_entry + return None class CiscoConfigParser(BaseSpaceConfigParser): @@ -421,24 +443,23 @@ class CiscoConfigParser(BaseSpaceConfigParser): regex_banner = re.compile(r"^(banner\s+\S+|\s*vacant-message)\s+(?P\^C|.)") - def __init__(self, config): + def __init__(self, config: str): """Create ConfigParser Object. Args: config (str): The config text to parse. """ - self._banner_end = None + self._banner_end: t.Optional[str] = None super(CiscoConfigParser, self).__init__(config) - def _build_banner(self, config_line): + def _build_banner(self, config_line: str) -> t.Optional[str]: """Handle banner config lines. Args: - config_line (str): The start of the banner config. + config_line: The start of the banner config. Returns: - str: The next configuration line in the configuration text. - None: When banner end is the end of the config text. + The next configuration line in the configuration text or None when banner end is the end of the config text. Raises: ValueError: When the parser is unable to identify the End of the Banner. 
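The banner regexes in this hunk render without their named capture groups in the patch view (the angle-bracketed group names were eaten by the rendering). A hedged sketch of the intended delimiter extraction is below; the group name "banner_delimiter" is an assumption, not taken verbatim from the source.

import re

# Group name is assumed; the patch text only shows "(?P...)".
regex_banner = re.compile(r"^(banner\s+\S+|\s*vacant-message)\s+(?P<banner_delimiter>\^C|.)")

match = regex_banner.match("banner motd ^C")
if match:
    print(match.group("banner_delimiter"))  # ^C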
@@ -452,7 +473,7 @@ def _build_banner(self, config_line): return super(CiscoConfigParser, self)._build_banner(config_line) @staticmethod - def is_banner_one_line(config_line): + def is_banner_one_line(config_line: str) -> bool: """Determine if all banner config is on one line.""" _, delimeter, banner = config_line.partition("^C") # Based on NXOS configs, the banner delimeter is ignored until another char is used @@ -461,7 +482,7 @@ def is_banner_one_line(config_line): return False return True - def is_banner_start(self, line): + def is_banner_start(self, line: str) -> bool: """Determine if the line starts a banner config.""" state = super(CiscoConfigParser, self).is_banner_start(line) if state: @@ -469,12 +490,14 @@ def is_banner_start(self, line): return state @property - def banner_end(self): + def banner_end(self) -> str: """Demarcate End of Banner char(s).""" + if self._banner_end is None: + raise RuntimeError("Banner end not yet set.") return self._banner_end @banner_end.setter - def banner_end(self, banner_start_line): + def banner_end(self, banner_start_line: str) -> None: banner_parsed = self.regex_banner.match(banner_start_line) if not banner_parsed: raise ValueError("There was an error parsing your banner, the end of the banner could not be found") @@ -484,25 +507,24 @@ def banner_end(self, banner_start_line): class IOSConfigParser(CiscoConfigParser, BaseSpaceConfigParser): """Cisco IOS implementation of ConfigParser Class.""" - def __init__(self, config): + def __init__(self, config: str): """Create ConfigParser Object. Args: config (str): The config text to parse. """ - self.unique_config_lines = set() - self.same_line_children = set() + self.unique_config_lines: t.Set[ConfigLine] = set() + self.same_line_children: t.Set[ConfigLine] = set() super(IOSConfigParser, self).__init__(config) - def _build_banner(self, config_line): + def _build_banner(self, config_line: str) -> t.Optional[str]: """Handle banner config lines. Args: - config_line (str): The start of the banner config. + config_line: The start of the banner config. Returns: - str: The next configuration line in the configuration text. - None: When banner end is the end of the config text. + The next configuration line in the configuration text or None when banner end is the end of the config text. Raises: ValueError: When the parser is unable to identify the End of the Banner. @@ -510,9 +532,9 @@ def _build_banner(self, config_line): config_line = normalise_delimiter_caret_c(self.banner_end, config_line) return super(IOSConfigParser, self)._build_banner(config_line) - def _update_same_line_children_configs(self): + def _update_same_line_children_configs(self) -> None: """Update parents in ``self.config_lines`` per ``self.same_line_children``.""" - new_config_lines = [] + new_config_lines: t.List[ConfigLine] = [] for line in self.config_lines: if line in self.same_line_children: previous_line = new_config_lines[-1] @@ -522,7 +544,7 @@ def _update_same_line_children_configs(self): new_config_lines.append(line) self.config_lines = new_config_lines - def _update_config_lines(self, config_line): + def _update_config_lines(self, config_line: str) -> None: """Add a ``ConfigLine`` object to ``self.config_lines``. In addition to adding entries to config_lines, this also updates: @@ -530,7 +552,7 @@ def _update_config_lines(self, config_line): * self.unique_config_lines Args: - config_line (str): The current config line being evaluated. + config_line: The current config line being evaluated. 
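The duplicate-line bookkeeping above comes down to set membership on the hashable ConfigLine namedtuple. A simplified conceptual sketch of that idea, not the exact IOS parser flow:

from netutils.config.parser import ConfigLine

unique_config_lines = set()
same_line_children = set()

for entry in [
    ConfigLine("ip address 10.0.0.1/24", ("interface Ethernet1",)),
    ConfigLine("ip address 10.0.0.1/24", ("interface Ethernet1",)),  # exact duplicate
]:
    if entry in unique_config_lines:
        # A repeated (line, parents) pair is flagged as a same-line child and
        # re-parented later, roughly what _update_same_line_children_configs() handles.
        same_line_children.add(entry)
    unique_config_lines.add(entry)

print(len(unique_config_lines), len(same_line_children))  # 1 1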
Returns: None @@ -541,7 +563,7 @@ def _update_config_lines(self, config_line): self.same_line_children.add(entry) self.unique_config_lines.add(entry) - def build_config_relationship(self): + def build_config_relationship(self) -> t.List[ConfigLine]: r"""Parse text tree of config lines and their parents. Example: @@ -572,28 +594,27 @@ class NXOSConfigParser(CiscoConfigParser, BaseSpaceConfigParser): regex_banner = re.compile(r"^banner\s+\S+\s+(?P\S)") - def __init__(self, config): + def __init__(self, config: str): """Create ConfigParser Object. Args: config (str): The config text to parse. """ - self.unique_config_lines = set() - self.same_line_children = set() + self.unique_config_lines: t.Set[ConfigLine] = set() + self.same_line_children: t.Set[ConfigLine] = set() super(NXOSConfigParser, self).__init__(config) - def _build_banner(self, config_line): + def _build_banner(self, config_line: str) -> t.Optional[str]: """Handle banner config lines. Args: - config_line (str): The start of the banner config. + config_line: The start of the banner config. Returns: - str: The next configuration line in the configuration text. - None: When banner end is the end of the config text. + The next configuration line in the configuration text or None when banner end is the end of the config text. Raises: - ValueError: When the parser is unable to identify the End of the Banner. + ValueError: When the parser is unable to identify the end of the Banner. """ config_line = normalise_delimiter_caret_c(self.banner_end, config_line) return super(NXOSConfigParser, self)._build_banner(config_line) @@ -604,46 +625,85 @@ class EOSConfigParser(BaseSpaceConfigParser): banner_end = "EOF" + def _build_banner(self, config_line: str) -> t.Optional[str]: + """Handle banner config lines. + + Args: + config_line: The start of the banner config. + + Returns: + The next configuration line in the configuration text or None when banner end is the end of the config text. + + Raises: + ValueError: When the parser is unable to identify the End of the Banner. 
+ """ + self._update_config_lines(config_line) + self._current_parents += (config_line,) + banner_config = [] + for line in self.generator_config: + if not self.is_banner_end(line): + banner_config.append(line) + else: + banner_config.append(line) + line = "\n".join(banner_config) + self._update_config_lines(line) + self._current_parents = self._current_parents[:-1] + try: + return next(self.generator_config) + except StopIteration: + return None + raise ValueError("Unable to parse banner end.") + class AIREOSConfigParser(CiscoConfigParser, BaseSpaceConfigParser): """AireOSConfigParser implementation fo ConfigParser Class.""" - banner_start = [] + banner_start: t.List[str] = [] - def _build_banner(self, config_line): + def _build_banner(self, config_line: str) -> None: raise NotImplementedError() class LINUXConfigParser(BaseSpaceConfigParser): """Linux config parser.""" - comment_chars = ["#"] + comment_chars: t.List[str] = ["#"] + + @property + def banner_end(self) -> str: + """Demarcate End of Banner char(s).""" + raise NotImplementedError("Linux platform doesn't have a banner.") class F5ConfigParser(BaseBraceConfigParser): - """F5ConfigParser implementation fo ConfigParser Class.""" + """F5ConfigParser implementation for ConfigParser Class.""" - multiline_delimiters = ['"'] + multiline_delimiters: t.List[str] = ['"'] - def __init__(self, config): + @property + def banner_end(self) -> str: + """Demarcate End of Banner char(s).""" + raise NotImplementedError("F5 platform doesn't have a banner.") + + def __init__(self, config: str): """Create ConfigParser Object. Args: - config (str): The config text to parse. + config: The config text to parse. """ super().__init__(self._clean_config_f5(config)) - def _clean_config_f5(self, config_text): # pylint: disable=no-self-use + def _clean_config_f5(self, config_text: str) -> str: # pylint: disable=no-self-use """Removes all configuration items with 'ltm rule'. iRules are essentially impossible to parse with the lack of uniformity, therefore, this method ensures they are not included in ``self.config``. Args: - config_text (str): The entire config as a string. + config_text: The entire config as a string. Returns: - str: The sanitized config with all iRules (ltm rule) stanzas removed. + The sanitized config with all iRules (ltm rule) stanzas removed. """ config_split = config_text.split("ltm rule") if len(config_split) > 1: @@ -655,7 +715,7 @@ def _clean_config_f5(self, config_text): # pylint: disable=no-self-use final_config = config_text return final_config - def build_config_relationship(self): + def build_config_relationship(self) -> t.List[ConfigLine]: r"""Parse text tree of config lines and their parents. Example: @@ -700,15 +760,15 @@ def build_config_relationship(self): return self.config_lines - def _build_multiline_single_configuration_line(self, delimiter, prev_line): + def _build_multiline_single_configuration_line(self, delimiter: str, prev_line: str) -> t.Optional[ConfigLine]: r"""Concatenate Multiline strings between delimiter when newlines causes string to traverse multiple lines. Args: - delimiter (str): The text to look for to end multiline config. - prev_line (str): The text from the previously analyzed line. + delimiter: The text to look for to end multiline config. + prev_line: The text from the previously analyzed line. Returns: - ConfigLine: The multiline string text that was added to ``self.config_lines``. + The multiline string text that was added to ``self.config_lines``. 
Example: config = '''apm resource webtop-link aShare { @@ -748,31 +808,37 @@ def _build_multiline_single_configuration_line(self, delimiter, prev_line): self.config_lines[-1] = multiline_entry self._current_parents = self._current_parents[:-1] return multiline_entry + return None class JunosConfigParser(BaseSpaceConfigParser): """Junos config parser.""" - comment_chars = [] - banner_start = [] + comment_chars: t.List[str] = [] + banner_start: t.List[str] = [] + + @property + def banner_end(self) -> str: + """Demarcate End of Banner char(s).""" + raise NotImplementedError("Junos platform doesn't have a banner.") class ASAConfigParser(CiscoConfigParser): """Cisco ASA implementation of ConfigParser Class.""" - comment_chars = ["!", ":"] + comment_chars: t.List[str] = ["!", ":"] - def __init__(self, config): + def __init__(self, config: str): """Create ConfigParser Object. Args: - config (str): The config text to parse. + config: The config text to parse. """ - self.unique_config_lines = set() - self.same_line_children = set() + self.unique_config_lines: t.Set[ConfigLine] = set() + self.same_line_children: t.Set[ConfigLine] = set() super(ASAConfigParser, self).__init__(config) - def _update_config_lines(self, config_line): + def _update_config_lines(self, config_line: str) -> None: """Add a ``ConfigLine`` object to ``self.config_lines``. In addition to adding entries to config_lines, this also updates: @@ -791,7 +857,7 @@ def _update_config_lines(self, config_line): self.same_line_children.add(entry) self.unique_config_lines.add(entry) - def build_config_relationship(self): + def build_config_relationship(self) -> t.List[ConfigLine]: r"""Parse text tree of config lines and their parents. Example: @@ -820,7 +886,7 @@ def build_config_relationship(self): self._current_parents = (previous_config.config_line,) self.indent_level = self.get_leading_space_count(line) if line is not None and line[0].isspace(): - line = self._build_nested_config(line) + line = self._build_nested_config(line) # type: ignore else: self._current_parents = () @@ -835,10 +901,15 @@ def build_config_relationship(self): class FortinetConfigParser(BaseSpaceConfigParser): """Fortinet Fortios config parser.""" - comment_chars = [] - banner_start = [] + comment_chars: t.List[str] = [] + banner_start: t.List[str] = [] + + @property + def banner_end(self) -> str: + """Demarcate End of Banner char(s).""" + raise NotImplementedError("Fortinet FortiOS platform doesn't have a banner.") - def __init__(self, config): + def __init__(self, config: str): """Create ConfigParser Object. Args: @@ -847,14 +918,14 @@ def __init__(self, config): self.uncommon_data = self._get_uncommon_lines(config) super(FortinetConfigParser, self).__init__(config) - def is_end_next(self, line): # pylint: disable=no-self-use + def is_end_next(self, line: str) -> bool: # pylint: disable=no-self-use """Determine if line has 'end' or 'next' in it. Args: - line (str): A config line from the device. + line: A config line from the device. Returns: - bool: True if line has 'end' or 'next', else False. + True if line has 'end' or 'next', else False. 
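For the F5 changes, a hedged usage sketch showing brace-delimited children being parented on their enclosing stanza; per _clean_config_f5 above, any "ltm rule" (iRule) stanzas would be dropped before parsing. The exact parent strings stored by the brace parser may differ from what a reader expects, so no output is asserted here.

from netutils.config.parser import F5ConfigParser

config = (
    "ltm pool web_pool {\n"
    "    monitor http\n"
    "    members none\n"
    "}\n"
)
parser = F5ConfigParser(config)
for entry in parser.config_lines:
    print(entry.config_line, "->", entry.parents)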
Example: >>> FortinetConfigParser("config system virtual-switch").is_end_next("config system virtual-switch") @@ -868,7 +939,7 @@ def is_end_next(self, line): # pylint: disable=no-self-use return True return False - def _parse_out_offending(self, config): # pylint: disable=no-self-use + def _parse_out_offending(self, config: str) -> str: # pylint: disable=no-self-use """Preprocess out strings that offend the normal spaced configuration syntax. Args: @@ -881,11 +952,11 @@ def _parse_out_offending(self, config): # pylint: disable=no-self-use return re.sub(pattern, r"\1 [\2]\n", config) @property - def config_lines_only(self): + def config_lines_only(self) -> str: """Remove spaces and comments from config lines. Returns: - str: The non-space and non-comment lines from ``config``. + The non-space and non-comment lines from ``config``. """ # Specific to fortinet to remove uncommon data patterns for use later in _build_nested_config. self.config = self._parse_out_offending(self.config) @@ -898,11 +969,11 @@ def config_lines_only(self): self._config = "\n".join(config_lines) return self._config - def _get_uncommon_lines(self, config): # pylint: disable=no-self-use + def _get_uncommon_lines(self, config: str) -> t.Dict[str, str]: # pylint: disable=no-self-use """Regex to find replacemsg lines which can contain html/css data. Args: - config (str): Original config before parsing. + config: Original config before parsing. Returns: dict: dictionary with replace message name as key, html/css data as value. @@ -914,21 +985,24 @@ def _get_uncommon_lines(self, config): # pylint: disable=no-self-use result.update({group_match[0].split('"')[1]: group_match[1]}) return result - def _build_nested_config(self, line): + def _build_nested_config(self, line: str) -> t.Optional[str]: """Handle building child config sections. Args: - line (str): A configuration line from the configuration text. + line: A configuration line from the configuration text. Returns: - str: The next top-level configuration line in the configuration text. - None: When the last line of configuration text is a nested configuration line. + The next top-level configuration line in the configuration text or None when the last line of configuration + text is a nested configuration line. Raises: IndexError: When the number of parents does not match the expected deindent level. """ if "[" in line: - line = self.uncommon_data.get(line.split('"')[1]) + updated_line = self.uncommon_data.get(line.split('"')[1], None) + if not updated_line: + raise ValueError("Input line is malformed.") + line = updated_line self._update_config_lines(line) for line in self.generator_config: if not line[0].isspace(): @@ -949,15 +1023,21 @@ def _build_nested_config(self, line): self.indent_level = spaces self._update_config_lines(line) + return None class NokiaConfigParser(BaseSpaceConfigParser): """Nokia SrOS config parser.""" - comment_chars = ["#"] - banner_start = [] + comment_chars: t.List[str] = ["#"] + banner_start: t.List[str] = [] + + @property + def banner_end(self) -> str: + """Demarcate End of Banner char(s).""" + raise NotImplementedError("Nokia SROS platform doesn't have a banner.") - def __init__(self, config): + def __init__(self, config: str): """Create ConfigParser Object. 
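A hedged FortiOS usage sketch; the expectation (not asserted) is that the "set ..." lines end up parented under "config system global" and that the trailing "end" is filtered out via is_end_next during config_lines_only.

from netutils.config.parser import FortinetConfigParser

config = (
    "config system global\n"
    "    set hostname FW01\n"
    "    set admintimeout 30\n"
    "end\n"
)
parser = FortinetConfigParser(config)
for entry in parser.config_lines:
    print(entry.config_line, "->", entry.parents)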
Args: @@ -965,27 +1045,27 @@ def __init__(self, config): """ super(NokiaConfigParser, self).__init__(config) - def _is_section_title(self, line): # pylint: disable=no-self-use + def _is_section_title(self, line: str) -> bool: # pylint: disable=no-self-use """Determine if line is a section title in banner. Args: - line (str): A config line from the device. + line: A config line from the device. Returns: - bool: True if line is a sectiont, else False. + True if line is a section, else False. """ if re.match(r"^echo\s\".+\"", string=line): return True return False - def _get_section_title(self, line): # pylint: disable=no-self-use + def _get_section_title(self, line: str) -> t.Union[str, bool]: # pylint: disable=no-self-use """Determine section title from banner. Args: - line (str): A config line from the device that has been found to be a section title. + line: A config line from the device that has been found to be a section title. Returns: - str|bool: The section's title from the section banner, else False. + The section's title from the section banner, else False. """ section_title = re.match(r"^echo\s\"(?P.+)\"", string=line) if section_title: @@ -993,18 +1073,22 @@ def _get_section_title(self, line): # pylint: disable=no-self-use return False @property - def config_lines_only(self): + def config_lines_only(self) -> str: """Remove spaces and comments from config lines. Returns: - str: The non-space and non-comment lines from ``config``. + The non-space and non-comment lines from ``config``. """ if self._config is None: config_lines = [] for line in self.config.splitlines(): if line and not self.is_comment(line) and not line.isspace(): if self._is_section_title(line): - config_lines.append(self._get_section_title(line)) + section_title = self._get_section_title(line) + # At this point it is safe to assume that self._get_section_title returns a string, not a bool. + # The following line passes this assumption to Mypy. + assert isinstance(section_title, str) # nosec + config_lines.append(section_title) else: config_lines.append(line.rstrip()) self._config = "\n".join(config_lines) diff --git a/netutils/dns.py b/netutils/dns.py index a57ac16a..680751f7 100644 --- a/netutils/dns.py +++ b/netutils/dns.py @@ -2,17 +2,17 @@ import socket -def fqdn_to_ip(hostname): +def fqdn_to_ip(hostname: str) -> str: """Provides the IP address of a resolvable name on the machine it is running from. There are many reasons that a valid FQDN may not be resolvable, such as a network error from your machine to the DNS server, an upstream DNS issue, etc. Args: - hostname (str): An FQDN that may or may not be resolvable. + hostname: An FQDN that may or may not be resolvable. Returns: - ip (str): The IP Address of a valid FQDN. + The IP Address of a valid FQDN. Example: >>> from netutils.dns import fqdn_to_ip @@ -28,7 +28,7 @@ def fqdn_to_ip(hostname): return socket.getaddrinfo(hostname, 0)[0][4][0] -def is_fqdn_resolvable(hostname): +def is_fqdn_resolvable(hostname: str) -> bool: """Verifies whether a hostname is resolvable on the machine it is running from. 
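As with the Cisco banner regex, the Nokia section-title regex loses its named capture group in this patch view. A hedged sketch with the group name assumed ("section_name" is a guess, not taken from the source):

import re

# Group name is assumed; the patch only shows "(?P.+)".
section_title_re = re.compile(r"^echo\s\"(?P<section_name>.+)\"")

match = section_title_re.match('echo "System Configuration"')
if match:
    print(match.group("section_name"))  # System Configuration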
There are many reasons that a valid FQDN may not be resolvable, such as a network error diff --git a/netutils/interface.py b/netutils/interface.py index d0579261..3e597359 100644 --- a/netutils/interface.py +++ b/netutils/interface.py @@ -5,17 +5,18 @@ from abc import ABC, abstractmethod from functools import total_ordering from operator import itemgetter + from .constants import BASE_INTERFACES, REVERSE_MAPPING -def interface_range_expansion(interface_pattern): +def interface_range_expansion(interface_pattern: str) -> t.List[str]: """Expand interface pattern into a list of interfaces. Args: - interface_pattern (str): The string pattern that will be parsed to create the list of interfaces. + interface_pattern: The string pattern that will be parsed to create the list of interfaces. Returns: - list: Contains the expanded list of interfaces. + Contains the expanded list of interfaces. Example: >>> from netutils.interface import interface_range_expansion @@ -25,8 +26,8 @@ def interface_range_expansion(interface_pattern): ['FastEthernet1/0/10', 'FastEthernet1/0/11', 'FastEthernet1/0/12', 'FastEthernet1/0/13', 'FastEthernet1/0/14', 'FastEthernet1/0/15', 'FastEthernet2/0/10', 'FastEthernet2/0/11', 'FastEthernet2/0/12', 'FastEthernet2/0/13', 'FastEthernet2/0/14', 'FastEthernet2/0/15'] """ - def _range_expand(regex_match): - number_range = [] + def _range_expand(regex_match: str) -> t.List[int]: + number_range: t.List[int] = [] for value in regex_match.split(","): if "-" in value[1:]: first_number, second_number = value[1:].split("-", 1) @@ -35,7 +36,7 @@ def _range_expand(regex_match): number_range.append(int(value)) return number_range - def _pairwise(interface_constant): + def _pairwise(interface_constant: t.List[int]) -> t.List[t.Tuple[int, int]]: interface_constant_it = iter(interface_constant) return list(zip(interface_constant_it, interface_constant_it)) @@ -64,14 +65,14 @@ def _pairwise(interface_constant): return expanded_interfaces -def split_interface(interface): +def split_interface(interface: str) -> t.Tuple[str, str]: """Split an interface name based on first digit, slash, or space match. Args: - interface (str): The interface you are attempting to split. + interface: The interface you are attempting to split. Returns: - tuple: The split between the name of the interface the value. + The split between the name of the interface the value. Example: >>> from netutils.interface import split_interface @@ -86,7 +87,9 @@ def split_interface(interface): return (head, tail) -def canonical_interface_name(interface, addl_name_map=None, verify=False): +def canonical_interface_name( + interface: str, addl_name_map: t.Optional[t.Dict[str, str]] = None, verify: bool = False +) -> str: """Function to return an interface's canonical name (fully expanded name). Use of explicit matches used to indicate a clear understanding on any potential @@ -96,12 +99,12 @@ def canonical_interface_name(interface, addl_name_map=None, verify=False): easily troubleshot, found, or known. Args: - interface (str): The interface you are attempting to expand. - addl_name_map (dict, optional): A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"}. Defaults to None. - verify (bool, optional): Whether or not to verify the interface matches a known interface standard. Defaults to False. + interface: The interface you are attempting to expand. + addl_name_map: A dict containing key/value pairs that updates the base mapping. 
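A brief usage note tying the interface helpers above together; the expected values follow the standard BASE_INTERFACES mapping.

from netutils.interface import canonical_interface_name, split_interface

print(split_interface("Gi1/0/1"))           # ('Gi', '1/0/1')
print(canonical_interface_name("Gi1/0/1"))  # GigabitEthernet1/0/1
# addl_name_map lets an OS-specific spelling override the base mapping.
print(canonical_interface_name("Po40", addl_name_map={"Po": "Port-Channel"}))  # Port-Channel40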
Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"}. Defaults to None. + verify: Whether or not to verify the interface matches a known interface standard. Defaults to False. Returns: - str: The name of the interface in the long form. + The name of the interface in the long form. Example: >>> from netutils.interface import canonical_interface_name @@ -118,8 +121,8 @@ def canonical_interface_name(interface, addl_name_map=None, verify=False): if isinstance(addl_name_map, dict): name_map.update(addl_name_map) # check in dict for mapping - if name_map.get(interface_type): - long_int = name_map.get(interface_type) + if interface_type in name_map: + long_int = name_map[interface_type] return long_int + str(interface_number) if verify: raise ValueError(f"Verify interface on and no match found for {interface}") @@ -127,7 +130,13 @@ def canonical_interface_name(interface, addl_name_map=None, verify=False): return interface -def canonical_interface_name_list(interfaces, addl_name_map=None, verify=False, order=None, reverse=None): +def canonical_interface_name_list( + interfaces: t.List[str], + addl_name_map: t.Optional[t.Dict[str, str]] = None, + verify: bool = False, + order: t.Optional[str] = None, + reverse: bool = False, +) -> t.List[str]: """Function to return a list of interface's canonical name (fully expanded name). Use of explicit matches used to indicate a clear understanding on any potential @@ -137,14 +146,14 @@ def canonical_interface_name_list(interfaces, addl_name_map=None, verify=False, easily troubleshot, found, or known. Args: - interfaces (list): List of interfaces you are attempting to expand. - addl_name_map (dict, optional): A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"}. Defaults to None. - verify (bool, optional): Whether or not to verify the interface matches a known interface standard. Defaults to False. - order (str, optional): Determines what order the list of interfaces should be returned in. Defaults to None. - reverse (bool, optional): Specify if the order of the list should be reversed when setting an order. Defaults to None. + interfaces: List of interfaces you are attempting to expand. + addl_name_map: A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"}. Defaults to None. + verify: Whether or not to verify the interface matches a known interface standard. Defaults to False. + order: Determines what order the list of interfaces should be returned in. Defaults to None. + reverse: Specify if the order of the list should be reversed when setting an order. Defaults to None. Returns: - list: List of the interfaces in their long form. + List of the interfaces in their long form. Raises: ValueError: Raised if any interface name in list cannot be converted to its long form and verify parameter is set to true. 
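A hedged sketch of the list variant with ordering; "alphabetical" is assumed to be a registered key of INTERFACE_LIST_ORDERING_OPTIONS, and an unknown key now surfaces as an explicit ValueError rather than a failure on a None lookup.

from netutils.interface import canonical_interface_name_list

interfaces = ["Gi1/0/1", "Te1/0/1", "Gi1/0/2"]
# Ordering key assumed; see INTERFACE_LIST_ORDERING_OPTIONS for the supported names.
print(canonical_interface_name_list(interfaces, order="alphabetical"))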
@@ -178,7 +187,10 @@ def canonical_interface_name_list(interfaces, addl_name_map=None, verify=False, raise ValueError(f"Verify interface on and no match found for {no_match_string}") if order: - canonical_interface_list = INTERFACE_LIST_ORDERING_OPTIONS.get(order)(canonical_interface_list) + order_function = INTERFACE_LIST_ORDERING_OPTIONS.get(order, None) + if not order_function: + raise ValueError(f"No order function available called {order}") + canonical_interface_list = order_function(canonical_interface_list) if reverse: canonical_interface_list = _reverse_list(canonical_interface_list) @@ -186,17 +198,22 @@ def canonical_interface_name_list(interfaces, addl_name_map=None, verify=False, return canonical_interface_list -def abbreviated_interface_name(interface, addl_name_map=None, addl_reverse_map=None, verify=False): +def abbreviated_interface_name( + interface: str, + addl_name_map: t.Optional[t.Dict[str, str]] = None, + addl_reverse_map: t.Optional[t.Dict[str, str]] = None, + verify: bool = False, +) -> str: """Function to return an abbreviated representation of the interface name. Args: - interface (str): The interface you are attempting to shorten. - addl_name_map (dict, optional): A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"}. Defaults to None. - addl_reverse_map (dict, optional): A dict containing key/value pairs that updates the abbreviated mapping. Defaults to None. - verify (bool, optional): Whether or not to verify the interface matches a known interface standard. Defaults to False. + interface: The interface you are attempting to shorten. + addl_name_map: A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"}. Defaults to None. + addl_reverse_map: A dict containing key/value pairs that updates the abbreviated mapping. Defaults to None. + verify: Whether or not to verify the interface matches a known interface standard. Defaults to False. Returns: - str: The name of the interface in the abbreviated form. + The name of the interface in the abbreviated form. Example: >>> abbreviated_interface_name("GigabitEthernet1/0/1") @@ -219,9 +236,8 @@ def abbreviated_interface_name(interface, addl_name_map=None, addl_reverse_map=N rev_name_map.update(addl_reverse_map) # Try to ensure canonical type. - if name_map.get(interface_type): - canonical_type = name_map.get(interface_type) - else: + canonical_type = name_map.get(interface_type, None) + if not canonical_type: canonical_type = interface_type try: @@ -237,7 +253,9 @@ def abbreviated_interface_name(interface, addl_name_map=None, addl_reverse_map=N return interface -@total_ordering +# Mypy is currently (0.961) unable to handle the combination of ABC and @total_ordering, see here: +# https://github.com/python/mypy/issues/5374 +@total_ordering # type: ignore class CharacterClass(ABC): """CharacterClass embodies the state needed to sort interfaces.""" @@ -247,10 +265,10 @@ def __init__(self, val: str, terminal: bool = False) -> None: # noqa: D107 super().__init__() @abstractmethod - def __lt__(self, other) -> bool: # noqa: D105 + def __lt__(self, other: "CharacterClass") -> bool: # noqa: D105 ... - def __eq__(self, other) -> bool: # noqa: D105 + def __eq__(self, other: t.Any) -> t.Any: # noqa: D105 return self.weight == other.weight and self.val == other.val @property @@ -260,7 +278,7 @@ def weight(self) -> int: ... 
@property - def terminal(self): + def terminal(self) -> bool: """Flag whether a node is terminal.""" return self._terminal @@ -280,7 +298,7 @@ def __hash__(self) -> int: # noqa: D105 class CCString(CharacterClass): """Strings are sorted lexicographically.""" - def __lt__(self, other) -> bool: # noqa: D105 + def __lt__(self, other: "CharacterClass") -> bool: # noqa: D105 return self.weight < other.weight or self.val < other.val def __repr__(self) -> str: # noqa: D105 @@ -294,7 +312,7 @@ def weight(self) -> int: # noqa: D107,D102 class CCInt(CharacterClass): """Ints must be sorted canonically because '11' < '5'.""" - def __lt__(self, other) -> bool: # noqa: D105 + def __lt__(self, other: "CharacterClass") -> bool: # noqa: D105 return self.weight < other.weight or int(self.val) < int(other.val) def __repr__(self) -> str: # noqa: D105 @@ -310,7 +328,7 @@ class CCSeparator(CharacterClass): weights: t.Dict[str, int] = {".": 10, "/": 20} - def __lt__(self, other) -> bool: # noqa: D105 + def __lt__(self, other: "CharacterClass") -> bool: # noqa: D105 return self.weight < other.weight or self.weights.get(self.val, 0) < self.weights.get(other.val, 0) def __repr__(self) -> str: # noqa: D105 @@ -321,7 +339,7 @@ def weight(self) -> int: # noqa: D102 return 30 -def _CCfail(*args): # pylint: disable=C0103 +def _CCfail(*args: t.Any) -> t.NoReturn: # pylint: disable=C0103 """Helper to raise an exception on a bad character match.""" raise ValueError(f"unknown character '{args[0][0]}'.") @@ -340,6 +358,9 @@ def _split_interface_tuple(interface: str) -> t.Tuple[CharacterClass, ...]: ] while idx < len(interface): for regex, cls in regexes: + # Hint for Mypy to realize that both the classes and the function on the right side of the regexes tuples + # are in fact callable. + assert callable(cls) # nosec part = "" while idx < len(interface) and re.match(regex, interface[idx]): part += interface[idx] @@ -353,11 +374,11 @@ def _split_interface_tuple(interface: str) -> t.Tuple[CharacterClass, ...]: return tail -def _reverse_list(interface_list): +def _reverse_list(interface_list: t.List[str]) -> t.List[str]: """Reverses an alphabetical list of interfaces. Args: - interface_list (list): Alphabetically sorted list of interfaces. + interface_list: Alphabetically sorted list of interfaces. """ # Convert interface name into Tuple of : Text, Int and Separator split_intf = re.compile(r"([^\W0-9]+|[0-9]+|\W)") @@ -434,20 +455,25 @@ def sort_interface_list(interfaces: t.List[str]) -> t.List[str]: def abbreviated_interface_name_list( # pylint: disable=R0913, R0914 - interfaces, addl_name_map=None, addl_reverse_map=None, verify=False, order=None, reverse=None -): + interfaces: t.List[str], + addl_name_map: t.Optional[t.Dict[str, str]] = None, + addl_reverse_map: t.Optional[t.Dict[str, str]] = None, + verify: bool = False, + order: t.Optional[str] = None, + reverse: bool = False, +) -> t.List[str]: """Function to return a list of interface's abbreviated name. Args: - interfaces (list): List of interface names you are attempting to abbreviate. - addl_name_map (dict, optional): A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"}. Defaults to None. - addl_reverse_map (dict, optional): A dict containing key/value pairs that updates the abbreviated mapping. Defaults to None. - verify (bool, optional): Whether or not to verify the interface matches a known interface standard. Defaults to False. 
- order (str, optional): Determines what order the list of interfaces should be returned in. Defaults to None. - reverse (bool, optional): Specify if the order of the list should be reversed when setting an order. Defaults to None. + interfaces: List of interface names you are attempting to abbreviate. + addl_name_map: A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"}. Defaults to None. + addl_reverse_map: A dict containing key/value pairs that updates the abbreviated mapping. Defaults to None. + verify: Whether or not to verify the interface matches a known interface standard. Defaults to False. + order: Determines what order the list of interfaces should be returned in. Defaults to None. + reverse: Specify if the order of the list should be reversed when setting an order. Defaults to None. Returns: - list: List of the interfaces in their abbreviated form. + List of the interfaces in their abbreviated form. Raises: ValueError: Raised if any interface name in list cannot be converted to its abbreviated form and verify parameter is set to true. @@ -483,9 +509,8 @@ def abbreviated_interface_name_list( # pylint: disable=R0913, R0914 for interface in interfaces: interface_type, interface_number = split_interface(interface) # Try to ensure canonical type. - if name_map.get(interface_type): - canonical_type = name_map.get(interface_type) - else: + canonical_type = name_map.get(interface_type, None) + if not canonical_type: canonical_type = interface_type try: @@ -500,7 +525,10 @@ def abbreviated_interface_name_list( # pylint: disable=R0913, R0914 raise ValueError(f"Verify interface on and no match found for {no_match_string}") if order: - abbreviated_interface_list = INTERFACE_LIST_ORDERING_OPTIONS.get(order)(abbreviated_interface_list) + order_function = INTERFACE_LIST_ORDERING_OPTIONS.get(order, None) + if not order_function: + raise ValueError(f"No order function available called {order}") + abbreviated_interface_list = order_function(abbreviated_interface_list) if reverse: abbreviated_interface_list = _reverse_list(abbreviated_interface_list) @@ -508,11 +536,11 @@ def abbreviated_interface_name_list( # pylint: disable=R0913, R0914 return abbreviated_interface_list -def _check_order_option_exists(order): +def _check_order_option_exists(order: str) -> None: """Check if the given order for an interface list exists. Args: - order (str): Requested ordering of the interface list. + order: Requested ordering of the interface list. Raises: ValueError: Raised the given order is not a proper ordering type. @@ -521,7 +549,7 @@ def _check_order_option_exists(order): raise ValueError(f"{order} is not one of the supported orderings") -def _ranges_in_list(numbers: t.List[int]): +def _ranges_in_list(numbers: t.List[int]) -> t.List[t.List[int]]: """Find contiguous ranges in a list of numbers. 
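The helper above relies on the classic groupby idiom where consecutive integers share the same index-minus-value key; a standalone sketch of that idiom (not the function body itself):

import itertools
from operator import itemgetter

numbers = [1, 2, 3, 7, 8, 10]
ranges = [
    list(map(itemgetter(1), group))
    # Consecutive values keep a constant (index - value), so each run forms one group.
    for _, group in itertools.groupby(enumerate(numbers), lambda pair: pair[0] - pair[1])
]
print(ranges)  # [[1, 2, 3], [7, 8], [10]]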
Example: @@ -532,7 +560,7 @@ def _ranges_in_list(numbers: t.List[int]): numbers: list of numbers Returns: - list: list of ranges in input + list of ranges in input """ return [list(map(itemgetter(1), g)) for k, g in itertools.groupby(enumerate(numbers), lambda x: x[0] - x[1])] @@ -554,9 +582,9 @@ def interface_range_compress(interface_list: t.List[str]) -> t.List[str]: interface_list: list of interfaces Returns: - list: list of interface ranges + list of interface ranges """ - result_dict = {} + result_dict: t.Dict[str, t.List[int]] = {} final_result_list = [] sorted_ints = [_split_interface_tuple(x) for x in sort_interface_list(interface_list)] if not sorted_ints: diff --git a/netutils/ip.py b/netutils/ip.py index 497797bd..c70d9966 100644 --- a/netutils/ip.py +++ b/netutils/ip.py @@ -1,18 +1,20 @@ """Functions for working with IP addresses.""" import ipaddress +import typing as t from operator import attrgetter + from netutils.constants import IPV4_MASKS, IPV6_MASKS -def ipaddress_address(ip, attr): +def ipaddress_address(ip: str, attr: str) -> t.Any: """Convenience function primarily built to expose ipaddress.ip_address to Jinja. Args: - ip_addr (str): IP Address str compliant with ipaddress.ip_address inputs. - attr (atr): An attribute in string dotted format. + ip: IP Address str compliant with ipaddress.ip_address inputs. + attr: An attribute in string dotted format. Returns: - str: Returns the value provided by the ipaddress.ip_address attribute provided. + Returns the value provided by the ipaddress.ip_address attribute provided. Example: >>> from netutils.ip import ipaddress_address @@ -31,15 +33,15 @@ def ipaddress_address(ip, attr): return retrieved_method -def ipaddress_interface(ip, attr): +def ipaddress_interface(ip: str, attr: str) -> t.Any: """Convenience function primarily built to expose ipaddress.ip_interface to Jinja. Args: - ip_interface (str): IP interface str compliant with ipaddress.ip_interface inputs. - attr (atr): An attribute in string dotted format. + ip: IP interface str compliant with ipaddress.ip_interface inputs. + attr: An attribute in string dotted format. Returns: - str: Returns the value provided by the ipaddress.ip_interface attribute provided. + Returns the value provided by the ipaddress.ip_interface attribute provided. Example: >>> from netutils.ip import ipaddress_interface @@ -55,15 +57,15 @@ def ipaddress_interface(ip, attr): return retrieved_method -def ipaddress_network(ip, attr): +def ipaddress_network(ip: str, attr: str) -> t.Any: """Convenience function primarily built to expose ipaddress.ip_network to Jinja. Args: - ip_network (str): IP network str compliant with ipaddress.ip_network inputs. - attr (atr): An attribute in string dotted format. + ip: IP network str compliant with ipaddress.ip_network inputs. + attr: An attribute in string dotted format. Returns: - str: Returns the value provided by the ipaddress.ip_network attribute provided. + Returns the value provided by the ipaddress.ip_network attribute provided. Example: >>> from netutils.ip import ipaddress_network @@ -80,14 +82,14 @@ def ipaddress_network(ip, attr): return retrieved_method -def ip_to_hex(ip): +def ip_to_hex(ip: str) -> str: """Converts an IP address in string format to a hex string. Args: - ip (str): An IP address in string format that is able to be converted by `ipaddress` library. + ip: An IP address in string format that is able to be converted by `ipaddress` library. Returns: - str: HEX value of the IP address. + HEX value of the IP address. 
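A quick usage note for the Jinja-oriented ipaddress wrappers and ip_to_hex; the attribute names passed in are simply whatever the stdlib ipaddress objects expose.

from netutils.ip import ip_to_hex, ipaddress_address, ipaddress_network

print(ipaddress_address("10.1.1.1", "version"))               # 4
print(ipaddress_network("192.168.1.0/24", "num_addresses"))   # 256
print(ip_to_hex("10.1.1.1"))                                  # 0a010101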
Example: >>> from netutils.ip import ip_to_hex @@ -99,15 +101,15 @@ def ip_to_hex(ip): return str(hex(int(ip_obj)))[2:].zfill(int(ip_obj.max_prefixlen / 4)) -def ip_addition(ip, val): +def ip_addition(ip: str, val: int) -> str: """Adds an integer to an IP address. Args: - ip (str): An IP address in string format that is able to be converted by `ipaddress` library. - val (int): An integer of which the IP address should be added by. + ip: An IP address in string format that is able to be converted by `ipaddress` library. + val: An integer of which the IP address should be added by. Returns: - str: IP address formatted string with the newly added IP address. + IP address formatted string with the newly added IP address. Example: >>> from netutils.ip import ip_addition @@ -118,14 +120,14 @@ def ip_addition(ip, val): return str(ipaddress.ip_address(ip) + val) -def ip_to_bin(ip): +def ip_to_bin(ip: str) -> str: """Converts an IP address in string format to a binary string. Args: - ip (str): An IP address in string format that is able to be converted by `ipaddress` library. + ip: An IP address in string format that is able to be converted by `ipaddress` library. Returns: - str: Binary value of the IP address. + Binary value of the IP address. Example: >>> from netutils.ip import ip_to_bin @@ -137,15 +139,15 @@ def ip_to_bin(ip): return bin(int(ip_obj))[2:].zfill(ip_obj.max_prefixlen) -def ip_subtract(ip, val): +def ip_subtract(ip: str, val: int) -> str: """Subtract an integer to an IP address. Args: - ip (str): An IP address in string format that is able to be converted by `ipaddress` library. - val (int): An integer of which the IP address should be subtracted by. + ip: An IP address in string format that is able to be converted by `ipaddress` library. + val: An integer of which the IP address should be subtracted by. Returns: - str: IP address formatted string with the newly subtracted IP address. + IP address formatted string with the newly subtracted IP address. Example: >>> from netutils.ip import ip_subtract @@ -156,14 +158,14 @@ def ip_subtract(ip, val): return str(ipaddress.ip_address(ip) - val) -def is_ip(ip): +def is_ip(ip: str) -> bool: """Verifies whether or not a string is a valid IP address. Args: - ip (str): An IP address in string format that is able to be converted by `ipaddress` library. + ip: An IP address in string format that is able to be converted by `ipaddress` library. Returns: - bool: The result as to whether or not the string is a valid IP address. + The result as to whether or not the string is a valid IP address. Example: >>> from netutils.ip import is_ip @@ -174,20 +176,20 @@ def is_ip(ip): >>> """ try: - ip = ipaddress.ip_address(ip) + ipaddress.ip_address(ip) return True except ValueError: return False -def is_netmask(netmask): +def is_netmask(netmask: str) -> bool: """Verifies whether or not a string is a valid subnet mask. Args: - netmask (str): A subnet mask in + netmask: A subnet mask in Returns: - bool: True if string is a valid subnet mask. Otherwise, false. + True if string is a valid subnet mask. Otherwise, false. Example: >>> from netutils.ip import is_netmask @@ -204,14 +206,14 @@ def is_netmask(netmask): return False -def netmask_to_cidr(netmask): +def netmask_to_cidr(netmask: str) -> int: """Creates a CIDR notation of a given subnet mask in decimal format. Args: - netmask (str): A subnet mask in decimal format. + netmask: A subnet mask in decimal format. Returns: - cidr (str): CIDR representation of subnet mask. + CIDR representation of subnet mask. 
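For completeness, a quick usage note covering the arithmetic and netmask helpers in this hunk:

from netutils.ip import ip_addition, ip_subtract, is_ip, is_netmask, netmask_to_cidr

print(ip_addition("10.0.0.1", 5))        # 10.0.0.6
print(ip_subtract("10.0.0.10", 9))       # 10.0.0.1
print(is_ip("300.1.1.1"))                # False
print(is_netmask("255.255.254.0"))       # True
print(netmask_to_cidr("255.255.254.0"))  # 23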
Example: >>> from netutils.ip import netmask_to_cidr @@ -225,16 +227,16 @@ def netmask_to_cidr(netmask): raise ValueError("Subnet mask is not valid.") -def cidr_to_netmask(cidr): +def cidr_to_netmask(cidr: int) -> str: """Creates a decimal format of a CIDR value. **IPv4** only. For IPv6, please use `cidr_to_netmaskv6`. Args: - cidr (int): A CIDR value. + cidr: A CIDR value. Returns: - netmask (str): Decimal format representation of CIDR value. + Decimal format representation of CIDR value. Example: >>> from netutils.ip import cidr_to_netmask @@ -248,14 +250,14 @@ def cidr_to_netmask(cidr): raise ValueError("Parameter must be an integer between 0 and 32.") -def cidr_to_netmaskv6(cidr): +def cidr_to_netmaskv6(cidr: int) -> str: """Creates a decimal format of a CIDR value. Args: - cidr (int): A CIDR value. + cidr: A CIDR value. Returns: - netmask (str): Decimal format (IPv6) representation of CIDR value. + Decimal format (IPv6) representation of CIDR value. Example: >>> from netutils.ip import cidr_to_netmaskv6 @@ -269,14 +271,14 @@ def cidr_to_netmaskv6(cidr): raise ValueError("Parameter must be an integer between 0 and 128.") -def get_all_host(ip_network): +def get_all_host(ip_network: str) -> t.Generator[str, None, None]: """Given a network, return the list of usable IP addresses. Args: - ip_network (str): An IP network in string format that is able to be converted by `ipaddress` library. + ip_network: An IP network in string format that is able to be converted by `ipaddress` library. Returns: - generator: Generator of usable IP Addresses within network. + Generator of usable IP Addresses within network. Example: >>> from netutils.ip import get_all_host @@ -287,14 +289,14 @@ def get_all_host(ip_network): return (str(ip) for ip in ipaddress.ip_network(ip_network).hosts()) -def get_broadcast_address(ip_network): +def get_broadcast_address(ip_network: str) -> str: """Given a network, determine the broadcast IP address. Args: - ip_network (str): An IP network in string format that is able to be converted by `ipaddress` library. + ip_network: An IP network in string format that is able to be converted by `ipaddress` library. Returns: - str: IP address formatted string with the broadcast IP address in the network. + IP address formatted string with the broadcast IP address in the network. Example: >>> from netutils.ip import get_broadcast_address @@ -305,14 +307,14 @@ def get_broadcast_address(ip_network): return str(ipaddress.ip_network(ip_network).broadcast_address) -def get_first_usable(ip_network): +def get_first_usable(ip_network: str) -> str: """Given a network, determine the first usable IP address. Args: - ip_network (str): An IP network in string format that is able to be converted by `ipaddress` library. + ip_network: An IP network in string format that is able to be converted by `ipaddress` library. Returns: - str: IP address formatted string with the first usable IP address in the network. + IP address formatted string with the first usable IP address in the network. Example: >>> from netutils.ip import get_first_usable @@ -326,14 +328,14 @@ def get_first_usable(ip_network): return str(net[1]) -def get_peer_ip(ip_interface): +def get_peer_ip(ip_interface: str) -> str: """Given an IP interface (an ip address, with subnet mask) that is on a peer network, return the peer IP. Args: - ip_interface (str): An IP interface in string format that is able to be converted by `ipaddress` library. + ip_interface: An IP interface in string format that is able to be converted by `ipaddress` library. 
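And the corresponding network-derived lookups:

from netutils.ip import cidr_to_netmask, get_broadcast_address, get_first_usable, get_peer_ip

print(cidr_to_netmask(23))                      # 255.255.254.0
print(get_first_usable("192.168.1.0/24"))       # 192.168.1.1
print(get_broadcast_address("192.168.1.0/24"))  # 192.168.1.255
print(get_peer_ip("10.0.0.1/30"))               # 10.0.0.2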
Returns: - str: IP address formatted string with the corresponding peer IP. + IP address formatted string with the corresponding peer IP. Example: >>> from netutils.ip import get_peer_ip @@ -364,14 +366,14 @@ def get_peer_ip(ip_interface): return val[0] -def get_usable_range(ip_network): +def get_usable_range(ip_network: str) -> str: """Given a network, return the string of usable IP addresses. Args: - ip_network (str): An IP network in string format that is able to be converted by `ipaddress` library. + ip_network: An IP network in string format that is able to be converted by `ipaddress` library. Returns: - str: String of usable IP Addresses within network. + String of usable IP Addresses within network. Example: >>> from netutils.ip import get_usable_range diff --git a/netutils/lib_mapper.py b/netutils/lib_mapper.py index c55c7c9a..44df1f28 100644 --- a/netutils/lib_mapper.py +++ b/netutils/lib_mapper.py @@ -1,8 +1,9 @@ """Variable definitions to map from network automation library to network automation library.""" import copy +import typing as t -_NETMIKO_LIB_MAPPER = { +_NETMIKO_LIB_MAPPER: t.Dict[str, t.Dict[str, str]] = { "a10": {}, "accedian": {}, "adtran_os": {}, @@ -130,6 +131,7 @@ "fortios": "fortinet", "huawei": "huawei_vrp", "ios": "cisco_ios", + "nxos_ssh": "cisco_nxos", "nxos": "cisco_nxos", "iosxr": "cisco_xr", "junos": "juniper_junos", diff --git a/netutils/mac.py b/netutils/mac.py index 73d5affd..ee987460 100644 --- a/netutils/mac.py +++ b/netutils/mac.py @@ -1,19 +1,22 @@ """Functions for working with MAC addresses.""" import re +import typing as t from functools import wraps + from .constants import MAC_CREATE, MAC_REGEX -def _valid_mac(func): +def _valid_mac(func: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: """Decorator to validate a MAC address is valid.""" @wraps(func) - def decorated(*args, **kwargs): + def decorated(*args: t.Any, **kwargs: t.Any) -> t.Any: if kwargs.get("mac"): mac = kwargs.get("mac") else: mac = args[0] + assert isinstance(mac, str) # nosec if not is_valid_mac(mac): raise ValueError(f"There was not a valid mac address in: `{mac}`") return func(*args, **kwargs) @@ -21,14 +24,14 @@ def decorated(*args, **kwargs): return decorated -def is_valid_mac(mac): +def is_valid_mac(mac: str) -> bool: """Verifies whether or not a string is a valid MAC address. Args: - mac (str): A MAC address in string format that matches one of the defined regex patterns. + mac: A MAC address in string format that matches one of the defined regex patterns. Returns: - bool: The result as to whether or not the string is a valid MAC address. + The result as to whether or not the string is a valid MAC address. Example: >>> from netutils.mac import is_valid_mac @@ -45,15 +48,15 @@ def is_valid_mac(mac): @_valid_mac -def mac_to_format(mac, frmt="MAC_NO_SPECIAL"): +def mac_to_format(mac: str, frmt: str = "MAC_NO_SPECIAL") -> str: """Converts the MAC address to a specific format. Args: - mac (str): A MAC address in string format that matches one of the defined regex patterns. - frmt (str): A format in which the MAC address should be returned in. + mac: A MAC address in string format that matches one of the defined regex patterns. + frmt: A format in which the MAC address should be returned in. Returns: - str: A MAC address in the specified format. + A MAC address in the specified format. 
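The nxos_ssh addition means the NAPALM mapping now resolves both NX-OS driver names to the same normalized platform. A hedged check, assuming the dictionary is exposed as NAPALM_LIB_MAPPER per the module's naming convention:

from netutils.lib_mapper import NAPALM_LIB_MAPPER  # name assumed from the module's convention

print(NAPALM_LIB_MAPPER["nxos"])      # cisco_nxos
print(NAPALM_LIB_MAPPER["nxos_ssh"])  # cisco_nxos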
Example: >>> from netutils.mac import mac_to_format @@ -66,18 +69,18 @@ def mac_to_format(mac, frmt="MAC_NO_SPECIAL"): mac = mac_normalize(mac) count = MAC_CREATE[frmt]["count"] char = MAC_CREATE[frmt]["char"] - return char.join([mac[i : i + count] for i in range(0, len(mac), count)]) # noqa: E203 + return char.join([mac[i : i + count] for i in range(0, len(mac), count)]) # type: ignore # noqa: E203 @_valid_mac -def mac_to_int(mac): +def mac_to_int(mac: str) -> int: """Converts the MAC address to an integer. Args: - mac (str): A MAC address in string format that matches one of the defined regex patterns. + mac: A MAC address in string format that matches one of the defined regex patterns. Returns: - int: The valid MAC address converted to an integer. + The valid MAC address converted to an integer. Example: >>> from netutils.mac import mac_to_int @@ -89,14 +92,14 @@ def mac_to_int(mac): @_valid_mac -def mac_type(mac): # pylint: disable=inconsistent-return-statements +def mac_type(mac: str) -> t.Optional[str]: """Retuns the "type" of MAC address, as defined by the regex pattern names. Args: - mac (str): A MAC address in string format that matches one of the defined regex patterns. + mac: A MAC address in string format that matches one of the defined regex patterns. Returns: - str: The regex pattern type of the MAC address. + The regex pattern type of the MAC address. Example: >>> from netutils.mac import mac_type @@ -109,17 +112,18 @@ def mac_type(mac): # pylint: disable=inconsistent-return-statements for name, pattern in MAC_REGEX.items(): if re.fullmatch(pattern, mac): return name + return None @_valid_mac -def mac_normalize(mac): +def mac_normalize(mac: str) -> str: """Retuns the MAC address with only the address, and no special characters. Args: - mac (str): A MAC address in string format that matches one of the defined regex patterns. + mac: A MAC address in string format that matches one of the defined regex patterns. Returns: - str: The MAC address with no special characters. + The MAC address with no special characters. 
Example: >>> from netutils.mac import mac_normalize diff --git a/netutils/password.py b/netutils/password.py index 814eaf6c..119c2a53 100644 --- a/netutils/password.py +++ b/netutils/password.py @@ -2,78 +2,81 @@ import crypt import random -import string import secrets +import string import sys +import ast +import typing as t from functools import wraps # Code example from Python docs ALPHABET = string.ascii_letters + string.digits DEFAULT_PASSWORD_CHARS = "".join((string.ascii_letters + string.digits + ".,:-_")) DEFAULT_PASSWORD_LENGTH = 20 +ENCRYPT_TYPE7_LENGTH = 25 XLAT = [ - 0x64, - 0x73, - 0x66, - 0x64, - 0x3B, - 0x6B, - 0x66, - 0x6F, - 0x41, - 0x2C, - 0x2E, - 0x69, - 0x79, - 0x65, - 0x77, - 0x72, - 0x6B, - 0x6C, - 0x64, - 0x4A, - 0x4B, - 0x44, - 0x48, - 0x53, - 0x55, - 0x42, - 0x73, - 0x67, - 0x76, - 0x63, - 0x61, - 0x36, - 0x39, - 0x38, - 0x33, - 0x34, - 0x6E, - 0x63, - 0x78, - 0x76, - 0x39, - 0x38, - 0x37, - 0x33, - 0x32, - 0x35, - 0x34, - 0x6B, - 0x3B, - 0x66, - 0x67, - 0x38, - 0x37, + "0x64", + "0x73", + "0x66", + "0x64", + "0x3b", + "0x6b", + "0x66", + "0x6f", + "0x41", + "0x2c", + "0x2e", + "0x69", + "0x79", + "0x65", + "0x77", + "0x72", + "0x6b", + "0x6c", + "0x64", + "0x4a", + "0x4b", + "0x44", + "0x48", + "0x53", + "0x55", + "0x42", + "0x73", + "0x67", + "0x76", + "0x63", + "0x61", + "0x36", + "0x39", + "0x38", + "0x33", + "0x34", + "0x6e", + "0x63", + "0x78", + "0x76", + "0x39", + "0x38", + "0x37", + "0x33", + "0x32", + "0x35", + "0x34", + "0x6b", + "0x3b", + "0x66", + "0x67", + "0x38", + "0x37", ] -def _fail_on_mac(func): +def _fail_on_mac(func: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: """There is an issue with Macintosh for encryption.""" @wraps(func) - def decorated(*args, **kwargs): + def decorated(*args: t.Any, **kwargs: t.Any) -> t.Any: if sys.platform == "darwin": raise ValueError("Macintosh is not supported, see https://bugs.python.org/issue33213 for upstream issue.") return func(*args, **kwargs) @@ -81,16 +84,18 @@ def decorated(*args, **kwargs): return decorated -def compare_type5(unencrypted_password, encrypted_password, return_original=False): +def compare_type5( + unencrypted_password: str, encrypted_password: str, return_original: bool = False +) -> t.Union[str, bool]: """Given an encrypted and unencrypted password of Cisco Type 5 password, compare if they are a match. Args: - unencrypted_password (str): A password that has not been encrypted, and will be compared against. - encrypted_password (str): A password that has been encrypted. - return_original (bool, optional): Whether or not to return the original, this is helpful when used to populate the configuration. Defaults to False. + unencrypted_password: A password that has not been encrypted, and will be compared against. + encrypted_password: A password that has been encrypted. + return_original: Whether or not to return the original, this is helpful when used to populate the configuration. Defaults to False. Returns: - bool: Whether or not the password is as compared to. + Whether or not the password is as compared to. 
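The XLAT entries are now stored as "0x.." strings and converted back to integers with ast.literal_eval at use time (see encrypt_type7 further down). Decoded as characters, the table spells out the well-known Type 7 key string; a small check using the first entries shown above:

    import ast

    XLAT_SAMPLE = ["0x64", "0x73", "0x66", "0x64", "0x3b", "0x6b", "0x66", "0x6f"]  # first eight entries
    print("".join(chr(ast.literal_eval(item)) for item in XLAT_SAMPLE))  # 'dsfd;kfo'
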
Example: >>> from netutils.password import compare_type5 @@ -108,16 +113,18 @@ def compare_type5(unencrypted_password, encrypted_password, return_original=Fals return False -def compare_type7(unencrypted_password, encrypted_password, return_original=False): +def compare_type7( + unencrypted_password: str, encrypted_password: str, return_original: bool = False +) -> t.Union[str, bool]: """Given an encrypted and unencrypted password of Cisco Type 7 password, compare if they are a match. Args: - unencrypted_password (str): A password that has not been encrypted, and will be compared against. - encrypted_password (str): A password that has been encrypted. - return_original (bool, optional): Whether or not to return the original, this is helpful when used to populate the configuration. Defaults to False. + unencrypted_password: A password that has not been encrypted, and will be compared against. + encrypted_password: A password that has been encrypted. + return_original: Whether or not to return the original, this is helpful when used to populate the configuration. Defaults to False. Returns: - bool: Whether or not the password is as compared to. + Whether or not the password is as compared to. Example: >>> from netutils.password import compare_type7 @@ -134,14 +141,14 @@ def compare_type7(unencrypted_password, encrypted_password, return_original=Fals return False -def decrypt_type7(encrypted_password): +def decrypt_type7(encrypted_password: str) -> str: """Given an unencrypted password of Cisco Type 7 password decrypt it. Args: - encrypted_password (str): A password that has been encrypted, and will be decrypted. + encrypted_password: A password that has been encrypted, and will be decrypted. Returns: - string: The unencrypted_password password. + The unencrypted_password password. Example: >>> from netutils.password import decrypt_type7 @@ -165,16 +172,16 @@ def decrypt_type7(encrypted_password): @_fail_on_mac -def encrypt_type5(unencrypted_password, salt=None, salt_len=4): +def encrypt_type5(unencrypted_password: str, salt: t.Optional[str] = None, salt_len: int = 4) -> str: """Given an unencrypted password of Cisco Type 5 password, encrypt it. Args: - unencrypted_password (str): A password that has not been encrypted, and will be compared against. - salt (str, optional): A random set of characters that can be set by the operator. Defaults to random generated one. - salt_len (int, optional): The number of random set of characters, when not manually set. Defaults to 4. + unencrypted_password: A password that has not been encrypted, and will be compared against. + salt: A random set of characters that can be set by the operator. Defaults to random generated one. + salt_len: The number of random set of characters, when not manually set. Defaults to 4. Returns: - string: The encrypted password. + The encrypted password. Example: >>> from netutils.password import encrypt_type5 @@ -183,48 +190,57 @@ def encrypt_type5(unencrypted_password, salt=None, salt_len=4): >>> """ if not salt: - salt = "".join(secrets.choice(ALPHABET) for i in range(salt_len)) + salt = "".join(secrets.choice(ALPHABET) for _ in range(salt_len)) elif not set(salt) <= set(ALPHABET): raise ValueError(f"type5_pw salt used inproper characters, must be one of {ALPHABET}") return crypt.crypt(unencrypted_password, f"$1${salt}$") -def encrypt_type7(unencrypted_password, salt=None): +def encrypt_type7(unencrypted_password: str, salt: t.Optional[int] = None) -> str: """Given an unencrypted password of Cisco Type 7 password, encypt it. 
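For context on compare_type5 above: a Type 5 hash is salted MD5-crypt in the form $1$<salt>$<digest>, so the comparison amounts to re-hashing the candidate with the stored salt. A minimal sketch of that idea (not the library implementation), subject to the same macOS limitation the _fail_on_mac decorator guards against:

    import crypt

    def type5_matches(unencrypted: str, encrypted: str) -> bool:
        # Re-hash with the salt embedded in '$1$<salt>$<digest>' and compare the results.
        salt = encrypted.split("$")[2]
        return crypt.crypt(unencrypted, f"$1${salt}$") == encrypted

    hashed = crypt.crypt("cisco", "$1$ZLGo$")  # hash with a known salt
    print(type5_matches("cisco", hashed))      # True
    print(type5_matches("not-cisco", hashed))  # False
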
Args: - unencrypted_password (str): A password that has not been encrypted, and will be compared against. - salt (str, optional): A random number between 0 and 15 that can be set by the operator. Defaults to random generated one. + unencrypted_password: A password that has not been encrypted, and will be compared against. + salt: A random number between 0 and 15 that can be set by the operator. Defaults to random generated one. Returns: - string: The encrypted password. + The encrypted password. Example: >>> from netutils.password import encrypt_type7 - >>> encrypt_type5("cisco") # doctest: +SKIP - '$1$ZLGo$J.gAGxS2wqO96drs0Cith/' + >>> encrypt_type7("cisco", 11) + '110A1016141D' >>> """ + # max length of password for encrypt t7 is 25 + if len(unencrypted_password) > ENCRYPT_TYPE7_LENGTH: # nosec + raise ValueError("Password must not exceed 25 characters.") + if not salt: - salt = random.randrange(0, 15) # nosec - encrypted_password = "%02x" % salt # pylint: disable=consider-using-f-string + salt = random.randint(0, 15) # nosec + # Start building the encrypted password - pre-pend the 2 decimal digit offset. + encrypted_password = format(salt, "02d") for i, _ in enumerate(unencrypted_password): - hex_password = "%02x" % (ord(unencrypted_password[i]) ^ XLAT[salt]) # pylint: disable=consider-using-f-string - encrypted_password += hex_password - salt += 1 - if salt == 51: - salt = 0 + # Get the next of the plaintext character. + dec_char = ord(unencrypted_password[i]) + # Get the next character of the key. + key_char = ast.literal_eval(XLAT[(i + salt) % 53]) + # XOR the plaintext character with the key character. + enc_char = dec_char ^ key_char + # Build the encrypted password one character at a time. + # The ASCII code of each encrypted character is added as 2 hex digits. + encrypted_password += format(enc_char, "02X") return encrypted_password -def get_hash_salt(encrypted_password): +def get_hash_salt(encrypted_password: str) -> str: """Given an encrypted password obtain the salt value from it. Args: - encrypted_password (str): A password that has been encrypted, which the salt will be taken from. + encrypted_password: A password that has been encrypted, which the salt will be taken from. Returns: - string: The encrypted password. + The encrypted password. Example: >>> from netutils.password import get_hash_salt diff --git a/netutils/ping.py b/netutils/ping.py index f5cc0976..d11ebad9 100644 --- a/netutils/ping.py +++ b/netutils/ping.py @@ -2,16 +2,16 @@ import socket -def tcp_ping(ip, port, timeout=1): # pylint: disable=invalid-name +def tcp_ping(ip: str, port: int, timeout: int = 1) -> bool: # pylint: disable=invalid-name """Verifies whether a TCP port is open on a given IP address. Args: - ip (str): An IP address in string format that is able to be converted by `ipaddress` library. - port (int): A valid TCP port. - timeout (int): The timeout in seconds before returning a False. Defaults to 1. + ip: An IP address in string format that is able to be converted by `ipaddress` library. + port: A valid TCP port. + timeout: The timeout in seconds before returning a False. Defaults to 1. Returns: - bool: The result as to whether or not you were able ping the IP address. + The result as to whether or not you were able ping the IP address. 
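The rewritten encrypt_type7 above is the classic Type 7 rolling-XOR scheme: a two-digit decimal key offset, then each plaintext byte XORed with successive key bytes and emitted as two uppercase hex digits. A self-contained sketch of the scheme and its inverse; the key string here is just the XLAT table decoded to characters:

    TYPE7_KEY = "dsfd;kfoA,.iyewrkldJKDHSUBsgvca69834ncxv9873254k;fg87"  # 53 characters

    def type7_encrypt(plaintext: str, salt: int = 0) -> str:
        # Two decimal digits of key offset, then one XORed byte per character as uppercase hex.
        out = format(salt, "02d")
        for i, char in enumerate(plaintext):
            out += format(ord(char) ^ ord(TYPE7_KEY[(i + salt) % 53]), "02X")
        return out

    def type7_decrypt(encrypted: str) -> str:
        salt = int(encrypted[:2])
        pairs = [encrypted[i : i + 2] for i in range(2, len(encrypted), 2)]
        return "".join(chr(int(pair, 16) ^ ord(TYPE7_KEY[(i + salt) % 53])) for i, pair in enumerate(pairs))

    print(type7_encrypt("cisco", 11))     # '110A1016141D', matching the doctest above
    print(type7_decrypt("110A1016141D"))  # 'cisco'
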
Example: >>> from netutils.ping import tcp_ping diff --git a/netutils/protocol_mapper.py b/netutils/protocol_mapper.py index ed48dc37..6eeaadba 100644 --- a/netutils/protocol_mapper.py +++ b/netutils/protocol_mapper.py @@ -1,15 +1,17 @@ """Protocol Mappers.""" +import typing as t + from netutils.constants import PROTOCOLS -def _number_to_name_mapper(proto: str) -> dict: +def _number_to_name_mapper(proto: str) -> t.Dict[int, str]: """Create a dictionary that maps protocol port number to a name. Args: proto: Protocol to map ['tcp', 'udp', 'sctp', 'dccp'] Returns: - proto_num_to_name: Dictionary of the number to name mapping. + Dictionary of the number to name mapping. """ proto_num_to_name = {} diff --git a/netutils/py.typed b/netutils/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/netutils/route.py b/netutils/route.py index da11a918..1dde8ca7 100644 --- a/netutils/route.py +++ b/netutils/route.py @@ -1,21 +1,22 @@ """Utilities to get best route from routing table.""" import ipaddress +import typing as t class NoRouteFound(BaseException): """Custom Exception for No Route Found.""" -def longest_prefix_match(ip_addr, routes): +def longest_prefix_match(ip_addr: str, routes: t.List[t.Dict[str, str]]) -> str: """From a list of networks and an IP address, find the most specific route. Args: - ip_addr (str): String representation of an IP address. - routes (list): list of dictionaries with network and mask as keys. Subnet can also be CIDR(number) notation. + ip_addr: String representation of an IP address. + routes: list of dictionaries with network and mask as keys. Subnet can also be CIDR(number) notation. Returns: - [IPv4Network object]: Longest Match Route + Longest Match Route Example: >>> from netutils.route import longest_prefix_match @@ -29,7 +30,7 @@ def longest_prefix_match(ip_addr, routes): if not len(routes) > 0: raise IndexError(f"'routing_table' should have more than zero indexes. Got {len(routes)}") if isinstance(ip_addr, str): - ip_addr = ipaddress.ip_address(ip_addr) + ip_addr = ipaddress.ip_address(ip_addr) # type: ignore else: if not isinstance(ip_addr, (ipaddress.IPv4Address, ipaddress.IPv6Address)): raise TypeError(f"'ip_addr' should be a str, got {type(ip_addr)}") @@ -37,7 +38,7 @@ def longest_prefix_match(ip_addr, routes): networks = [ ipaddress.IPv4Network(f'{route["network"]}/{route["mask"]}') for route in routes - if ip_addr in ipaddress.IPv4Network(f'{route["network"]}/{route["mask"]}') + if ip_addr in ipaddress.IPv4Network(f'{route["network"]}/{route["mask"]}') # type: ignore ] try: return str(sorted(networks)[-1]) diff --git a/netutils/time.py b/netutils/time.py index 0545d55a..c39f3965 100644 --- a/netutils/time.py +++ b/netutils/time.py @@ -1,16 +1,18 @@ """Functions for working with time.""" import re +import typing as t + from .constants import TIME_MAPPINGS, UPTIME_REGEX_PATTERNS -def uptime_seconds_to_string(uptime_seconds): +def uptime_seconds_to_string(uptime_seconds: int) -> str: """Converts uptime in seconds to uptime in string format. Args: - uptime_seconds (int): Uptime in seconds. + uptime_seconds: Uptime in seconds. Returns: - str: Uptime in string format. + Uptime in string format. Example: >>> from netutils.time import uptime_seconds_to_string @@ -29,14 +31,14 @@ def uptime_seconds_to_string(uptime_seconds): return ", ".join(result) -def uptime_string_to_seconds(uptime_string): +def uptime_string_to_seconds(uptime_string: str) -> int: """Converts uptime string seconds. 
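longest_prefix_match above builds every containing network and takes sorted(networks)[-1]; conceptually that is a pick-the-longest-prefix operation. An equivalent illustrative sketch (not the library code) using the same ipaddress primitives:

    import ipaddress

    def most_specific_route(ip_addr: str, routes):
        addr = ipaddress.ip_address(ip_addr)
        # Keep only the routes whose network contains the address...
        candidates = [
            ipaddress.ip_network(f"{route['network']}/{route['mask']}")
            for route in routes
            if addr in ipaddress.ip_network(f"{route['network']}/{route['mask']}")
        ]
        # ...and return the most specific (longest prefix) of those.
        return max(candidates, key=lambda net: net.prefixlen)

    routes = [{"network": "10.0.0.0", "mask": "8"}, {"network": "10.1.1.0", "mask": "24"}]
    print(most_specific_route("10.1.1.245", routes))  # 10.1.1.0/24
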
Args: - uptime_string (str): Uptime in string format + uptime_string: Uptime in string format Returns: - int: Uptime string converted to seconds. + Uptime string converted to seconds. Example: >>> from netutils.time import uptime_string_to_seconds @@ -51,7 +53,7 @@ def uptime_string_to_seconds(uptime_string): """ compiled_regex_list = [re.compile(reg_pattern) for reg_pattern in UPTIME_REGEX_PATTERNS] - uptime_dict = {} + uptime_dict: t.Dict[str, str] = {} for regex in compiled_regex_list: match = regex.search(uptime_string) @@ -64,6 +66,7 @@ def uptime_string_to_seconds(uptime_string): uptime_seconds = 0 for time_interval, value in TIME_MAPPINGS: - if uptime_dict.get(time_interval): - uptime_seconds += int(uptime_dict.get(time_interval)) * value + time_interval_as_int = uptime_dict.get(time_interval) + if time_interval_as_int: + uptime_seconds += int(time_interval_as_int) * value return uptime_seconds diff --git a/netutils/utils.py b/netutils/utils.py index 9d4c586e..007e2e2a 100644 --- a/netutils/utils.py +++ b/netutils/utils.py @@ -1,4 +1,5 @@ """Utilities for the netutils library.""" +import typing as t from importlib import import_module _JINJA2_FUNCTION_MAPPINGS = { @@ -65,11 +66,11 @@ } -def jinja2_convenience_function(): +def jinja2_convenience_function() -> t.Dict[str, t.Callable[..., t.Any]]: """Convenience function that allows netutils filter to be used easily with jinja2. Returns: - dict: Keys are the function names for the Jinja2 filter and values are the function objects. + Keys are the function names for the Jinja2 filter and values are the function objects. Example: >>> from netutils.utils import jinja2_convenience_function diff --git a/netutils/vlan.py b/netutils/vlan.py index b7cfe476..a4d549c3 100644 --- a/netutils/vlan.py +++ b/netutils/vlan.py @@ -1,22 +1,27 @@ """Functions for working with VLANs.""" import re - -from operator import itemgetter +import typing as t from itertools import groupby +from operator import itemgetter -def vlanlist_to_config(vlan_list, first_line_len=48, other_line_len=44, min_grouping_size=3): +def vlanlist_to_config( + vlan_list: t.List[int], + first_line_len: int = 48, + other_line_len: int = 44, + min_grouping_size: int = 3, +) -> t.List[str]: """Given a List of VLANs, build the IOS-like vlan list of configurations. Args: - vlan_list (list): Unsorted list of vlan integers. - first_line_len (int, optional): The maximum length of the line of the first element of within the return list. Defaults to 48. - other_line_len (int, optional): The maximum length of the line of all other elements of within the return list. Defaults to 44. - min_grouping_size (int, optional): The minimum consecutive VLANs to aggregate with a hyphen . Defaults to Cisco's minimum grouping size of 3. + vlan_list: Unsorted list of vlan integers. + first_line_len: The maximum length of the line of the first element of within the return list. Defaults to 48. + other_line_len: The maximum length of the line of all other elements of within the return list. Defaults to 44. + min_grouping_size: The minimum consecutive VLANs to aggregate with a hyphen. Defaults to Cisco's minimum grouping size of 3. 
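jinja2_convenience_function, now typed above as returning a name-to-callable dict, exists so that all of these helpers can be registered on a Jinja2 environment in one call. A hedged usage sketch, assuming the mapping keys follow the function names used elsewhere in this patch (e.g. is_valid_mac):

    from jinja2 import Environment

    from netutils.utils import jinja2_convenience_function

    env = Environment()
    env.filters.update(jinja2_convenience_function())  # register every mapped helper as a filter

    template = env.from_string("{{ '00:1A:2B:3C:4D:5E' | is_valid_mac }}")
    print(template.render())  # 'True'
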
Returns: - list: Sorted string list of integers according to IOS-like vlan list rules + Sorted string list of integers according to IOS-like vlan list rules Example: >>> from netutils.vlan import vlanlist_to_config @@ -26,16 +31,29 @@ def vlanlist_to_config(vlan_list, first_line_len=48, other_line_len=44, min_grou ['1,3,5-6,100-105,107,109'] >>> vlanlist_to_config([1,3,5,6,100,101,102,103,104,105,107,109], min_grouping_size=1) ['1,3,5,6,100,101,102,103,104,105,107,109'] + >>> vlan_list = [1, 2, 3, 5, 6, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018] + >>> for index, vlan in enumerate(vlanlist_to_config(vlan_list)): + ... if index == 0: + ... print(f"switchport trunk allowed vlan {vlan}") + ... else: + ... print(f"switchport trunk allowed vlan add {vlan}") + ... + switchport trunk allowed vlan 1-3,5,6,1000,1002,1004,1006,1008,1010,1012,1014 + switchport trunk allowed vlan add 1016,1018 """ - def build_final_vlan_cfg(vlan_cfg): + def build_final_vlan_cfg(vlan_cfg: str) -> t.List[str]: if len(vlan_cfg) <= first_line_len: return [vlan_cfg] # Split VLAN config if lines are too long first_line = re.match(f"^.{{0,{first_line_len}}}(?=,)", vlan_cfg) + if not first_line: + raise ValueError( + f"Line with comma seperated vlans is expected.(E.g. 1-3,5,6,1000,1002) Received {vlan_cfg}" + ) vlan_cfg_lines = [first_line.group(0)] - next_lines = next_lines = re.compile(f"(?<=,).{{0,{other_line_len}}}(?=,|$)") + next_lines = re.compile(f"(?<=,).{{0,{other_line_len}}}(?=,|$)") for line in next_lines.findall(vlan_cfg, first_line.end()): vlan_cfg_lines.append(line) return vlan_cfg_lines @@ -76,14 +94,14 @@ def build_final_vlan_cfg(vlan_cfg): return build_final_vlan_cfg(",".join(vlan_strings)) -def vlanconfig_to_list(vlan_config): +def vlanconfig_to_list(vlan_config: str) -> t.List[int]: """Given an IOS-like vlan list of configurations, return the list of VLANs. Args: - vlan_config (list): IOS-like vlan list of configurations. + vlan_config: IOS-like vlan list of configurations. 
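The hyphenation in vlanlist_to_config above hinges on a standard itertools trick: after enumerating a sorted list, consecutive values share the same value-minus-index, so groupby on that difference yields the consecutive runs. An illustrative sketch of just that step (not the library code, which also honors min_grouping_size and the line-length limits):

    from itertools import groupby

    def group_consecutive(vlans):
        # Collapse sorted, de-duplicated VLAN IDs into (start, end) runs, e.g. [1, 2, 3, 5] -> [(1, 3), (5, 5)].
        runs = []
        for _, run in groupby(enumerate(sorted(set(vlans))), lambda pair: pair[1] - pair[0]):
            members = [vlan for _, vlan in run]
            runs.append((members[0], members[-1]))
        return runs

    print(group_consecutive([1, 3, 5, 6, 100, 101, 102, 103, 104, 105, 107, 109]))
    # [(1, 1), (3, 3), (5, 6), (100, 105), (107, 107), (109, 109)]
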
Returns: - dict: Sorted string list of integers according to IOS-like vlan list rules + Sorted string list of integers according to IOS-like vlan list rules Example: >>> vlan_config = '''switchport trunk allowed vlan 1025,1069-1072,1114,1173-1181,1501,1502''' diff --git a/poetry.lock b/poetry.lock index de573944..bc7c7b5e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -318,6 +318,25 @@ category = "dev" optional = false python-versions = "*" +[[package]] +name = "mypy" +version = "0.961" +description = "Optional static typing for Python" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +mypy-extensions = ">=0.4.3" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""} +typing-extensions = ">=3.10" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +python2 = ["typed-ast (>=1.4.0,<2)"] +reports = ["lxml"] + [[package]] name = "mypy-extensions" version = "0.4.3" @@ -743,7 +762,7 @@ testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytes [metadata] lock-version = "1.1" python-versions = "^3.6" -content-hash = "56454ebe211ab923d3e1ce70e867397ed84ba4d31766941daf3e85a1c2c036fa" +content-hash = "9d23f91405a5a3ea5cc425913d6c78c410cb941d131ee3bf36dad117cc421934" [metadata.files] alabaster = [ @@ -995,6 +1014,31 @@ mistune = [ {file = "mistune-0.8.4-py2.py3-none-any.whl", hash = "sha256:88a1051873018da288eee8538d476dffe1262495144b33ecb586c4ab266bb8d4"}, {file = "mistune-0.8.4.tar.gz", hash = "sha256:59a3429db53c50b5c6bcc8a07f8848cb00d7dc8bdb431a4ab41920d201d4756e"}, ] +mypy = [ + {file = "mypy-0.961-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:697540876638ce349b01b6786bc6094ccdaba88af446a9abb967293ce6eaa2b0"}, + {file = "mypy-0.961-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b117650592e1782819829605a193360a08aa99f1fc23d1d71e1a75a142dc7e15"}, + {file = "mypy-0.961-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bdd5ca340beffb8c44cb9dc26697628d1b88c6bddf5c2f6eb308c46f269bb6f3"}, + {file = "mypy-0.961-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3e09f1f983a71d0672bbc97ae33ee3709d10c779beb613febc36805a6e28bb4e"}, + {file = "mypy-0.961-cp310-cp310-win_amd64.whl", hash = "sha256:e999229b9f3198c0c880d5e269f9f8129c8862451ce53a011326cad38b9ccd24"}, + {file = "mypy-0.961-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b24be97351084b11582fef18d79004b3e4db572219deee0212078f7cf6352723"}, + {file = "mypy-0.961-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f4a21d01fc0ba4e31d82f0fff195682e29f9401a8bdb7173891070eb260aeb3b"}, + {file = "mypy-0.961-cp36-cp36m-win_amd64.whl", hash = "sha256:439c726a3b3da7ca84a0199a8ab444cd8896d95012c4a6c4a0d808e3147abf5d"}, + {file = "mypy-0.961-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5a0b53747f713f490affdceef835d8f0cb7285187a6a44c33821b6d1f46ed813"}, + {file = "mypy-0.961-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0e9f70df36405c25cc530a86eeda1e0867863d9471fe76d1273c783df3d35c2e"}, + {file = "mypy-0.961-cp37-cp37m-win_amd64.whl", hash = "sha256:b88f784e9e35dcaa075519096dc947a388319cb86811b6af621e3523980f1c8a"}, + {file = "mypy-0.961-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d5aaf1edaa7692490f72bdb9fbd941fbf2e201713523bdb3f4038be0af8846c6"}, + {file = 
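vlanconfig_to_list above goes the other way, expanding an IOS-style allowed-vlan string back into integers. An illustrative sketch of that expansion for a single comma-separated field (not the library implementation, which also handles the full configuration lines):

    def expand_vlan_field(vlan_field: str):
        # Expand '1025,1069-1072,1114' into [1025, 1069, 1070, 1071, 1072, 1114].
        vlans = []
        for chunk in vlan_field.split(","):
            if "-" in chunk:
                start, end = (int(value) for value in chunk.split("-"))
                vlans.extend(range(start, end + 1))
            else:
                vlans.append(int(chunk))
        return sorted(set(vlans))

    print(expand_vlan_field("1025,1069-1072,1114"))
    # [1025, 1069, 1070, 1071, 1072, 1114]
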
"mypy-0.961-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9f5f5a74085d9a81a1f9c78081d60a0040c3efb3f28e5c9912b900adf59a16e6"}, + {file = "mypy-0.961-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f4b794db44168a4fc886e3450201365c9526a522c46ba089b55e1f11c163750d"}, + {file = "mypy-0.961-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:64759a273d590040a592e0f4186539858c948302c653c2eac840c7a3cd29e51b"}, + {file = "mypy-0.961-cp38-cp38-win_amd64.whl", hash = "sha256:63e85a03770ebf403291ec50097954cc5caf2a9205c888ce3a61bd3f82e17569"}, + {file = "mypy-0.961-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5f1332964963d4832a94bebc10f13d3279be3ce8f6c64da563d6ee6e2eeda932"}, + {file = "mypy-0.961-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:006be38474216b833eca29ff6b73e143386f352e10e9c2fbe76aa8549e5554f5"}, + {file = "mypy-0.961-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9940e6916ed9371809b35b2154baf1f684acba935cd09928952310fbddaba648"}, + {file = "mypy-0.961-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a5ea0875a049de1b63b972456542f04643daf320d27dc592d7c3d9cd5d9bf950"}, + {file = "mypy-0.961-cp39-cp39-win_amd64.whl", hash = "sha256:1ece702f29270ec6af25db8cf6185c04c02311c6bb21a69f423d40e527b75c56"}, + {file = "mypy-0.961-py3-none-any.whl", hash = "sha256:03c6cc893e7563e7b2949b969e63f02c000b32502a1b4d1314cabe391aa87d66"}, + {file = "mypy-0.961.tar.gz", hash = "sha256:f730d56cb924d371c26b8eaddeea3cc07d78ff51c521c6d04899ac6904b75492"}, +] mypy-extensions = [ {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, diff --git a/pyproject.toml b/pyproject.toml index 5c115ee4..ffe7040a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "netutils" -version = "1.1.0" +version = "1.2.0" description = "Common helper functions useful in network automation." 
authors = ["Network to Code, LLC "] license = "Apache-2.0" @@ -44,6 +44,7 @@ sphinx = "*" sphinx-rtd-theme = "*" toml = "*" yamllint = "*" +mypy = "^0.961" [tool.black] line-length = 120 @@ -93,7 +94,29 @@ notes = """, python_paths = "./" testpaths = "tests/" addopts = "-vv --doctest-modules -p no:warnings --ignore-glob='*mock*'" -[build-system] -requires = ["poetry>=0.12"] -build-backend = "poetry.masonry.api" +[tool.mypy] +python_version = 3.7 +ignore_errors = false +disallow_untyped_calls = true +disallow_untyped_defs = true +disallow_incomplete_defs = true +disallow_untyped_decorators = true +check_untyped_defs = true +disallow_any_generics = true +ignore_missing_imports = true +strict_optional = true +warn_unused_ignores = true +warn_return_any = true +warn_unused_configs = true +warn_redundant_casts = true +disallow_subclassing_any = true +no_implicit_optional = true +implicit_reexport = true +strict_equality = true +exclude = ["tests/", "tasks.py"] +show_error_codes = true + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/tasks.py b/tasks.py index bf476002..7b2bb611 100644 --- a/tasks.py +++ b/tasks.py @@ -2,6 +2,7 @@ import os import sys from distutils.util import strtobool + from invoke import task try: @@ -210,6 +211,18 @@ def bandit(context, local=INVOKE_LOCAL): run_cmd(context, exec_cmd, local) +@task +def mypy(context, local=INVOKE_LOCAL): + """Run mypy to validate typing-hints. + + Args: + context (obj): Used to run specific commands + local (bool): Define as `True` to execute locally + """ + exec_cmd = "mypy ./netutils" + run_cmd(context, exec_cmd, local) + + @task def cli(context): """Enter the image to perform troubleshooting or dev work. @@ -235,6 +248,7 @@ def tests(context, local=INVOKE_LOCAL): yamllint(context, local) pydocstyle(context, local) bandit(context, local) + mypy(context, local) pytest(context, local) print("All tests have passed!") diff --git a/tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_backup.txt b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_backup.txt new file mode 100644 index 00000000..ceb40d01 --- /dev/null +++ b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_backup.txt @@ -0,0 +1,7 @@ +! +banner login +* GOOD DATA LINE 1 +* GOOD DATA LINE 2 +EOF +! +end \ No newline at end of file diff --git a/tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_feature.py b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_feature.py new file mode 100644 index 00000000..15927ba8 --- /dev/null +++ b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_feature.py @@ -0,0 +1,3 @@ +features = [ + {"name": "banner", "ordered": True, "section": ["banner "]}, +] diff --git a/tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_intended.txt b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_intended.txt new file mode 100644 index 00000000..e60eb8ed --- /dev/null +++ b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_intended.txt @@ -0,0 +1,7 @@ +! +banner login +* GOOD DATA LINE 1 +* BAD DATA LINE 2 +EOF +! 
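With the [tool.mypy] settings above (notably disallow_untyped_defs and disallow_incomplete_defs), the new `poetry run invoke mypy` task rejects any function in netutils/ that is missing annotations, which is the behavior behind the type hints added throughout this release. A small illustration of what those flags enforce:

    # Rejected under disallow_untyped_defs = true:
    #   error: Function is missing a type annotation  [no-untyped-def]
    def scale(value, factor):
        return value * factor

    # Accepted: fully annotated parameters and return type.
    def scale_typed(value: int, factor: int) -> int:
        return value * factor
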
+end \ No newline at end of file diff --git a/tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_received.json b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_received.json new file mode 100644 index 00000000..f3c4d12a --- /dev/null +++ b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_bad_banner_received.json @@ -0,0 +1,12 @@ +{ + "banner": { + "actual": "banner login\n* GOOD DATA LINE 1\n* GOOD DATA LINE 2\nEOF", + "cannot_parse": true, + "compliant": false, + "extra": "banner login\n* GOOD DATA LINE 1\n* GOOD DATA LINE 2\nEOF", + "intended": "banner login\n* GOOD DATA LINE 1\n* BAD DATA LINE 2\nEOF", + "missing": "banner login\n* GOOD DATA LINE 1\n* BAD DATA LINE 2\nEOF", + "ordered_compliant": false, + "unordered_compliant": false + } +} diff --git a/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_backup.txt b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_backup.txt index cf71ed51..4c2f3a9a 100644 --- a/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_backup.txt +++ b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_backup.txt @@ -6,6 +6,20 @@ router bgp 65254 neighbor 10.11.11.21 maximum-routes 12000 redistribute connected ! +banner login +******************************************************************** +* This system is the property of Allied Widget Co. * +* UNAUTHORIZED ACCESS TO THIS DEVICE IS PROHIBITED * +* * +* You must have explicit, authorized permission to access or * +* configure this device. Unauthorized attempts and actions to * +* access or use this system may result in civil and/or criminal * +* penalties. * +* * +* All activities performed on this deviceare logged and monitored. * +******************************************************************** +EOF +! management api http-commands protocol http protocol unix-socket diff --git a/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_feature.py b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_feature.py index abee7249..147a27fe 100644 --- a/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_feature.py +++ b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_feature.py @@ -1,3 +1,4 @@ features = [ {"name": "bgp", "ordered": True, "section": ["router bgp "]}, + {"name": "banner", "ordered": True, "section": ["banner "]}, ] diff --git a/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_intended.txt b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_intended.txt index cf71ed51..4c2f3a9a 100644 --- a/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_intended.txt +++ b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_intended.txt @@ -6,6 +6,20 @@ router bgp 65254 neighbor 10.11.11.21 maximum-routes 12000 redistribute connected ! +banner login +******************************************************************** +* This system is the property of Allied Widget Co. * +* UNAUTHORIZED ACCESS TO THIS DEVICE IS PROHIBITED * +* * +* You must have explicit, authorized permission to access or * +* configure this device. Unauthorized attempts and actions to * +* access or use this system may result in civil and/or criminal * +* penalties. * +* * +* All activities performed on this deviceare logged and monitored. * +******************************************************************** +EOF +! 
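The eos_bad_banner_* fixtures above exercise banner handling in the compliance engine: the backup and intended banners differ on one line, so the expected report (the received.json above) marks the banner feature non-compliant. A hedged sketch of how such a report is produced, assuming the compliance() helper in netutils.config.compliance accepts the feature list, the backup and intended configuration files, and the platform slug; the file names are simply local copies of the fixtures:

    from netutils.config.compliance import compliance

    features = [{"name": "banner", "ordered": True, "section": ["banner "]}]
    report = compliance(
        features,
        "eos_bad_banner_backup.txt",    # backup (actual) configuration
        "eos_bad_banner_intended.txt",  # intended configuration
        "arista_eos",                   # platform slug used to pick the parser
        cfg_type="file",
    )
    print(report["banner"]["compliant"])  # False for the mismatched banner, per the JSON fixture above
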
management api http-commands protocol http protocol unix-socket diff --git a/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_received.json b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_received.json index 8fa5bfe1..a18022d0 100644 --- a/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_received.json +++ b/tests/unit/mock/config/compliance/compliance/arista_eos/eos_basic_received.json @@ -8,5 +8,15 @@ "missing": "", "ordered_compliant": true, "unordered_compliant": true + }, + "banner": { + "actual": "banner login\n********************************************************************\n* This system is the property of Allied Widget Co. *\n* UNAUTHORIZED ACCESS TO THIS DEVICE IS PROHIBITED *\n* *\n* You must have explicit, authorized permission to access or *\n* configure this device. Unauthorized attempts and actions to *\n* access or use this system may result in civil and/or criminal *\n* penalties. *\n* *\n* All activities performed on this deviceare logged and monitored. *\n********************************************************************\nEOF", + "cannot_parse": true, + "compliant": true, + "extra": "", + "intended": "banner login\n********************************************************************\n* This system is the property of Allied Widget Co. *\n* UNAUTHORIZED ACCESS TO THIS DEVICE IS PROHIBITED *\n* *\n* You must have explicit, authorized permission to access or *\n* configure this device. Unauthorized attempts and actions to *\n* access or use this system may result in civil and/or criminal *\n* penalties. *\n* *\n* All activities performed on this deviceare logged and monitored. *\n********************************************************************\nEOF", + "missing": "", + "ordered_compliant": true, + "unordered_compliant": true } -} \ No newline at end of file +} diff --git a/tests/unit/mock/config/parser/arista_eos/eos_full_received.py b/tests/unit/mock/config/parser/arista_eos/eos_full_received.py index 26c29f09..8c019751 100644 --- a/tests/unit/mock/config/parser/arista_eos/eos_full_received.py +++ b/tests/unit/mock/config/parser/arista_eos/eos_full_received.py @@ -155,6 +155,11 @@ ConfigLine(config_line=" neighbor 10.11.11.21 remote-as 65253", parents=("router bgp 65254",)), ConfigLine(config_line=" neighbor 10.11.11.21 maximum-routes 12000", parents=("router bgp 65254",)), ConfigLine(config_line=" redistribute connected", parents=("router bgp 65254",)), + ConfigLine(config_line="banner login", parents=()), + ConfigLine( + config_line="********************************************************************\n* This system is the property of Allied Widget Co. *\n* UNAUTHORIZED ACCESS TO THIS DEVICE IS PROHIBITED *\n* *\n* You must have explicit, authorized permission to access or *\n* configure this device. Unauthorized attempts and actions to *\n* access or use this system may result in civil and/or criminal *\n* penalties. *\n* *\n* All activities performed on this deviceare logged and monitored. 
*\n********************************************************************\nEOF", + parents=("banner login",), + ), ConfigLine(config_line="management api http-commands", parents=()), ConfigLine(config_line=" protocol http", parents=("management api http-commands",)), ConfigLine(config_line=" protocol unix-socket", parents=("management api http-commands",)), diff --git a/tests/unit/mock/config/parser/arista_eos/eos_full_sent.txt b/tests/unit/mock/config/parser/arista_eos/eos_full_sent.txt index d78fd1e8..1f871798 100644 --- a/tests/unit/mock/config/parser/arista_eos/eos_full_sent.txt +++ b/tests/unit/mock/config/parser/arista_eos/eos_full_sent.txt @@ -184,6 +184,20 @@ router bgp 65254 neighbor 10.11.11.21 maximum-routes 12000 redistribute connected ! +banner login +******************************************************************** +* This system is the property of Allied Widget Co. * +* UNAUTHORIZED ACCESS TO THIS DEVICE IS PROHIBITED * +* * +* You must have explicit, authorized permission to access or * +* configure this device. Unauthorized attempts and actions to * +* access or use this system may result in civil and/or criminal * +* penalties. * +* * +* All activities performed on this deviceare logged and monitored. * +******************************************************************** +EOF +! management api http-commands protocol http protocol unix-socket diff --git a/tests/unit/test_password.py b/tests/unit/test_password.py index c1e9308f..6ed43750 100644 --- a/tests/unit/test_password.py +++ b/tests/unit/test_password.py @@ -24,26 +24,26 @@ COMPARE_TYPE7 = [ { - "sent": {"unencrypted_password": "cisco", "encrypted_password": "121A0C041104"}, + "sent": {"unencrypted_password": "cisco", "encrypted_password": "070C285F4D06"}, "received": True, }, { "sent": { "unencrypted_password": "cisco", - "encrypted_password": "121A0C041104", + "encrypted_password": "070C285F4D06", "return_original": True, }, - "received": "121A0C041104", + "received": "070C285F4D06", }, { - "sent": {"unencrypted_password": "invalid_password", "encrypted_password": "121A0C041104"}, + "sent": {"unencrypted_password": "invalid_password", "encrypted_password": "070C285F4D06"}, "received": False, }, ] DECRYPT_TYPE7 = [ { - "sent": {"encrypted_password": "121A0C041104"}, + "sent": {"encrypted_password": "14141B180F0B"}, "received": "cisco", } ] @@ -58,7 +58,7 @@ ENCRYPT_TYPE7 = [ { "sent": {"unencrypted_password": "cisco", "salt": 10}, - "received": "0a4d000a0618", + "received": "104D000A0618", }, ]
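
The updated test vectors at the end reflect the corrected Type 7 implementation; they can be reproduced directly with the library, doctest style:

    >>> from netutils.password import decrypt_type7, encrypt_type7
    >>> decrypt_type7("14141B180F0B")
    'cisco'
    >>> encrypt_type7("cisco", 10)
    '104D000A0618'
    >>> encrypt_type7("cisco", 11)
    '110A1016141D'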