From 37f5282c3acda79a64562c792a4faa378142879f Mon Sep 17 00:00:00 2001 From: Ilya Kheifets <138466237+ikheifets-splunk@users.noreply.github.com> Date: Fri, 18 Oct 2024 12:27:41 +0200 Subject: [PATCH 01/10] chore: upgrade deprecated semgrep (#1106) Signed-off-by: Ilya Kheifets --- .github/workflows/ci-main.yaml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-main.yaml b/.github/workflows/ci-main.yaml index 3314046c0..54f3ffaf7 100644 --- a/.github/workflows/ci-main.yaml +++ b/.github/workflows/ci-main.yaml @@ -59,13 +59,11 @@ jobs: FOSSA_API_KEY: ${{ secrets.FOSSA_API_KEY }} semgrep-scan: name: semgrep - runs-on: ubuntu-latest if: (github.actor != 'dependabot[bot]') - steps: - - uses: actions/checkout@v4 - - uses: semgrep/semgrep-action@v1 - with: - publishToken: ${{ secrets.SEMGREP_APP_TOKEN }} + uses: splunk/sast-scanning/.github/workflows/sast-scan.yml@main + secrets: inherit + with: + block_mode: "on" pre-commit: runs-on: ubuntu-latest steps: From cbdc062819d306df822e1c49b5aa66e4770aed61 Mon Sep 17 00:00:00 2001 From: ajasnosz <139114006+ajasnosz@users.noreply.github.com> Date: Fri, 18 Oct 2024 13:33:22 +0200 Subject: [PATCH 02/10] fix: refactor enrich (#1100) --- .github/workflows/ci-main.yaml | 8 ++ poetry.lock | 16 +-- pyproject.toml | 2 +- splunk_connect_for_snmp/enrich/tasks.py | 166 +++++++++++++++--------- test/enrich/test_enrich.py | 52 ++++++-- 5 files changed, 163 insertions(+), 81 deletions(-) diff --git a/.github/workflows/ci-main.yaml b/.github/workflows/ci-main.yaml index 54f3ffaf7..a79efd061 100644 --- a/.github/workflows/ci-main.yaml +++ b/.github/workflows/ci-main.yaml @@ -138,6 +138,10 @@ jobs: steps: - name: Checkout Project uses: actions/checkout@v4 + - name: Setup python + uses: actions/setup-python@v5 + with: + python-version: "3.10" - name: run install_microk8s.sh run: | sudo snap install microk8s --classic --channel=1.30/stable @@ -160,6 +164,10 @@ jobs: steps: - name: Checkout Project uses: actions/checkout@v4 + - name: Setup python + uses: actions/setup-python@v5 + with: + python-version: "3.10" - name: Install docker compose run: | # Add Docker's official GPG key: diff --git a/poetry.lock b/poetry.lock index faa6830c4..638fdd744 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "amqp" @@ -2242,16 +2242,6 @@ files = [ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, - {file = "wrapt-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecee4132c6cd2ce5308e21672015ddfed1ff975ad0ac8d27168ea82e71413f55"}, - {file = "wrapt-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2020f391008ef874c6d9e208b24f28e31bcb85ccff4f335f15a3251d222b92d9"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2feecf86e1f7a86517cab34ae6c2f081fd2d0dac860cb0c0ded96d799d20b335"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:240b1686f38ae665d1b15475966fe0472f78e71b1b4903c143a842659c8e4cb9"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9008dad07d71f68487c91e96579c8567c98ca4c3881b9b113bc7b33e9fd78b8"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6447e9f3ba72f8e2b985a1da758767698efa72723d5b59accefd716e9e8272bf"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:acae32e13a4153809db37405f5eba5bac5fbe2e2ba61ab227926a22901051c0a"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49ef582b7a1152ae2766557f0550a9fcbf7bbd76f43fbdc94dd3bf07cc7168be"}, - {file = "wrapt-1.14.1-cp311-cp311-win32.whl", hash = "sha256:358fe87cc899c6bb0ddc185bf3dbfa4ba646f05b1b0b9b5a27c2cb92c2cea204"}, - {file = "wrapt-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:26046cd03936ae745a502abf44dac702a5e6880b2b01c29aea8ddf3353b68224"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, @@ -2316,5 +2306,5 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] lock-version = "2.0" -python-versions = "^3.8" -content-hash = "0d96427eac02294dfb7ce6770111c88343202c9342ad3a9d807aeec52e7be425" +python-versions = ">=3.8,<3.12" +content-hash = "8fc9e4f9972418fec948be2960d7454911de3e854f7740f16d89e0f454f080df" diff --git a/pyproject.toml b/pyproject.toml index ce57d1e96..603407701 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,7 @@ testpaths = ["test"] python_files = ["test_*.py"] [tool.poetry.dependencies] -python = "^3.8" +python = ">=3.8,<3.12" pymongo = {extras = ["srv"], version = "^4.0.0"} requests = {extras = ["crypto"], version = "^2.31.0"} celery = {extras = ["tblib"], version = "5.4.0"} diff --git a/splunk_connect_for_snmp/enrich/tasks.py b/splunk_connect_for_snmp/enrich/tasks.py index 0c5d03002..18b78bd62 100644 --- a/splunk_connect_for_snmp/enrich/tasks.py +++ b/splunk_connect_for_snmp/enrich/tasks.py @@ -96,16 +96,8 @@ def enrich(self, result): updates = [] attribute_updates = [] - current_target = 
targets_collection.find_one( - {"address": address}, {"target": True, "sysUpTime": True} - ) - if current_target is None: - logger.info(f"First time for {address}") - current_target = {"address": address} - else: - logger.info(f"Not first time for {address}") + current_target = get_current_target(address, targets_collection) - # TODO: Compare the ts field with the lastmodified time of record and only update if we are newer check_restart(current_target, result["result"], targets_collection, address) logger.info(f"After check_restart for {address}") # First write back to DB new/changed data @@ -136,46 +128,17 @@ def enrich(self, result): upsert=True, ) new_fields = [] - for field_key, field_value in group_data["fields"].items(): - field_key_hash = field_key.replace(".", "|") - field_value["name"] = field_key - cv = None - if current_attributes and field_key_hash in current_attributes.get( - "fields", {} - ): - cv = current_attributes["fields"][field_key_hash] - - # if new field_value is different than the previous one, update - if cv and cv != field_value: - # modifed - attribute_updates.append( - {"$set": {f"fields.{field_key_hash}": field_value}} - ) - - elif cv: - # unchanged - pass - else: - # new - new_fields.append({"$set": {f"fields.{field_key_hash}": field_value}}) - if field_key in TRACKED_F: - updates.append( - {"$set": {f"state.{field_key.replace('.', '|')}": field_value}} - ) - - if len(updates) >= MONGO_UPDATE_BATCH_THRESHOLD: - targets_collection.update_one( - {"address": address}, updates, upsert=True - ) - updates.clear() - - if len(attribute_updates) >= MONGO_UPDATE_BATCH_THRESHOLD: - attributes_collection.update_one( - {"address": address, "group_key_hash": group_key_hash}, - attribute_updates, - upsert=True, - ) - attribute_updates.clear() + set_attribute_updates( + address, + attribute_updates, + attributes_collection, + current_attributes, + group_data, + group_key_hash, + new_fields, + targets_collection, + updates, + ) if new_fields: attributes_bulk_write_operations.append( UpdateOne( @@ -186,15 +149,14 @@ def enrich(self, result): ) new_fields.clear() - if updates: - targets_collection.update_one({"address": address}, updates, upsert=True) - updates.clear() - if attribute_updates: - attributes_collection.update_one( - {"address": address, "group_key_hash": group_key_hash}, - attribute_updates, - ) - attribute_updates.clear() + update_collections( + address, + attribute_updates, + attributes_collection, + group_key_hash, + targets_collection, + updates, + ) # Now add back any fields we need if current_attributes: @@ -203,6 +165,23 @@ def enrich(self, result): if attribute_group_id in result["result"]: snmp_object = result["result"][attribute_group_id] enrich_metric_with_fields_from_db(snmp_object, fields) + bulk_write_attributes(attributes_bulk_write_operations, attributes_collection) + return result + + +def get_current_target(address, targets_collection): + current_target = targets_collection.find_one( + {"address": address}, {"target": True, "sysUpTime": True} + ) + if current_target is None: + logger.info(f"First time for {address}") + current_target = {"address": address} + else: + logger.info(f"Not first time for {address}") + return current_target + + +def bulk_write_attributes(attributes_bulk_write_operations, attributes_collection): if attributes_bulk_write_operations: logger.debug("Start of bulk_write") start = time.time() @@ -214,7 +193,76 @@ def enrich(self, result): f"ELAPSED TIME OF BULK: {end - start} for {len(attributes_bulk_write_operations)} 
operations" ) logger.debug(f"result api: {bulk_result.bulk_api_result}") - return result + + +def update_collections( + address, + attribute_updates, + attributes_collection, + group_key_hash, + targets_collection, + updates, +): + if updates: + targets_collection.update_one({"address": address}, updates, upsert=True) + updates.clear() + if attribute_updates: + attributes_collection.update_one( + {"address": address, "group_key_hash": group_key_hash}, + attribute_updates, + ) + attribute_updates.clear() + + +def set_attribute_updates( + address, + attribute_updates, + attributes_collection, + current_attributes, + group_data, + group_key_hash, + new_fields, + targets_collection, + updates, +): + for field_key, field_value in group_data["fields"].items(): + field_key_hash = field_key.replace(".", "|") + field_value["name"] = field_key + cv = None + if current_attributes and field_key_hash in current_attributes.get( + "fields", {} + ): + cv = current_attributes["fields"][field_key_hash] + + # if new field_value is different than the previous one, update + if cv and cv != field_value: + # modifed + attribute_updates.append( + {"$set": {f"fields.{field_key_hash}": field_value}} + ) + + elif cv: + # unchanged + pass + else: + # new + new_fields.append({"$set": {f"fields.{field_key_hash}": field_value}}) + if field_key in TRACKED_F: + updates.append( + {"$set": {f"state.{field_key.replace('.', '|')}": field_value}} + ) + + if len(updates) >= MONGO_UPDATE_BATCH_THRESHOLD: + targets_collection.update_one({"address": address}, updates, upsert=True) + updates.clear() + + if len(attribute_updates) >= MONGO_UPDATE_BATCH_THRESHOLD: + attributes_collection.update_one( + {"address": address, "group_key_hash": group_key_hash}, + attribute_updates, + upsert=True, + ) + attribute_updates.clear() def enrich_metric_with_fields_from_db(snmp_object, fields_from_db): diff --git a/test/enrich/test_enrich.py b/test/enrich/test_enrich.py index a86eb4c66..f512eb417 100644 --- a/test/enrich/test_enrich.py +++ b/test/enrich/test_enrich.py @@ -1,14 +1,16 @@ from unittest import TestCase -from unittest.mock import patch +from unittest.mock import MagicMock, patch from splunk_connect_for_snmp.enrich.tasks import ( enrich, enrich_metric_with_fields_from_db, + get_current_target, + logger, ) attributes = { "id": "GROUP1", - "address": "192.168.0.1", + "address": "192.168.0.1:161", "fields": { "SNMPv2-MIB|sysDescr": { "time": 1234, @@ -36,7 +38,7 @@ attributes2 = { "id": "GROUP2", - "address": "192.168.0.1", + "address": "192.168.0.1:161", "fields": { "UDP-MIB|extraAttr": { "time": 1234, @@ -49,7 +51,7 @@ } input_dict = { - "address": "192.168.0.1", + "address": "192.168.0.1:161", "result": { "GROUP1": { "fields": { @@ -92,7 +94,7 @@ input_enrich = { "time": 1676291976.2939305, - "address": "54.91.99.113", + "address": "54.91.99.113:161", "result": { "ENTITY-MIB::int=1": { "metrics": {}, @@ -232,7 +234,7 @@ class TestEnrich(TestCase): @patch("pymongo.collection.Collection.bulk_write") @patch("splunk_connect_for_snmp.enrich.tasks.check_restart") def test_enrich(self, m_check_restart, bulk_write, m_update_one, m_find_one): - current_target = {"address": "192.168.0.1"} + current_target = {"address": "192.168.0.1:161"} m_find_one.side_effect = [current_target, True, attributes, attributes2, {}] result = enrich(input_dict) @@ -281,7 +283,7 @@ def test_enrich(self, m_check_restart, bulk_write, m_update_one, m_find_one): result["result"]["GROUP2"]["fields"]["UDP-MIB.extraAttr"], ) - self.assertEqual("192.168.0.1", 
result["address"]) + self.assertEqual("192.168.0.1:161", result["address"]) m_check_restart.assert_called() bulk_write.assert_called() @@ -323,7 +325,7 @@ def test_enrich_no_target( ) bulk_write.assert_called() - self.assertEqual("192.168.0.1", result["address"]) + self.assertEqual("192.168.0.1:161", result["address"]) def test_enrich_metric_with_fields_from_db(self): additional_field = { @@ -480,3 +482,37 @@ def test_enrich_metric_with_fields_no_metrics(self): result = snmp_object.copy() enrich_metric_with_fields_from_db(snmp_object, additional_field) self.assertEqual(result, snmp_object) + + def test_get_current_target(self): + address = "127.0.0.1:161" + targets_collection = MagicMock() + targets_collection.find_one.side_effect = [ + address, + True, + attributes, + attributes2, + {}, + ] + with self.assertLogs(logger, level="INFO") as logs: + current_address = get_current_target(address, targets_collection) + self.assertEqual("127.0.0.1:161", current_address) + self.assertEqual( + [ + "INFO:splunk_connect_for_snmp.enrich.tasks:Not first time for 127.0.0.1:161" + ], + logs.output, + ) + + def test_get_current_target_empty_find(self): + address = "127.0.0.1:161" + targets_collection = MagicMock() + targets_collection.find_one.return_value = None + with self.assertLogs(logger, level="INFO") as logs: + current_address = get_current_target(address, targets_collection) + self.assertEqual({"address": "127.0.0.1:161"}, current_address) + self.assertEqual( + [ + "INFO:splunk_connect_for_snmp.enrich.tasks:First time for 127.0.0.1:161" + ], + logs.output, + ) From 6836f918dd5f0b4310d43c86ad92dd27309245d7 Mon Sep 17 00:00:00 2001 From: srv-rr-github-token <94607705+srv-rr-github-token@users.noreply.github.com> Date: Fri, 18 Oct 2024 11:38:45 +0000 Subject: [PATCH 03/10] chore(release): 1.12.1-beta.1 ## [1.12.1-beta.1](https://github.com/splunk/splunk-connect-for-snmp/compare/v1.12.0...v1.12.1-beta.1) (2024-10-18) ### Bug Fixes * refactor enrich ([#1100](https://github.com/splunk/splunk-connect-for-snmp/issues/1100)) ([cbdc062](https://github.com/splunk/splunk-connect-for-snmp/commit/cbdc062819d306df822e1c49b5aa66e4770aed61)) --- charts/splunk-connect-for-snmp/Chart.yaml | 4 ++-- docker_compose/.env | 4 ++-- pyproject.toml | 2 +- splunk_connect_for_snmp/__init__.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/charts/splunk-connect-for-snmp/Chart.yaml b/charts/splunk-connect-for-snmp/Chart.yaml index 3c99ddfa4..1d02088c3 100644 --- a/charts/splunk-connect-for-snmp/Chart.yaml +++ b/charts/splunk-connect-for-snmp/Chart.yaml @@ -14,12 +14,12 @@ type: application # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.12.0 +version: 1.12.1-beta.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "1.12.0" +appVersion: "1.12.1-beta.1" # dependencies: - name: mongodb diff --git a/docker_compose/.env b/docker_compose/.env index e4f563aca..8a177ec38 100644 --- a/docker_compose/.env +++ b/docker_compose/.env @@ -1,12 +1,12 @@ # Deployment configuration SC4SNMP_IMAGE=ghcr.io/splunk/splunk-connect-for-snmp/container -SC4SNMP_TAG="1.12.0" +SC4SNMP_TAG="1.12.1-beta.1" SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH= TRAPS_CONFIG_FILE_ABSOLUTE_PATH= INVENTORY_FILE_ABSOLUTE_PATH= COREFILE_ABS_PATH= COREDNS_ADDRESS=172.28.0.255 -SC4SNMP_VERSION="1.12.0" +SC4SNMP_VERSION="1.12.1-beta.1" IPv6_ENABLED=false # Dependencies images diff --git a/pyproject.toml b/pyproject.toml index 603407701..7d37a98bd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "splunk-connect-for-snmp" -version = "1.12.0" +version = "1.12.1-beta.1" description = "" authors = ["omrozowicz-splunk "] license = "Apache-2.0" diff --git a/splunk_connect_for_snmp/__init__.py b/splunk_connect_for_snmp/__init__.py index b2e19ee2d..97ee6073c 100644 --- a/splunk_connect_for_snmp/__init__.py +++ b/splunk_connect_for_snmp/__init__.py @@ -15,4 +15,4 @@ # -__version__ = "1.12.0" +__version__ = "1.12.1-beta.1" From 6349536c3e93f6b6cd428d3a6e53ee37245b5bbf Mon Sep 17 00:00:00 2001 From: Ilya Kheifets <138466237+ikheifets-splunk@users.noreply.github.com> Date: Wed, 23 Oct 2024 23:40:09 +0200 Subject: [PATCH 04/10] chore: add .semgrepignore to exclude tests from analyze (#1107) Signed-off-by: Ilya Kheifets --- .semgrepignore | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.semgrepignore b/.semgrepignore index e6d226d46..1756918a4 100644 --- a/.semgrepignore +++ b/.semgrepignore @@ -1,2 +1,6 @@ -Dockerfile -integration_tests/ \ No newline at end of file +docs/ +test/ +ui_tests/ +integration_tests/ +rendered/ +examples/ \ No newline at end of file From ab23765b2d24f6b2f0960c131632fff84a7fe120 Mon Sep 17 00:00:00 2001 From: ajasnosz <139114006+ajasnosz@users.noreply.github.com> Date: Mon, 28 Oct 2024 15:31:54 +0100 Subject: [PATCH 05/10] docs: refactor documentation (#1112) --- charts/splunk-connect-for-snmp/values.yaml | 14 +- docs/architecture/design.md | 2 +- docs/architecture/planning.md | 13 +- .../configuration/deployment-configuration.md | 49 --- docs/configuration/worker-configuration.md | 249 ------------ docs/dashboard.md | 45 ++- docs/dockercompose/10-enable-ipv6.md | 1 + docs/dockercompose/2-download-package.md | 2 +- .../3-inventory-configuration.md | 2 +- .../4-scheduler-configuration.md | 6 +- .../dockercompose/6-env-file-configuration.md | 4 +- docs/dockercompose/9-splunk-logging.md | 4 +- docs/gui/enable-gui.md | 46 --- docs/gui/inventory-gui.md | 21 - docs/gui/profiles-gui.md | 35 -- docs/ha.md | 3 +- docs/improved-polling.md | 4 +- docs/index.md | 3 +- docs/mib-request.md | 18 +- .../configuration/configuring-groups.md | 0 .../configuration/configuring-profiles.md | 68 ++-- .../configuration/coredns-configuration.md | 0 .../configuration/deployment-configuration.md | 54 +++ .../configuration/mongo-configuration.md | 0 .../configuration/poller-configuration.md | 43 +- .../configuration/redis-configuration.md | 0 .../configuration/scheduler-configuration.md | 18 +- .../configuration/sim-configuration.md | 7 +- .../configuration/snmp-data-format.md | 7 +- .../configuration/snmpv3-configuration.md | 2 +- .../configuration/step-by-step-poll.md | 18 +- .../configuration/trap-configuration.md | 32 +- .../values-params-description.md | 62 +-- 
.../configuration/worker-configuration.md | 373 ++++++++++++++++++ .../enable-ipv6.md | 8 +- docs/{ => microk8s}/gui/apply-changes.md | 4 +- docs/microk8s/gui/enable-gui.md | 58 +++ docs/{ => microk8s}/gui/groups-gui.md | 12 +- docs/microk8s/gui/inventory-gui.md | 21 + docs/microk8s/gui/profiles-gui.md | 35 ++ .../mk8s/k8s-microk8s-scaling.md | 10 +- .../mk8s/k8s-microk8s.md | 15 +- .../offlineinstallation/offline-microk8s.md | 16 +- .../offlineinstallation/offline-sc4snmp.md | 6 +- .../offlineinstallation/offline-sck.md | 27 +- .../sc4snmp-installation.md | 16 +- .../sck-installation.md | 4 +- .../splunk-requirements.md | 0 docs/{ => microk8s}/upgrade.md | 0 docs/releases.md | 4 +- docs/small-environment.md | 7 +- docs/troubleshooting/configuring-logs.md | 4 +- docs/troubleshooting/k8s-commands.md | 2 +- docs/troubleshooting/polling-issues.md | 11 +- docs/troubleshooting/traps-issues.md | 6 +- examples/offline_installation_values.md | 4 +- examples/polling_and_traps_v3.yaml | 4 +- examples/polling_values.yaml | 2 +- mkdocs.yml | 98 ++--- 59 files changed, 885 insertions(+), 694 deletions(-) delete mode 100644 docs/configuration/deployment-configuration.md delete mode 100644 docs/configuration/worker-configuration.md delete mode 100644 docs/gui/enable-gui.md delete mode 100644 docs/gui/inventory-gui.md delete mode 100644 docs/gui/profiles-gui.md rename docs/{ => microk8s}/configuration/configuring-groups.md (100%) rename docs/{ => microk8s}/configuration/configuring-profiles.md (76%) rename docs/{ => microk8s}/configuration/coredns-configuration.md (100%) create mode 100644 docs/microk8s/configuration/deployment-configuration.md rename docs/{ => microk8s}/configuration/mongo-configuration.md (100%) rename docs/{ => microk8s}/configuration/poller-configuration.md (52%) rename docs/{ => microk8s}/configuration/redis-configuration.md (100%) rename docs/{ => microk8s}/configuration/scheduler-configuration.md (71%) rename docs/{ => microk8s}/configuration/sim-configuration.md (86%) rename docs/{ => microk8s}/configuration/snmp-data-format.md (94%) rename docs/{ => microk8s}/configuration/snmpv3-configuration.md (92%) rename docs/{ => microk8s}/configuration/step-by-step-poll.md (93%) rename docs/{ => microk8s}/configuration/trap-configuration.md (77%) rename docs/{ => microk8s}/configuration/values-params-description.md (93%) create mode 100644 docs/microk8s/configuration/worker-configuration.md rename docs/{gettingstarted => microk8s}/enable-ipv6.md (90%) rename docs/{ => microk8s}/gui/apply-changes.md (75%) create mode 100644 docs/microk8s/gui/enable-gui.md rename docs/{ => microk8s}/gui/groups-gui.md (56%) create mode 100644 docs/microk8s/gui/inventory-gui.md create mode 100644 docs/microk8s/gui/profiles-gui.md rename docs/{gettingstarted => microk8s}/mk8s/k8s-microk8s-scaling.md (96%) rename docs/{gettingstarted => microk8s}/mk8s/k8s-microk8s.md (72%) rename docs/{ => microk8s}/offlineinstallation/offline-microk8s.md (87%) rename docs/{ => microk8s}/offlineinstallation/offline-sc4snmp.md (94%) rename docs/{ => microk8s}/offlineinstallation/offline-sck.md (66%) rename docs/{gettingstarted => microk8s}/sc4snmp-installation.md (93%) rename docs/{gettingstarted => microk8s}/sck-installation.md (96%) rename docs/{gettingstarted => microk8s}/splunk-requirements.md (100%) rename docs/{ => microk8s}/upgrade.md (100%) diff --git a/charts/splunk-connect-for-snmp/values.yaml b/charts/splunk-connect-for-snmp/values.yaml index d57196226..96a4ce1b9 100644 --- a/charts/splunk-connect-for-snmp/values.yaml 
+++ b/charts/splunk-connect-for-snmp/values.yaml @@ -137,7 +137,7 @@ sim: scheduler: ### Group definitions ### # Create the group definition in case you want to configure polling from multiple hosts - # at once, more on this: https://splunk.github.io/splunk-connect-for-snmp/main/configuration/configuring-groups/ + # at once, more on this: https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/configuration/configuring-groups/ #groups: | # example_group_1: @@ -150,7 +150,7 @@ scheduler: ### Profiles definitions ### # Create a profile definition to set varbinds you want to poll from the device. - # more on this: https://splunk.github.io/splunk-connect-for-snmp/main/configuration/configuring-profiles/ + # more on this: https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/configuration/configuring-profiles/ #profiles: | # smart_profile: @@ -195,7 +195,7 @@ scheduler: profiles: "" # mapping MIB fields to custom names - # more: https://splunk.github.io/splunk-connect-for-snmp/main/configuration/configuring-profiles/#custom-translations + # more: https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/configuration/configuring-profiles/#custom-translations customTranslations: {} # set CPU and Memory limits for a scheduler pod @@ -217,7 +217,7 @@ scheduler: poller: # Appending OID indexes to metrics. - # https://splunk.github.io/splunk-connect-for-snmp/main/configuration/poller-configuration/#append-oid-index-part-to-the-metrics + # https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/configuration/poller-configuration/#append-oid-index-part-to-the-metrics metricsIndexingEnabled: false # Enable polling base profiles (with IF-MIB and SNMPv2-MIB) from @@ -229,11 +229,11 @@ poller: maxOidToProcess: 70 # list of kubernetes secrets name that will be used for polling - # https://splunk.github.io/splunk-connect-for-snmp/main/configuration/poller-configuration/#define-usernamesecrets + # https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/configuration/poller-configuration/#define-usernamesecrets usernameSecrets: [] # Here is where polling happens. Learn more on how to configure it here: - # https://splunk.github.io/splunk-connect-for-snmp/main/configuration/poller-configuration/ + # https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/configuration/poller-configuration/ #inventory: | # address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete @@ -242,7 +242,7 @@ poller: worker: # workers are responsible for the actual execution of polling, processing trap messages, and sending data to Splunk. - # More: https://splunk.github.io/splunk-connect-for-snmp/main/configuration/worker-configuration/ + # More: https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/configuration/worker-configuration/ # The poller worker consumes all the tasks related to polling poller: diff --git a/docs/architecture/design.md b/docs/architecture/design.md index dc4a9bcac..3a5bee804 100644 --- a/docs/architecture/design.md +++ b/docs/architecture/design.md @@ -1,7 +1,7 @@ # Architecture SC4SNMP is deployed using a Kubernetes distribution, typically MicroK8s, -that's designed to be a low-touch experience for integration with sensitive +that is designed to be a low-touch experience for integration with sensitive edge network devices. It will typically be deployed in the same network management zone as the monitored devices and separated from Splunk by an existing firewall. 
diff --git a/docs/architecture/planning.md b/docs/architecture/planning.md index 0d51a2bac..f1df88fb1 100644 --- a/docs/architecture/planning.md +++ b/docs/architecture/planning.md @@ -6,19 +6,14 @@ available. ## Requirements -- A supported deployment of MicroK8s - +- A supported deployment of MicroK8s or Docker Compose - 16 Core/32 threads x64 architecture server or virtual machine (single instance) 12 GB ram - - HA Requires 3 or more instances (odd numbers) 8 core/16 thread 16 GB ram - - 50 GB root mount - - HTTP access (non-proxy) allowed for the HTTP(s) connection from SC4SNMP to the Splunk destination. - - Splunk Enterprise/Cloud 8.x or newer and/or Splunk Infrastructure Monitoring (SignalFx) @@ -30,6 +25,10 @@ A single installation of Splunk Connect for SNMP (SC4SNMP) on a machine with 16 Core/32 threads x64 and 64 GB RAM will be able to handle up to 1500 SNMP TRAPs per second. -A single installation of Splunk Connect for SNMP (SC4SNMP) on a machine with 16 Core/32 threads x64 and 64 GB RAM is able to handle up to 2750 SNMP varbinds per second. As for events per second that are visible in Splunk, a single SC4SNMP event can contain more than one varbind inside, which is an automatic grouping feature. For example, the network interface would be grouped into one event, with varbinds grouped together to describe the same thing. That is why, depending on the configuration, the number of events per second may vary. +A single installation of Splunk Connect for SNMP (SC4SNMP) on a machine with 16 Core/32 threads x64 and 64 GB RAM is able +to handle up to 2750 SNMP varbinds per second. As for events per second that are visible in Splunk, a single SC4SNMP event +can contain more than one varbind inside, which is an automatic grouping feature. For example, the network interface would +be grouped into one event, with varbinds grouped together to describe the same thing. That is why, depending on the configuration, +the number of events per second may vary. diff --git a/docs/configuration/deployment-configuration.md b/docs/configuration/deployment-configuration.md deleted file mode 100644 index 5cf546090..000000000 --- a/docs/configuration/deployment-configuration.md +++ /dev/null @@ -1,49 +0,0 @@ -#Deployment Configuration - -`values.yaml` is the main point of SC4SNMP management. You can check all the default values of Helm dependencies using the following command: - -``` -microk8s helm3 inspect values splunk-connect-for-snmp/splunk-connect-for-snmp > values.yaml -``` - -The whole file is divided into the following parts: - -To configure the endpoint for sending SNMP data: - -- `splunk` - in case you use Splunk Enterprise/Cloud -- `sim` - in case you use Splunk Observability Cloud. 
More details: [sim configuration](sim-configuration.md) - -For polling purposes: - -- `scheduler` - more details: [scheduler configuration](scheduler-configuration.md) -- `poller` - more details: [poller configuration](poller-configuration.md) - -For traps receiving purposes: - -- `traps` - more details: [trap configuration](trap-configuration.md) - -Shared components: - -- `worker` - more details: [worker configuration](worker-configuration.md) -- `mongodb` - more details: [mongo configuration](mongo-configuration.md) -- `redis` - more details: [redis configuration](redis-configuration.md) - -### Shared values -All the components have the following `resources` field for adjusting memory resources: -```yaml - resources: - limits: - cpu: 1000m - memory: 2Gi - requests: - cpu: 1000m - memory: 2Gi -``` -For more information about the concept of `resources`, see the [kuberentes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). - -There is an option to create common annotations across all services. It can be set by: - -```yaml -commonAnnotations: - annotation_key: annotation_value -``` diff --git a/docs/configuration/worker-configuration.md b/docs/configuration/worker-configuration.md deleted file mode 100644 index 6734cd2c6..000000000 --- a/docs/configuration/worker-configuration.md +++ /dev/null @@ -1,249 +0,0 @@ -# Worker Configuration -The `worker` is a kubernetes pod which is responsible for the actual execution of polling, processing trap messages, and sending -data to Splunk. - -### Worker types - -SC4SNMP has two base functionalities: monitoring traps and polling. These operations are handled by 3 types of workers: - -1. The `trap` worker consumes all the trap related tasks produced by the trap pod. - -2. The `poller` worker consumes all the tasks related to polling. - -3. The `sender` worker handles sending data to Splunk. You need to always have at least one sender pod running. - -### Worker configuration file - -Worker configuration is kept in the `values.yaml` file in the `worker` section. `worker` has 3 subsections: `poller`, `sender`, or `trap`, that refer to the workers' types. -`values.yaml` is used during the installation process for configuring Kubernetes values. 
-The `worker` default configuration is the following: - -```yaml -worker: - # There are 3 types of workers - trap: - # replicaCount: number of trap-worker pods which consumes trap tasks - replicaCount: 2 - # Use reverse dns lookup of trap ip address and send the hostname to splunk - resolveAddress: - enabled: false - cacheSize: 500 # maximum number of records in cache - cacheTTL: 1800 # time to live of the cached record in seconds - #autoscaling: use it instead of replicaCount in order to make pods scalable by itself - #autoscaling: - # enabled: true - # minReplicas: 2 - # maxReplicas: 10 - # targetCPUUtilizationPercentage: 80 - poller: - # replicaCount: number of poller-worker pods which consumes polling tasks - replicaCount: 2 - #autoscaling: use it instead of replicaCount in order to make pods scalable by itself - #autoscaling: - # enabled: true - # minReplicas: 2 - # maxReplicas: 10 - # targetCPUUtilizationPercentage: 80 - sender: - # replicaCount: number of sender-worker pods which consumes sending tasks - replicaCount: 1 - # autoscaling: use it instead of replicaCount in order to make pods scalable by itself - #autoscaling: - # enabled: true - # minReplicas: 2 - # maxReplicas: 10 - # targetCPUUtilizationPercentage: 80 - # udpConnectionTimeout: timeout in seconds for SNMP operations - #udpConnectionTimeout: 5 - logLevel: "INFO" -``` - -All parameters are described in the [Worker parameters](#worker-parameters) section. - - -### Worker scaling - -You can adjust worker pods in two ways: set fixed value in `replicaCount`, -or enable `autoscaling`, which scales pods automatically. - -#### Real life scenario: I use SC4SNMP for only trap monitoring, and I want to use my resources effectively. - -If you don't use polling at all, set `worker.poller.replicaCount` to `0`. -If you want to use polling in the future, you need to increase `replicaCount`. To monitor traps, adjust `worker.trap.replicaCount` depending on your needs and `worker.sender.replicaCount` to send traps to Splunk. Usually, you need significantly fewer sender pods than trap pods. - -The following is an example of `values.yaml` without using autoscaling: - -```yaml -worker: - trap: - replicaCount: 4 - sender: - replicaCount: 1 - poller: - replicaCount: 0 - logLevel: "WARNING" -``` - -The following is an example of `values.yaml` with autoscaling: - -```yaml -worker: - trap: - autoscaling: - enabled: true - minReplicas: 4 - maxReplicas: 10 - targetCPUUtilizationPercentage: 80 - sender: - autoscaling: - enabled: true - minReplicas: 2 - maxReplicas: 5 - targetCPUUtilizationPercentage: 80 - poller: - replicaCount: 0 - logLevel: "WARNING" -``` - -In the previous example, both trap and sender pods are autoscaled. During an upgrade process, the number of pods is created through -`minReplicas`, and then new ones are created only if the CPU threshold -exceeds the `targetCPUUtilizationPercentage`, which by default is 80%. This solution helps you to keep -resources usage adjusted to what you actually need. 
- -After the helm upgrade process, you will see `horizontalpodautoscaler` in `microk8s kubectl get all -n sc4snmp`: - -```yaml -NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE -horizontalpodautoscaler.autoscaling/snmp-mibserver Deployment/snmp-mibserver 1%/80% 1 3 1 97m -horizontalpodautoscaler.autoscaling/snmp-splunk-connect-for-snmp-worker-sender Deployment/snmp-splunk-connect-for-snmp-worker-sender 1%/80% 2 5 2 28m -horizontalpodautoscaler.autoscaling/snmp-splunk-connect-for-snmp-worker-trap Deployment/snmp-splunk-connect-for-snmp-worker-trap 1%/80% 4 10 4 28m -``` - -If you see `/80%` in the `TARGETS` section instead of the CPU percentage, you probably don't have the `metrics-server` add-on enabled. -Enable it using `microk8s enable metrics-server`. - - -#### Real life scenario: I have a significant delay in polling - -Sometimes when polling is configured to be run frequently and on many devices, workers get overloaded -and there is a delay in delivering data to Splunk. To avoid these situations, scale poller and sender pods. -Because of the walk cycles, (walk is a costly operation that is only run once in a while), poller workers require more resources -for a short time. For this reason, enabling autoscaling is recommended. - -See the following example of `values.yaml` with autoscaling: - -```yaml -worker: - trap: - autoscaling: - enabled: true - minReplicas: 4 - maxReplicas: 10 - targetCPUUtilizationPercentage: 80 - sender: - autoscaling: - enabled: true - minReplicas: 2 - maxReplicas: 5 - targetCPUUtilizationPercentage: 80 - poller: - autoscaling: - enabled: true - minReplicas: 2 - maxReplicas: 20 - targetCPUUtilizationPercentage: 80 - logLevel: "WARNING" -``` - -Remember that the system won’t scale itself infinitely. There is a finite amount of resources that you can allocate. By default, every worker has configured the following resources: - -```yaml - resources: - limits: - cpu: 500m - requests: - cpu: 250m -``` - - -#### I have autoscaling enabled and experience problems with Mongo and Redis pod - -If MongoDB and Redis pods are crushing, and some of the pods are in an infinite `Pending` state, that means -you've exhausted your resources and SC4SNMP cannot scale more. You should decrease the number of `maxReplicas` in -workers, so that it's not going beyond the available CPU. - -#### I don't know how to set autoscaling parameters and how many replicas I need - -The best way to see if pods are overloaded is to run the following command: - -```yaml -microk8s kubectl top pods -n sc4snmp -``` - -```yaml -NAME CPU(cores) MEMORY(bytes) -snmp-mibserver-7f879c5b7c-nnlfj 1m 3Mi -snmp-mongodb-869cc8586f-q8lkm 18m 225Mi -snmp-redis-master-0 10m 2Mi -snmp-splunk-connect-for-snmp-scheduler-558dccfb54-nb97j 2m 136Mi -snmp-splunk-connect-for-snmp-trap-5878f89bbf-24wrz 2m 129Mi -snmp-splunk-connect-for-snmp-trap-5878f89bbf-z9gd5 2m 129Mi -snmp-splunk-connect-for-snmp-worker-poller-599c7fdbfb-cfqjm 260m 354Mi -snmp-splunk-connect-for-snmp-worker-poller-599c7fdbfb-ztf7l 312m 553Mi -snmp-splunk-connect-for-snmp-worker-sender-579f796bbd-vmw88 14m 257Mi -snmp-splunk-connect-for-snmp-worker-trap-5474db6fc6-46zhf 3m 259Mi -snmp-splunk-connect-for-snmp-worker-trap-5474db6fc6-mjtpv 4m 259Mi -``` - -Here you can see how much CPU and Memory is being used by the pods. If the CPU is close to 500m, which is the limit for one pod by default, -enable autoscaling/increase maxReplicas or increase replicaCount with autoscaling off. 
- - -See [Horizontal Autoscaling.](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to adjust the maximum replica value to the resources you have. - -### Reverse DNS lookup in trap worker - -If you want to see the hostname instead of the IP address of the incoming traps in Splunk, you can enable reverse dns lookup -for the incoming traps using the following configuration: - -```yaml -worker: - trap: - resolveAddress: - enabled: true - cacheSize: 500 # maximum number of records in cache - cacheTTL: 1800 # time to live of the cached record in seconds -``` - -Trap worker uses in memory cache to store the results of the reverse dns lookup. If you restart the worker, the cache will be cleared. - -### Worker parameters - -| Variable | Description | Default | -|-----------------------------------------------------------|--------------------------------------------------------------------------------------|---------| -| worker.taskTimeout | Task timeout in seconds (usually necessary when the walk process takes a long time) | 2400 | -| worker.walkRetryMaxInterval | Maximum time interval between walk attempts | 180 | -| worker.poller.replicaCount | Number of poller worker replicas | 2 | -| worker.poller.autoscaling.enabled | Enabling autoscaling for poller worker pods | false | -| worker.poller.autoscaling.minReplicas | Minimum number of running poller worker pods when autoscaling is enabled | 2 | -| worker.poller.autoscaling.maxReplicas | Maximum number of running poller worker pods when autoscaling is enabled | 40 | -| worker.poller.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on poller worker pods to spawn another replica | 80 | -| worker.poller.resources.limits | The resources limits for poller worker container | {} | -| worker.poller.resources.requests | The requested resources for poller worker container | {} | -| worker.trap.replicaCount | Number of trap worker replicas | 2 | -| worker.trap.resolveAddress.enabled | Enable reverse dns lookup of the IP address of the processed trap | false | -| worker.trap.resolveAddress.cacheSize | Maximum number of reverse dns lookup result records stored in cache | 500 | -| worker.trap.resolveAddress.cacheTTL | Time to live of the cached reverse dns lookup record in seconds | 1800 | -| worker.trap.autoscaling.enabled | Enabling autoscaling for trap worker pods | false | -| worker.trap.autoscaling.minReplicas | Minimum number of running trap worker pods when autoscaling is enabled | 2 | -| worker.trap.autoscaling.maxReplicas | Maximum number of running trap worker pods when autoscaling is enabled | 40 | -| worker.trap.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on trap worker pods to spawn another replica | 80 | -| worker.trap.resources.limits | The resource limit for the poller worker container | {} | -| worker.trap.resources.requests | The requested resources for the poller worker container | {} | -| worker.sender.replicaCount | The number of sender worker replicas | 2 | -| worker.sender.autoscaling.enabled | Enabling autoscaling for sender worker pods | false | -| worker.sender.autoscaling.minReplicas | Minimum number of running sender worker pods when autoscaling is enabled | 2 | -| worker.sender.autoscaling.maxReplicas | Maximum number of running sender worker pods when autoscaling is enabled | 40 | -| worker.sender.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on sender worker pods to spawn another replica | 80 | 
-| worker.sender.resources.limits | The resource limit for the poller worker container | {} | -| worker.sender.resources.requests | The requested resources for the poller worker container | {} | diff --git a/docs/dashboard.md b/docs/dashboard.md index 0df053cf0..63b93f3b6 100644 --- a/docs/dashboard.md +++ b/docs/dashboard.md @@ -1,36 +1,39 @@ # Dashboard -Using dashboard you can monitor SC4SNMP and be sure that is healthy and working correctly. - +The dashboard is a monitoring tool to ensure that SC4SNMP is working correctly. It is a set of charts that +show the status of SC4SNMP tasks. ## Presetting !!! info - Dashboard compatible with SC4SNMP 1.11+ + Dashboard is compatible starting from version 1.11.0 -1. [Create metrics indexes](gettingstarted/splunk-requirements.md#requirements-for-splunk-enterprise-or-enterprise-cloud) in Splunk. +1. [Create metrics index](microk8s/splunk-requirements.md#requirements-for-splunk-enterprise-or-enterprise-cloud) in Splunk. 2. Enable metrics logging for your runtime: - * For K8S install [Splunk OpenTelemetry Collector for K8S](gettingstarted/sck-installation.md) - * For docker-compose use [Splunk logging driver for docker](dockercompose/9-splunk-logging.md) + * For Kubernetes install [Splunk OpenTelemetry Collector for K8S](microk8s/sck-installation.md) + * For Docker Compose use [Splunk logging driver for docker](dockercompose/9-splunk-logging.md) ## Install dashboard 1. In Splunk platform open **Search -> Dashboards**. -2. Click on **Create New Dashboard** and make an empty dashboard. Be sure to choose Classic Dashboards. -3. In the **Edit Dashboard** view, go to Source and replace the initial xml with the contents of **dashboard.xml**. This file you cand find on [release page](https://github.com/splunk/splunk-connect-for-snmp/releases) for your version in attachments. -4. Save your changes. Your dashboard is ready to use. - +2. Click on **Create New Dashboard** and make an empty dashboard. Be sure to choose **Classic Dashboards**. +3. In the **Edit Dashboard** view, go to **Source** and replace the initial xml with the contents of **dashboard.xml**. + The file can be found on [release page](https://github.com/splunk/splunk-connect-for-snmp/releases) in + attachments under your SC4SNMP version. +4. Save your changes. The dashboard is ready to use. ## Metrics explanation ### Polling dashboards -To check that polling on your device is working correctly first of all check **SNMP schedule of polling tasks** dashboard. -Using this chart you can understand when SC4SNMP scheduled polling for your SNMP device last time. The process works if it runs regularly. +To check that polling on your device is working correctly, look at **SNMP schedule of polling tasks** dashboard. +With this chart you can understand when SC4SNMP scheduled polling for your device last time. The process works if +it runs regularly. After double-checking that SC4SNMP scheduled polling tasks for your SNMP device we need to be sure that polling is working. For that look at another dashboard **SNMP polling status** and if everything is working you will see only **succeeded** status of polling. -If something is going wrong you will see also another status (like on screenshot), then use [troubleshooting docs for that](bestpractices.md) +If something is going wrong you will see also another status (like on screenshot), then use [troubleshooting docs +for that](troubleshooting/polling-issues.md). 
![Polling dashboards](images/dashboard/polling_dashboard.png) @@ -39,22 +42,28 @@ If something is going wrong you will see also another status (like on screenshot To check that walk on your device is working correctly first of all check **SNMP schedule of walk tasks** dashboard. Using this chart you can understand when SC4SNMP scheduled walk for your SNMP device last time. The process works if it runs regularly. -After double-checking that SC4SNMP scheduled walk tasks for your SNMP device we need to be sure walk is working. +After double-checking that SC4SNMP scheduled walk tasks for your SNMP device we need to be sure walk is running. For that look at another dashboard **SNMP walk status** and if everything is working you will see only **succeeded** status of walk. -If something is going wrong you will see also another status (like on screenshot), then use [troubleshooting docs for that](bestpractices.md) +If something is going wrong you will see another status (like on screenshot), then use [troubleshooting docs +for that](troubleshooting/polling-issues.md). ![Walk dashboards](images/dashboard/walk_dashboard.png) ### Trap dashboards -First of all check **SNMP traps authorisation** dashboard, if you see only **succeeded** status it means that authorisation is configured correctly, otherwise please use [troubleshooting docs for that](bestpractices.md#identifying-traps-issues). +First of all check **SNMP traps authorisation** dashboard, if you see only **succeeded** status it means that authorisation +is configured correctly, otherwise please use [troubleshooting docs for that](troubleshooting/traps-issues.md). -After checking that we have not any authorisation traps issues we can check that trap tasks are working correctly. For that we need to go **SNMP trap status** dashboard, if we have only **succeeded** status it means that everything is working, otherwise we will see information with another status. +After checking that we do not have any authorisation traps issues we can check that trap tasks are working correctly. +For that we need to go **SNMP trap status** dashboard, if we have only **succeeded** status it means that everything is working, +otherwise we will see information with another status. ![Trap dashboards](images/dashboard/trap_dashboard.png) ### Other dashboards -We also have tasks that will be a callback for walk and poll. For example **send** will publish result in Splunk. We need to be sure that after successful walk and poll this callbacks finished. Please check that we have only successful status for this tasks. +We also have tasks that will be a callback for walk and poll. For example **send** will publish result in Splunk. +We need to be sure that after successful walk and poll those callbacks have completed. Please check that we have only +successful status for those tasks. ![Other dashboards](images/dashboard/other_dashboard.png) \ No newline at end of file diff --git a/docs/dockercompose/10-enable-ipv6.md b/docs/dockercompose/10-enable-ipv6.md index 6d73ea32a..93e9f305a 100644 --- a/docs/dockercompose/10-enable-ipv6.md +++ b/docs/dockercompose/10-enable-ipv6.md @@ -13,5 +13,6 @@ The default subnet used for SC4SNMP network in docker is `fd02::/64`, this and o changed in the `docker-compose-network.yml` file. Default trap port for notifications for IPv6 is `2163`. You can change it to any other port if needed with `IPv6_TRAPS_PORT` parameter in `.env` file. +The IPv6 port and IPv4 port cannot be the same. 
For more information about IPv6 networking in docker, you can check the [official Docker documentation](https://docs.docker.com/engine/daemon/ipv6/). \ No newline at end of file diff --git a/docs/dockercompose/2-download-package.md b/docs/dockercompose/2-download-package.md index 7534e822e..cd23b953a 100644 --- a/docs/dockercompose/2-download-package.md +++ b/docs/dockercompose/2-download-package.md @@ -6,7 +6,7 @@ Package with docker compose configuration files (`docker_compose.zip`) can be do ## Configuration To configure the deployment, follow the instructions in [Inventory configuration](./3-inventory-configuration.md), [Scheduler configuration](./4-scheduler-configuration.md), [Traps configuration](./5-traps-configuration.md), -[.env file configuration](./6-env-file-configuration.md), [SNMPv3 secrets](./7-snmpv3-secrets.md) +[.env file configuration](./6-env-file-configuration.md), [SNMPv3 secrets](./7-snmpv3-secrets.md). ## Deploying the app After configuration, application can be deployed by running the diff --git a/docs/dockercompose/3-inventory-configuration.md b/docs/dockercompose/3-inventory-configuration.md index b14937fa2..0e5eca25a 100644 --- a/docs/dockercompose/3-inventory-configuration.md +++ b/docs/dockercompose/3-inventory-configuration.md @@ -1,7 +1,7 @@ # Inventory configuration Inventory configuration is stored in the `inventory.csv` file. Structure of this file is the same as the one of the -`poller.inventory` section in `values.yaml` file. Documentation of this section can be found in [configure inventory](../configuration/poller-configuration.md#configure-inventory). +`poller.inventory` section in `values.yaml` file. Documentation of this section can be found in [configure inventory](../microk8s/configuration/poller-configuration.md#configure-inventory). ## Example of the configuration diff --git a/docs/dockercompose/4-scheduler-configuration.md b/docs/dockercompose/4-scheduler-configuration.md index f1dc20d67..20da8eb8f 100644 --- a/docs/dockercompose/4-scheduler-configuration.md +++ b/docs/dockercompose/4-scheduler-configuration.md @@ -17,9 +17,9 @@ groups: ``` - `communities`: communities used for version `1` and `2c` of the `snmp`. The default one is `public`. -- `customTranslations`: configuration of the custom translations. Configuration of this section looks the same as in the `values.yaml` in `scheduler.customTranslations` section, which can be checked in the documentation of [custom translations](../configuration/configuring-profiles.md#custom-translations). -- `profiles`: configuration of the profiles. Configuration of this section looks the same as in the `values.yaml` in `scheduler.profiles` section, which can be checked in the documentation og [profiles configuration](../configuration/configuring-profiles.md). -- `groups`: configuration of the groups. Configuration of this section looks the same as in the `values.yaml` in `scheduler.groups` section, which can be checked in the documentation of [groups configuration](../configuration/configuring-groups.md). +- `customTranslations`: configuration of the custom translations. Configuration of this section looks the same as in the `values.yaml` in `scheduler.customTranslations` section, which can be checked in the documentation of [custom translations](../microk8s/configuration/configuring-profiles.md#custom-translations). +- `profiles`: configuration of the profiles. 
Configuration of this section looks the same as in the `values.yaml` in `scheduler.profiles` section, which can be checked in the documentation of [profiles configuration](../microk8s/configuration/configuring-profiles.md). +- `groups`: configuration of the groups. Configuration of this section looks the same as in the `values.yaml` in `scheduler.groups` section, which can be checked in the documentation of [groups configuration](../microk8s/configuration/configuring-groups.md). ## Example of the configuration diff --git a/docs/dockercompose/6-env-file-configuration.md b/docs/dockercompose/6-env-file-configuration.md index e3c3caa72..9921db035 100644 --- a/docs/dockercompose/6-env-file-configuration.md +++ b/docs/dockercompose/6-env-file-configuration.md @@ -12,7 +12,7 @@ Inside the directory with the docker compose files, there is a `.env`. Variables | `TRAPS_CONFIG_FILE_ABSOLUTE_PATH` | Absolute path to [traps-config.yaml](./5-traps-configuration.md) file | | `INVENTORY_FILE_ABSOLUTE_PATH` | Absolute path to [inventory.csv](./3-inventory-configuration.md) file | | `COREFILE_ABS_PATH` | Absolute path to Corefile used by coreDNS. Default Corefile can be found inside the `docker_compose` | -| `COREDNS_ADDRESS` | IP address of the coredns inside docker network. Shouldn’t be changed | +| `COREDNS_ADDRESS` | IP address of the coredns inside docker network. Should not be changed | | `SC4SNMP_VERSION` | Version of SC4SNMP | | `IPv6_ENABLED` | Enable receiving traps and polling from IPv6 devices | @@ -56,7 +56,7 @@ Inside the directory with the docker compose files, there is a `.env`. Variables |------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------| | `WALK_RETRY_MAX_INTERVAL` | Maximum time interval between walk attempts | | `WALK_MAX_RETRIES` | Maximum number of walk retries | -| `METRICS_INDEXING_ENABLED` | Details can be found in [append oid index part to the metrics](../configuration/poller-configuration.md#append-oid-index-part-to-the-metrics) | +| `METRICS_INDEXING_ENABLED` | Details can be found in [append oid index part to the metrics](../microk8s/configuration/poller-configuration.md#append-oid-index-part-to-the-metrics) | | `POLL_BASE_PROFILES` | Enable polling base profiles (with IF-MIB and SNMPv2-MIB) | | `IGNORE_NOT_INCREASING_OIDS` | Ignoring `occurred: OID not increasing` issues for hosts specified in the array, ex: IGNORE_NOT_INCREASING_OIDS=127.0.0.1:164,127.0.0.6 | | `WORKER_LOG_LEVEL` | Logging level of the workers, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL | diff --git a/docs/dockercompose/9-splunk-logging.md b/docs/dockercompose/9-splunk-logging.md index 229f8862e..7de23c2e3 100644 --- a/docs/dockercompose/9-splunk-logging.md +++ b/docs/dockercompose/9-splunk-logging.md @@ -35,7 +35,7 @@ python3 manage_logs.py --path_to_compose /home/ubuntu/docker_compose --enable_lo ``` The script will add required configuration for logging under services in docker compose files. 
-To apply the changes run the +To apply the changes run the: ``` sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d ``` @@ -57,7 +57,7 @@ Example of disabling logs: python3 manage_logs.py --path_to_compose /home/ubuntu/docker_compose --disable_logs ``` -To apply the changes run the +To apply the changes run the: ``` sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d ``` diff --git a/docs/gui/enable-gui.md b/docs/gui/enable-gui.md deleted file mode 100644 index b159ea6bf..000000000 --- a/docs/gui/enable-gui.md +++ /dev/null @@ -1,46 +0,0 @@ -# SC4SNMP GUI - -SC4SNMP GUI is deployed in kubernetes and can be accessed through the web browser. - -## Enabling GUI - -To enable GUI, the following section must be added to `values.yaml` file and `UI.enable` variable must be set to `true`: - -```yaml -UI: - enable: true - frontEnd: - NodePort: 30001 - pullPolicy: "Always" - backEnd: - NodePort: 30002 - pullPolicy: "Always" - valuesFileDirectory: "" - valuesFileName: "" - keepSectionFiles: true -``` -- `NodePort`: port number on which GUI will be accessible. It has to be from a range `30000-32767`. -- `pullPolicy`: [kubernetes pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) -- `valuesFileDirectory`: this is an obligatory field if UI is used. It is an absolute directory path on the host machine where configuration files from the GUI will be generated. It is used to keep all the changes from the GUI so that users can easily switch back from using UI to the current sc4snmp version. It is advised to create new folder for those files, because this directory is mounted to the Kubernetes pod and GUI application has full write access to this directory. -- `valuesFileName`: [OPTIONAL] full name of the file with configuration (e.g. `values.yaml`) that is stored inside the `valuesFileDirectory` directory. If this file name is provided, and it exists in this directory, then GUI will update appropriate sections in provided `values.yaml` file. If this file name is not provided, or provided file name can’t be found inside `valuesFileDirectory` then inside that directory there will be created three files with the latest GUI configuration of groups, profiles and inventory. Those configuration can be copied and pasted to the appropriate sections in the original `values.yaml` file. - - Template of initial `values.yaml`: - - ```yaml - scheduler: - profiles: | - - groups: | - - poller: - inventory: |- - ``` - > This part of configuration can be also pasted to the `values.yaml` used for SC4SNMP installation. - -- `keepSectionFiles`: if valid `valuesFileName` was provided then by setting this variable to `true` or `false` user can decide whether to keep additional files with configuration of groups, profiles and inventory. If valid `valuesFileName` was NOT provided, then those files are created regardless of this variable. - - -To access the GUI, in the browser type the IP address of your Microk8s cluster followed by the NodePort number from the frontEnd section, e.g. `192.168.123.13:30001`. - - - diff --git a/docs/gui/inventory-gui.md b/docs/gui/inventory-gui.md deleted file mode 100644 index 0c6f52aa0..000000000 --- a/docs/gui/inventory-gui.md +++ /dev/null @@ -1,21 +0,0 @@ -# Configuring inventory in GUI - -SC4SNMP [inventory](../configuration/poller-configuration.md#poller-configuration-file) can be configured in `Inventory` tab. - -![Profiles tab](../images/ui_docs/inventory/inventory_tab.png){ style="border:2px solid" } - -
- -After pressing `Add device/group` button, new single device or group can be added. -Configuration of the device is the same as in the `inventory.yaml` file [(check here)](../configuration/poller-configuration.md#poller-configuration-file). - - -![New device/group](../images/ui_docs/inventory/add_device.png){style="border:2px solid; width:500px; height:auto" } - -
- -To edit a device or group, click the pencil icon next in the desired row. - - -![Edit device](../images/ui_docs/inventory/edit_device.png){style="border:2px solid; width:500px; height:auto" } -![Edit group](../images/ui_docs/inventory/edit_group.png){style="border:2px solid; width:500px; height:auto" } \ No newline at end of file diff --git a/docs/gui/profiles-gui.md b/docs/gui/profiles-gui.md deleted file mode 100644 index 4294a2082..000000000 --- a/docs/gui/profiles-gui.md +++ /dev/null @@ -1,35 +0,0 @@ -# Configuring profiles in GUI - -SC4SNMP [profiles](../configuration/configuring-profiles.md) can be configured in `Profiles` tab. - -![Profiles tab](../images/ui_docs/profiles/profiles_list.png){ style="border:2px solid" } - -
- -After pressing `Add profile` button, new profile will be added. -Configuration of the profile is the same as in the `values.yaml` file [(check here)](../configuration/configuring-profiles.md). - - -![Add standard profile](../images/ui_docs/profiles/add_standard_profile.png){style="border:2px solid; width:500px; height:auto" } - -
- -Type of the profile can be changed: - - -![Profile types](../images/ui_docs/profiles/profiles_types.png){ style="border:2px solid; width:500px; height:auto" } - -
- -Examples of configuration of `Smart` and `Conditional` profiles: - - -![Smart profile](../images/ui_docs/profiles/add_smart_profile.png){ style="border:2px solid; width:500px; height:auto" } -![Conditional profile](../images/ui_docs/profiles/add_conditional.png){ style="border:2px solid; width:500px; height:auto" } - -
- -All configured profiles can be edited by clicking the pencil icon: - - -![Edit confitional profile](../images/ui_docs/profiles/edit_conditional.png){ style="border:2px solid; width:500px; height:auto" } \ No newline at end of file diff --git a/docs/ha.md b/docs/ha.md index ea811088d..288749d10 100644 --- a/docs/ha.md +++ b/docs/ha.md @@ -4,7 +4,8 @@ The SNMP protocol uses UDP as the transport protocol. Network reliability is a c Consider network architecture when designing for high availability: * When using a single node collector, ensure automatic recovery from virtual infrastructure, such as VMware or Openstack. -* When using a multi-node cluster, ensure nodes are not located in a way where the majority of nodes can be lost. For example, consider row, rack, network, power, and storage. +* When using a multi-node cluster, ensure nodes are not located in a way where the majority of nodes can be lost. +For example, consider row, rack, network, power and storage. * When determining the placement of clusters, the closest location by the number of network hops should be used. * For "data center" applications, collection should be local to the data center. * Consider using IP Anycast. diff --git a/docs/improved-polling.md b/docs/improved-polling.md index 3e824e991..b4651c66d 100644 --- a/docs/improved-polling.md +++ b/docs/improved-polling.md @@ -2,7 +2,7 @@ SC4SNMP now offers beta support for improved polling performance. -While this is in beta, we encourage users to explore it. Although we've conducted extensive testing, occasional issues may arise. +While this is in beta, we encourage users to explore it. Although we have conducted extensive testing, occasional issues may arise. Your feedback during this phase is crucial in refining and optimizing and can be shared using [issues](https://github.com/splunk/splunk-connect-for-snmp/issues). To get started, the zip file with helm chart must be downloaded. It can be found on [feat/improve-polling-time](https://github.com/splunk/splunk-connect-for-snmp/pull/976/checks) branch. @@ -26,7 +26,7 @@ image: Change the directory to `charts/splunk-connect-for-snmp` and run `microk8s helm3 dep update`. You can exit `charts/splunk-connect-for-snmp` directory. While running `microk8s helm3 install` or `microk8s helm3 upgrade` commands, path to the helm chart must be modified. -In the [sc4snmp installation](./gettingstarted/sc4snmp-installation.md#install-sc4snmp) documentation, the following commands are presented: +In the [sc4snmp installation](./microk8s/sc4snmp-installation.md#install-sc4snmp) documentation, the following commands are presented: ``` bash microk8s helm3 install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace ``` diff --git a/docs/index.md b/docs/index.md index ee4a75864..0b23b2726 100644 --- a/docs/index.md +++ b/docs/index.md @@ -7,7 +7,8 @@ Splunk Connect for SNMP is an edge-deployed, containerized, and highly available solution for collecting SNMP data for Splunk Enterprise, Splunk Enterprise Cloud, and Splunk Infrastructure Monitoring. -SC4SNMP provides context-full information. It not only forwards SNMP data to Splunk, but also integrates the data into meaningful objects. For example, you don't need to write queries in order to gather information about +SC4SNMP provides context-full information. It not only forwards SNMP data to Splunk, but also integrates the data into +meaningful objects. 
For example, you do not need to write queries in order to gather information about interfaces of the device, because SC4SNMP does that automatically: [![Interface metrics](images/interface_metrics.png)](images/interface_metrics.png) diff --git a/docs/mib-request.md b/docs/mib-request.md index c63224524..141e565c2 100644 --- a/docs/mib-request.md +++ b/docs/mib-request.md @@ -6,17 +6,18 @@ They are stored in the MIB server, which is one of the components of SC4SNMP. See the following link for a list of currently available MIBs: [https://pysnmp.github.io/mibs/index.csv](https://pysnmp.github.io/mibs/index.csv) -An alternative way to check if the MIB you're interested in is being served is to check the following link: +An alternative way to check if the MIB you are interested in is being served is to check the following link: `https://pysnmp.github.io/mibs/asn1/@mib@` where `@mib@` is the name of MIB, for example, `IF-MIB`. If the file is downloading, that means the MIB file exists in the MIB server. -## Submit a new MIB file +## Submit new MIB file In case you want to add a new MIB file to the MIB server, see the following steps: 1. Create a fork of the [https://github.com/pysnmp/mibs](https://github.com/pysnmp/mibs) repository. -2. Put one or more MIB files under `src/vendor/@vendor_name@` where `@vendor_name@` is the name of the MIB file's vendor. If there is currently no directory of vendors that you need, create it yourself. +2. Put one or more MIB files under `src/vendor/@vendor_name@` where `@vendor_name@` is the name of the MIB file's vendor. +If there is currently no directory of vendors that you need, create it yourself. 3. Create a pull request to a `main` branch. @@ -30,13 +31,14 @@ the vendor. ## Update your instance of SC4SNMP with the newest MIB server Usually SC4SNMP is released with the newest version of MIB server every time the new MIB files are added. -But, if you want to use the newest MIB server right after its released, you can do it manually using the `values.yaml` file: +But, if you want to use the newest MIB server right after it is released, you can do it manually using the `values. +yaml` file: 1. Append `mibserver` configuration to the values.yaml, with the `mibserver.image.tag` of a value of the newest `mibserver`, for example: ``` mibserver: image: - tag: "1.14.5" + tag: "1.15.13" ``` Check all the MIB server releases in https://github.com/pysnmp/mibs/releases. @@ -60,7 +62,7 @@ In order to add your MIB files to the MIB server in standalone SC4SNMP installat 1. Create or choose a directory on the machine where SC4SNMP is installed, for example, `/home/user/local_mibs`. 2. Create vendor directories inside. For example, if you have MIB files from `VENDOR1` and `VENDOR2`, create `/home/user/local_mibs/VENDOR1` and `/home/user/local_mibs/VENDOR2` and put files inside accordingly. Putting wrong -vendor names won't make compilation fail, this is more for the logging purposes. Segregating your files will make +vendor names will not make compilation fail, this is more for the logging purposes. Segregating your files will make troubleshooting easier. 3. MIB files should be named the same as the contained MIB module. The MIB module name is specified at the beginning of the MIB file before `::= BEGIN` keyword. 
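As an illustration (not an actual vendor MIB), a file placed under `/home/user/local_mibs/VENDOR1/` and named `VENDOR1-EXAMPLE-MIB` would start with a module definition whose name matches the file name:

```
VENDOR1-EXAMPLE-MIB DEFINITIONS ::= BEGIN
    -- the module name above must match the file name: VENDOR1-EXAMPLE-MIB
    -- object definitions follow here
END
```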
@@ -72,7 +74,7 @@ mibserver: pathToMibs: "/home/user/local_mibs" ``` -To verify that the process of compilation was completed successfully, check the the mibserver logs using the following command: +To verify that the process of compilation was completed successfully, check the mibserver logs using the following command: ```bash microk8s kubectl logs -f deployments/snmp-mibserver -n sc4snmp @@ -90,5 +92,5 @@ microk8s kubectl rollout restart deployment snmp-mibserver -n sc4snmp For a multi-node Kubernetes installation, create pvc beforehand, copy files onto it, and add it to the MIB server using `persistence.existingClaim`. If you go with the `localMibs.pathToMibs` solution for a multi-node installation (with `nodeSelector` set up to schedule MIB server pods on the same node where the MIB files are), -when the Node with the mapped hostPath fails, you'll have to access the MIB files on another node. +when the Node with the mapped hostPath fails, you will have to access the MIB files on another node. diff --git a/docs/configuration/configuring-groups.md b/docs/microk8s/configuration/configuring-groups.md similarity index 100% rename from docs/configuration/configuring-groups.md rename to docs/microk8s/configuration/configuring-groups.md diff --git a/docs/configuration/configuring-profiles.md b/docs/microk8s/configuration/configuring-profiles.md similarity index 76% rename from docs/configuration/configuring-profiles.md rename to docs/microk8s/configuration/configuring-profiles.md index 5a54c61fc..d8c50038f 100644 --- a/docs/configuration/configuring-profiles.md +++ b/docs/microk8s/configuration/configuring-profiles.md @@ -1,7 +1,7 @@ # Configuring profiles -Profiles are where you can configure what you want to poll, and then assign them to the device. The definition of profile can be found in the `values.yaml` file -under the `scheduler` section. +Profiles are the part of configuration where you can specify what you want to poll, and then assign them to the device. +The definition of profile can be found in the `values.yaml` file under the `scheduler` section. See the following instructions on how to use profiles: [Update Inventory and Profile](../poller-configuration/#update-inventory). @@ -9,7 +9,7 @@ There are two types of profiles in general: 1. Static profile: Polling starts when the profile is added to the `profiles` field in the `inventory` of the device. 2. Smart profile: Polling starts when configured conditions are fulfilled, and the device to poll from has `smart_profiles` enabled in inventory. -Smart profiles are useful when you have many devices of the same kind, and you don't want to configure each of them individually with static profiles. +Smart profiles are useful when you have many devices of the same kind, and you do not want to configure each of them individually with static profiles. In order to configure smart profile, do the following: @@ -64,7 +64,7 @@ scheduler: - ['IP-MIB'] ``` -If you only want to enable the option `static_profile` polling for the host `10.202.4.202`, you would configure a similar inventory: +If you only want to enable the option of `static_profile` polling for the host `10.202.4.202`, you would configure a similar inventory: ```yaml poller: @@ -86,7 +86,7 @@ Afterwards, if the device `sysDescr` matches the `'.*linux.*'` filter, the `smar ## varBinds configuration -`varBinds` is short for "variable binding" in the SNMP. It is the combination of an Object Identifier (OID) and a value. +`VarBinds` is short name for "variable binding" in the SNMP. 
It is the combination of an Object Identifier (OID) and a value. `varBinds` are used for defining what OIDs should be requested from SNMP Agents. `varBinds` is a required subsection of each profile. The syntax configuration of `varBinds` looks like the following: @@ -117,7 +117,7 @@ To configure Static Profile, the following value needs to be set in the `profile - Define `ProfileName` as a subsection key in `profiles`. - Define `frequency` as the interval between SNMP execution in seconds. - - Define `varBinds` as var binds to query. + - Define `varBinds` as variable bindings to query. See the following example: ```yaml @@ -137,7 +137,8 @@ Sometimes static profiles have additional functionalities to be used in specific #### WALK profile -If you would like to limit the scope of the walk, you should set one of the profiles in the inventory to point to the profile definition of the `walk` type: +If you would like to limit the scope of the walk, you should set one of the profiles in the inventory to point to the profile +definition of the `walk` type: ```yaml scheduler: profiles: | @@ -147,8 +148,8 @@ scheduler: varBinds: - ['UDP-MIB'] ``` -This profile should be placed in the profiles section of the inventory definition. It will be executed with the frequency defined in `walk_interval`. -If multiple profiles of type `walk` were placed in profiles, the last one will be used. +This profile should be placed in the profiles section of the inventory definition. It will be executed with the frequency +defined in `walk_interval` field from `inventory`. If multiple profiles of type `walk` were placed in profiles, the last one will be used. See the following example on how to use `walk` in profiles: @@ -159,9 +160,9 @@ poller: 10.202.4.202,,2c,public,,,2000,small_walk,, ``` -NOTE: When small walk is configured, `SNMPv2-MIB` is enabled by default (we need it to create the state of the device in the database). -For example, if you used `small_walk` from the previous example, you'll only be able to poll `UDP-MIB` and `SNMPv2-MIB` OIDs. - +!!! info + When small walk is configured, `SNMPv2-MIB` is polled by default (we need it to create the state of the device in the database). + For example, if you used `small_walk` from the previous example, you will only be able to poll `UDP-MIB` and `SNMPv2-MIB` OIDs. ## SmartProfile configuration SmartProfile is executed when the SmartProfile flag in the inventory is set to true and the conditions defined in profile match. @@ -169,18 +170,19 @@ See [Inventory configuration](../poller-configuration/#configure-inventory) for To configure SmartProfile, the following values needs to be set in the `profiles` section: - - For`ProfileName`, define it as a subsection key in `profiles`. - - For`frequency`, define it as the interval between SNMP execution in seconds. - - For `condition`, define the conditions to match the profile. - - For `type`, define it as the key for the `condition` section that defines the type of condition. The allowed values are `base` or `field` (`walk` type is also allowed here, but it's not part of smart profiles). - - The `base` type of condition will be executed when `SmartProfile` in inventory is set to true. - - The`field` type of condition will be executed if it matches `pattern` for the defined `field`. Supported fields are: - - "SNMPv2-MIB.sysDescr" - - "SNMPv2-MIB.sysObjectID" - - For `field`, define the field name for the field condition type. 
- - For`pattern`, define the list of regular expression patterns for the MIB object field defined in the `field` section, for example: - - ".*linux.*" - - For `varBinds`, define var binds to query. +- For`ProfileName`, define it as a subsection key in `profiles`. +- For`frequency`, define it as the interval between SNMP execution in seconds. +- For `condition`, define the conditions to match the profile. +- For `type`, define it as the key for the `condition` section that defines the type of condition. The allowed + values are `base` or `field` (`walk` type is also allowed here, but it is not part of smart profiles). +- The `base` type of condition will be executed when `SmartProfile` in inventory is set to true. +- The`field` type of condition will be executed if it matches `pattern` for the defined `field`. Supported fields are: + - "SNMPv2-MIB.sysDescr" + - "SNMPv2-MIB.sysObjectID" +- For `field`, define the field name for the field condition type. +- For`pattern`, define the list of regular expression patterns for the MIB object field defined in the `field` section, for example: + - ".*linux.*" +- For `varBinds`, define variable bindings to query. See the following example of a `base` type profile: ```yaml @@ -211,12 +213,15 @@ scheduler: - ['SNMPv2-MIB', 'sysName'] ``` -NOTE: Be aware that profile changes may not be reflected immediately. It can take up to 1 minute for changes to propagate. In case you changed the frequency, or a profile type, the change will be reflected only after the next walk. -There is also a 5 minute time to live (TTL) for an inventory pod. SC4SNMP allows one inventory upgrade and then it block updates for the next 5 minutes. +!!! info + Be aware that profile changes may not be reflected immediately. It can take up to 1 minute for changes to propagate. + In case you changed the frequency, or a profile type, the change will be reflected only after the next walk. + There is also a 5 minute time to live (TTL) for an inventory pod. SC4SNMP allows one inventory upgrade and then it + block updates for the next 5 minutes. ## Conditional profiles -There is a way to not explicitly list what SNMP objects you want to poll, but, instead, only give the conditions that must be fulfilled to -qualify an object for polling. +There is a way to not explicitly list what SNMP objects you want to poll, but, instead, only give the conditions that must +be fulfilled to qualify an object for polling. See the following example of a conditional profile: @@ -239,7 +244,8 @@ IF_conditional_profile: When the such profile is defined and added to a device in an inventory, it will poll all interfaces where `ifAdminStatus` and `ifOperStatus` is up. Conditional profiles are being evaluated during the walk process (on every `walk_interval`), -and, if the status changes in between, the scope of the conditional profile won't be modified. Therefore, status changes are only implemented when walk_interval is executed. +and, if the status changes in between, the scope of the conditional profile will not be modified. Therefore, status +changes are only implemented when walk_interval is executed. See the following operations that can be used in conditional profiles: @@ -295,8 +301,8 @@ varBinds: ## Custom translations -If the user wants to use custom names/translations of MIB names, it can be configured under the customTranslations section under scheduler config. -Translations are grouped by the MIB family. 
In the following example, IF-MIB.ifInDiscards will be translated to IF-MIB.myCustomName1: +If the user wants to use custom names/translations of MIB names, it can be configured under the `customTranslations` section under scheduler config. +Translations are grouped by the MIB family. In the following example, `IF-MIB.ifInDiscards` will be translated to `IF-MIB.myCustomName1`: ```yaml scheduler: customTranslations: diff --git a/docs/configuration/coredns-configuration.md b/docs/microk8s/configuration/coredns-configuration.md similarity index 100% rename from docs/configuration/coredns-configuration.md rename to docs/microk8s/configuration/coredns-configuration.md diff --git a/docs/microk8s/configuration/deployment-configuration.md b/docs/microk8s/configuration/deployment-configuration.md new file mode 100644 index 000000000..6d93bb670 --- /dev/null +++ b/docs/microk8s/configuration/deployment-configuration.md @@ -0,0 +1,54 @@ +#Deployment Configuration + +The `values.yaml` is the main point of SC4SNMP management. You can check all the default values of Helm dependencies using the following command: + +``` +microk8s helm3 inspect values splunk-connect-for-snmp/splunk-connect-for-snmp > values.yaml +``` + +The whole file is divided into the following parts: + +To configure the endpoint for sending SNMP data: + +- `splunk` - in case you use Splunk Enterprise/Cloud. +- `sim` - in case you use Splunk Observability Cloud. For more details see [sim configuration](sim-configuration.md). + +For polling purposes: + +- `scheduler` - For more details see [scheduler configuration](scheduler-configuration.md). +- `poller` - For more details see [poller configuration](poller-configuration.md). + +For traps receiving purposes: + +- `traps` - For more details see [trap configuration](trap-configuration.md). + +Shared components: + +- `inventory` - For more details see [inventory configuration](../poller-configuration#configure-inventory). +- `mibserver` - For more details see [mibserver configuration](../../mib-request.md). +- `mongodb` - For more details see [mongo configuration](mongo-configuration.md). +- `redis` - For more details see [redis configuration](redis-configuration.md). +- `ui` - For more details see [ui configuration](../gui/enable-gui.md). +- `worker` - For more details see [worker configuration](worker-configuration.md). + +### Shared values +All the components have the following `resources` field for adjusting memory resources: + +```yaml + resources: + limits: + cpu: 1000m + memory: 2Gi + requests: + cpu: 1000m + memory: 2Gi +``` +For more information about the concept of `resources`, see the [kuberentes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). +For more information about scaling resources see the [scaling with microk8s](../../mk8s/k8s-microk8s-scaling). + +There is an option to create common annotations across all services. 
It can be set by: + +```yaml +commonAnnotations: + annotation_key: annotation_value +``` diff --git a/docs/configuration/mongo-configuration.md b/docs/microk8s/configuration/mongo-configuration.md similarity index 100% rename from docs/configuration/mongo-configuration.md rename to docs/microk8s/configuration/mongo-configuration.md diff --git a/docs/configuration/poller-configuration.md b/docs/microk8s/configuration/poller-configuration.md similarity index 52% rename from docs/configuration/poller-configuration.md rename to docs/microk8s/configuration/poller-configuration.md index ffe032528..376d191ba 100644 --- a/docs/configuration/poller-configuration.md +++ b/docs/microk8s/configuration/poller-configuration.md @@ -28,7 +28,8 @@ poller: 10.202.4.202,,2c,public,,,2000,,, ``` -NOTE: The header's line (`address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete`) is necessary for the correct execution of SC4SNMP. Do not remove it. +!!! info + The header's line (`address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete`) is necessary for the correct execution of SC4SNMP. Do not remove it. ### Define log level The log level for poller can be set by changing the value for the key `logLevel`. The allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` or `FATAL`. @@ -68,7 +69,7 @@ out of this object: ### Disable automatic polling of base profiles -There are [two profiles](https://github:com/splunk/splunk-connect-for-snmp/blob/main/splunk_connect_for_snmp/profiles/base.yaml) that are being polled by default, so that even without any configuration set up, you can see +There are [two profiles](https://github.com/splunk/splunk-connect-for-snmp/blob/main/splunk_connect_for_snmp/profiles/base.yaml) that are being polled by default, so that even without any configuration set up, you can see the data in Splunk. You can disable it with the following `pollBaseProfiles` parameter: ```yaml @@ -82,18 +83,18 @@ To update inventory, see [Update Inventory and Profile](#update-inventory). The `inventory` section in `poller` has the following fields to configure: - - `address` (REQUIRED) is the IP address which SC4SNMP should collect data from, or name of the group of hosts. General -information about groups can be found on the [Configuring Groups](configuring-groups.md) page. - - `port` (OPTIONAL) is an SNMP listening port. The default value is `161`. - - `version` (REQUIRED) is the SNMP version, and the allowed values are `1`, `2c`, or `3`. - - `community` (OPTIONAL) is the SNMP community string, and a field is required when the `version` is `1` or `2c`. - - `secret` (OPTIONAL) is the reference to the secret from `poller.usernameSecrets` that should be used to poll from the device. - - `security_engine` (OPTIONAL) is the security engine ID required by SNMPv3. If it is not provided for version `3`, it will be autogenerated. - - `walk_interval` (OPTIONAL) is the interval in seconds for SNMP walk, with a default value of `42000`. This value needs to be between `1800` and `604800`. - - `profiles` (OPTIONAL) is a list of SNMP profiles used for the device. More than one profile can be added by a semicolon -separation, for example, `profile1;profile2`. For more information about profiles, see [Profile Configuration](../configuring-profiles). - - `smart_profiles` (OPTIONAL) enables smart profiles, and by default it's set to `true`. Its allowed values are `true` or `false`. 
- - `delete` (OPTIONAL) is a flag that defines if the inventory should be deleted from the scheduled tasks for WALKs and GETs. Its allowed value are `true`or `false`. The default value is `false`. +| Field | Description | Default | Required | +|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|----------| +| `address` | The IP address which SC4SNMP should collect data from, or name of the group of hosts. General information about groups can be found on the [Configuring Groups](configuring-groups.md) page. | | YES | +| `port` | SNMP listening port. | `161` | NO | +| `version` | SNMP version, the allowed values are `1`, `2c`, or `3`. | | YES | +| `community` | SNMP community string, this field is required when the `version` is `1` or `2c`. | | NO | +| `secret` | The reference to the secret from `poller.usernameSecrets` that should be used to poll from the device. | | NO | +| `security_engine` | The security engine ID required by SNMPv3. If it is not provided for version `3`, it will be autogenerated. | | NO | +| `walk_interval` | The interval in seconds for SNMP walk. This value needs to be between `1800` and `604800`. | `42000` | NO | +| `profiles` | A list of SNMP profiles used for the device. More than one profile can be added by a semicolon separation, for example, `profile1;profile2`. For more information about profiles, see [Profile Configuration](../configuring-profiles). | | NO | +| `smart_profiles` | Enables smart profiles. Its allowed values are `true` or `false`. | `true` | NO | +| `delete` | A flag that defines if the inventory should be deleted from the scheduled tasks for WALKs and GETs. Its allowed value are `true`or `false`. The default value is `false`. | `false` | NO | See the following example: ```yaml @@ -106,7 +107,9 @@ poller: ### Update Inventory -Adding new devices for `values.yaml` is resource expensive, and can impact performance. As it interacts with hardware networking devices, the updating process requires several checks before applying changes. SC4SNMP was designed to prevent changes in inventory tasks more often than every 5 minutes. +Adding new devices for `values.yaml` is resource expensive, and can impact performance. As it interacts with hardware networking devices, +the updating process requires several checks before applying changes. SC4SNMP was designed to prevent changes in inventory tasks +more often than every 5 minutes. To apply inventory changes in `values.yaml`, the following steps need to be executed: @@ -117,7 +120,7 @@ To apply inventory changes in `values.yaml`, the following steps need to be exec microk8s kubectl -n sc4snmp get pods | grep inventory ``` -If the command does not return any pods, wait and continue to execute the command again, until the inventory job finishes. +If the command return pods, wait and continue to execute the command again, until the inventory job finishes. 
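If you prefer not to re-run the command by hand, a small shell sketch such as the one below can wait for the inventory job to finish. It assumes that a finished inventory pod is listed with a `Completed` status, which is why that status is filtered out; adjust it to your environment.

```bash
# Wait until no inventory pod is still running (Completed pods are ignored).
while microk8s kubectl -n sc4snmp get pods | grep inventory | grep -vq Completed; do
  echo "inventory job still running, waiting..."
  sleep 10
done
echo "inventory job finished, safe to upgrade"
```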
If you really need to apply changes immediately, you can get around the limitation by deleting the inventory job using the following command: @@ -133,12 +136,14 @@ After running this command, you can proceed with upgrading without a need to wai microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace ``` -NOTE: If you decide to change the frequency of the profile without changing the inventory data, the change will be reflected after -the next walk process for the host. The walk happens every `walk_interval`, or during any change in inventory. +!!! info + If you decide to change the frequency of the profile without changing the inventory data, the change will be reflected after + the next walk process for the host. The walk happens every `walk_interval`, or during any change in inventory. #### Upgrade with the csv file -You can update inventory by making changes outside of the `values.yaml`. It can be put into a separate csv file and upgraded using `--set-file poller.inventory=`. +You can update inventory by making changes outside of the `values.yaml`. It can be put into a separate csv file and upgraded +using `--set-file poller.inventory=`. See the following example of an CSV file configuration: diff --git a/docs/configuration/redis-configuration.md b/docs/microk8s/configuration/redis-configuration.md similarity index 100% rename from docs/configuration/redis-configuration.md rename to docs/microk8s/configuration/redis-configuration.md diff --git a/docs/configuration/scheduler-configuration.md b/docs/microk8s/configuration/scheduler-configuration.md similarity index 71% rename from docs/configuration/scheduler-configuration.md rename to docs/microk8s/configuration/scheduler-configuration.md index 8fc074887..57fe8b1e4 100644 --- a/docs/configuration/scheduler-configuration.md +++ b/docs/microk8s/configuration/scheduler-configuration.md @@ -1,5 +1,5 @@ # Scheduler configuration -The scheduler is a service that manages schedules for SNMP walks and GETs. The definitions of the schedules +The scheduler is a service that manages schedules for SNMP WALKs and GETs. The definitions of the schedules are stored in MongoDB. ### Scheduler configuration file @@ -29,10 +29,13 @@ The log level for the scheduler can be set by changing the value for the `logLev The default value is `WARNING`. ### Define resource requests and limits + +To change the resource requests ad limits for cpu and memory, modify the `resources` section of the scheduler configuration. + ```yaml scheduler: - #The following resource specification is appropriate for most deployments to scale the - #Larger inventories may require more memory but should not require additional cpu + # The following resource specification is appropriate for most deployments to scale the + # Larger inventories may require more memory but should not require additional cpu resources: limits: cpu: 1 @@ -45,7 +48,7 @@ scheduler: ### Define groups of hosts For more information on when to use groups, see [Configuring Groups](configuring-groups.md). -See the following example group configuration: +See the following example of group configuration: ```yaml scheduler: groups: | @@ -66,13 +69,14 @@ scheduler: port: 999 ``` -The one obligatory field for the host configuration is `address`. If `port` isn't configured its default value is `161`. -Other fields that can be modified here are: `community`, `secret`, `version`, and `security_engine`. 
+The one obligatory field for the host configuration is `address`. If `port` is not configured its default value is `161`. +Other fields that can be modified are: `community`, `secret`, `version`, and `security_engine`. However, if they remain unspecified in the host configuration, they will be derived from the inventory record. ### Define the expiration time for tasks -Define the time, in seconds, when polling or walk tasks will be revoked if they haven't been picked up by the worker. See the [celery documentation](https://docs.celeryq.dev/en/stable/userguide/calling.html#expiration) for more details. +Define the time, in seconds, when polling or walk tasks will be revoked if they have not been picked up by the worker. +See the [celery documentation](https://docs.celeryq.dev/en/stable/userguide/calling.html#expiration) for more details. ```yaml scheduler: tasksExpiryTime: 300 diff --git a/docs/configuration/sim-configuration.md b/docs/microk8s/configuration/sim-configuration.md similarity index 86% rename from docs/configuration/sim-configuration.md rename to docs/microk8s/configuration/sim-configuration.md index 1861f1c87..fc8e01219 100644 --- a/docs/configuration/sim-configuration.md +++ b/docs/microk8s/configuration/sim-configuration.md @@ -41,9 +41,10 @@ sim: name: signalfx ``` -Note: After the initial installation, if you change `sim.signalfxToken` and/or `sim.signalfxRealm` and no `sim.secret.name` is given, -the `sim` pod will sense the update by itself (after `helm3 upgrade` command) and trigger the recreation. But, when you edit secret created outside -of `values.yaml` (given by `sim.secret.name`), you need to roll out the deployment by yourself or delete the pod to update the data. +!!! info + After the initial installation, if you change `sim.signalfxToken` and/or `sim.signalfxRealm` and no `sim.secret.name` is given, + the `sim` pod will sense the update by itself (after `helm3 upgrade` command) and trigger the recreation. But, when you edit secret created outside + of `values.yaml` (given by `sim.secret.name`), you need to roll out the deployment by yourself or delete the pod to update the data. ### Define annotations diff --git a/docs/configuration/snmp-data-format.md b/docs/microk8s/configuration/snmp-data-format.md similarity index 94% rename from docs/configuration/snmp-data-format.md rename to docs/microk8s/configuration/snmp-data-format.md index e4fb2c899..ceceae45a 100644 --- a/docs/configuration/snmp-data-format.md +++ b/docs/microk8s/configuration/snmp-data-format.md @@ -29,13 +29,12 @@ ifOperStatus OBJECT-TYPE -- lower-layer interface(s) } ``` -[source](https://www.circitor.fr/Mibs/Mib/I/IF-MIB.mib) Here a numeric value is expected, but actually what SNMP Agents ends up receiving from the device is a `string` value, like `up`. To avoid setting textual value as a metric, SC4SNMP does an additional check and tries to cast the numeric value to float. If the check fails, the value is classified as a textual field. -See the following simple example. You just added a device and didn't configure anything special. The data from a walk +See the following simple example. You just added a device and did not configure anything special. 
The data from a walk in Splunk's metrics index is: ``` @@ -103,7 +102,7 @@ profile_with_one_metric: - ['IF-MIB', 'ifInUcastPkts'] ``` -The record that you'll see in Splunk `| mpreview index=net*` for the same case as the previous one would be: +The record that you will see in Splunk `| mpreview index=net*` for the same case as the previous one would be: ``` ifAdminStatus: up @@ -117,7 +116,7 @@ The record that you'll see in Splunk `| mpreview index=net*` for the same case a ``` Only fields specified in `varBinds` are actively polled from the device. In the case of the previous `profile_with_one_metric`, the textual fields `ifAdminStatus`, `ifDescr`, `ifIndex`, `ifOperStatus` and `ifPhysAddress` are taken from the database cache. This is updated on every walk process. This is fine in most cases, as values such as -MAC address, interface type, or interface status shouldn't change frequently if at all. +MAC address, interface type, or interface status should not change frequently if at all. If you want to keep `ifOperStatus` and `ifAdminStatus` up to date all the time, define profile using the following example: diff --git a/docs/configuration/snmpv3-configuration.md b/docs/microk8s/configuration/snmpv3-configuration.md similarity index 92% rename from docs/configuration/snmpv3-configuration.md rename to docs/microk8s/configuration/snmpv3-configuration.md index cd288c1ae..b3c59eb7a 100644 --- a/docs/configuration/snmpv3-configuration.md +++ b/docs/microk8s/configuration/snmpv3-configuration.md @@ -2,7 +2,7 @@ Configuration of SNMP v3, when supported by the monitored devices, is the most secure choice available for authentication and data privacy. Each set of credentials will be stored as "Secret" objects in k8s, -and will be referenced in values.yaml. This allows the secret to be created once, including automation +and will be referenced in `values.yaml`. This allows the secret to be created once, including automation by third-party password managers, then consumed without storing sensitive data in plain text. ```bash diff --git a/docs/configuration/step-by-step-poll.md b/docs/microk8s/configuration/step-by-step-poll.md similarity index 93% rename from docs/configuration/step-by-step-poll.md rename to docs/microk8s/configuration/step-by-step-poll.md index be4dcd65a..f3d2b09f7 100644 --- a/docs/configuration/step-by-step-poll.md +++ b/docs/microk8s/configuration/step-by-step-poll.md @@ -61,13 +61,14 @@ poller: The provided example configuration will make: -1. Walk devices from `switch_group` with `IF-MIB` and `UCD-SNMP-MIB` every 2000 seconds -2. Poll specific `IF-MIB` fields and the whole `UCD-SNMP-MIB` every 60 seconds +1. Walk devices from `switch_group` with `IF-MIB` and `UCD-SNMP-MIB` every 2000 seconds. +2. Poll specific `IF-MIB` fields and the whole `UCD-SNMP-MIB` every 60 seconds. -Note: You can also limit the walk profile even more if you want to enhance the performance. +!!! info + You can also limit the walk profile even more if you want to enhance the performance. -It makes sense to put the textual values in the walk that aren't required to be constantly monitored, and monitor only the metrics -you're interested in: +It makes sense to put the textual values in the walk that are not required to be constantly monitored, and monitor only the metrics +you are interested in: ``` small_walk: @@ -95,7 +96,7 @@ switch_profile: Afterwards, every metric object will be enriched with the textual values gathered from a walk process. 
See [here](snmp-data-format.md) for more information about SNMP format. -Now you're ready to reload SC4SNMP. Run the following `helm3 upgrade` command: +Now you are ready to reload SC4SNMP. Run the following `helm3 upgrade` command: ```yaml microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace @@ -146,7 +147,7 @@ Successfully connected to http://snmp-mibserver/index.csv {"message": "New Record address='10.202.4.204' port=163 version='2c' community='public' secret=None security_engine=None walk_interval=2000 profiles=['switch_profile'] smart_profiles=True delete=False", "time": "2022-09-05T14:30:30.607641", "level": "INFO"} ``` -In some time (depending on how long the walk takes), we'll see events using the following query: +In some time (depending on how long the walk takes), we will see events using the following query: ```yaml | mpreview index=netmetrics | search profiles=switch_profile @@ -159,7 +160,8 @@ When groups are used, we can also use querying by the group name, for example: ``` Querying by profiles/group in Splunk is only possible in the metrics index. Every piece of data being sent -by SC4SNMP is formed based on the MIB file's definition of the SNMP object's index. The object is forwarded to an event index only if it doesn't have any metric value inside. +by SC4SNMP is formed based on the MIB file's definition of the SNMP object's index. The object is forwarded to an event +index only if it does not have any metric value inside. The following is a Splunk `raw` metrics example: diff --git a/docs/configuration/trap-configuration.md b/docs/microk8s/configuration/trap-configuration.md similarity index 77% rename from docs/configuration/trap-configuration.md rename to docs/microk8s/configuration/trap-configuration.md index 3f5eb2e58..e12a20879 100644 --- a/docs/configuration/trap-configuration.md +++ b/docs/microk8s/configuration/trap-configuration.md @@ -20,7 +20,7 @@ traps: - secretv3 - sc4snmp-homesecure-sha-des - # Overrides the image tag whose default is the chart appVersion. + # Overrides the logLevel tag whose default is the chart logLevel: "WARN" # replicas: Number of replicas for trap container should be 2x number of nodes replicas: 2 @@ -51,8 +51,8 @@ traps: ``` ### Configure user secrets for SNMPv3 -The `usernameSecrets` key in the `traps` section defines the SNMPv3 secrets for the trap messages sent by the SNMP device. `usernameSecrets` defines which secrets -in "Secret" objects in k8s should be used, as a value it needs the name of "Secret" objects. +The `usernameSecrets` key in the `traps` section defines the SNMPv3 secrets for the trap messages sent by the SNMP device. +`usernameSecrets` defines which secrets in "Secret" objects in k8s should be used, as a value it needs the name of "Secret" objects. For more information on how to define the "Secret" object for SNMPv3, see [SNMPv3 Configuration](snmpv3-configuration.md). See the following example: @@ -65,9 +65,10 @@ traps: ### Define security engines ID for SNMPv3 -SNMPv3 TRAPs require the configuration SNMP Engine ID of the TRAP sending application for the USM users table of the TRAP receiving -application for each USM user. The SNMP Engine ID is usually unique for the device, and the SC4SNMP as a trap receiver has to be aware of -which security engine IDs to accept. Define all of them under `traps.securityEngineId` in `values.yaml`. 
+SNMPv3 TRAPs require the configuration of SNMP Engine ID of the TRAP sending application for the USM users table of +the TRAP receiving application for each USM user. The SNMP Engine ID is usually unique for the device, and the SC4SNMP +as a trap receiver has to be aware of which security engine IDs to accept. Define all of them under `traps.securityEngineId` +in `values.yaml`. By default, it is set to a one-element list: `[80003a8c04]`, for example: @@ -77,7 +78,7 @@ traps: - "80003a8c04" ``` -The security engine ID is a substitute of the `-e` variable in `snmptrap`. +The `securityEngineID` is a substitute of the `-e` variable in `snmptrap`. The following is an example of an SNMPv3 trap: ```yaml @@ -113,8 +114,8 @@ traps: ``` Using this method, the SNMP trap will always be forwarded to one of the trap receiver pods listening on port 30000 (like in the -example above, you can configure to any other port). So, it doesn't matter that IP address of which node you use. Adding -nodePort will make it end up in the correct place everytime. +example above, you can configure to any other port). So, it does not matter that IP address of which node you use. +Adding nodePort will make it end up in the correct place everytime. A good practice is to create an IP floating address/Anycast pointing to the healthy nodes, so the traffic is forwarded in case of the failover. To do this, create an external LoadBalancer that balances the traffic between nodes. @@ -149,14 +150,17 @@ In case you want to see traps events collected as one event inside Splunk, you c traps: aggregateTrapsEvents: "true" ``` +After that run the upgrade command. ### Updating trap configuration -If you need to update part of the traps configuration, you can do it by editing the `values.yaml` and then running the following command to restart the pod deployment: +If you need to update part of the traps configuration that changes the configmap, you can do it by editing the `values. +yaml` and then running the following command to restart the pod deployment: ``` microk8s kubectl rollout restart deployment snmp-splunk-connect-for-snmp-trap -n sc4snmp ``` -NOTE: The name of the deployment can differ based on the helm installation name. This can be checked with the following command: -``` -microk8s kubectl get deployments -n sc4snmp -``` +!!! info + The name of the deployment can differ based on the helm installation name. This can be checked with the following command: + ``` + microk8s kubectl get deployments -n sc4snmp + ``` diff --git a/docs/configuration/values-params-description.md b/docs/microk8s/configuration/values-params-description.md similarity index 93% rename from docs/configuration/values-params-description.md rename to docs/microk8s/configuration/values-params-description.md index 9fd069a5f..a057dd650 100644 --- a/docs/configuration/values-params-description.md +++ b/docs/microk8s/configuration/values-params-description.md @@ -2,7 +2,7 @@ ## Image Section -Detailed documentation about configuring images section can be found in [kubernetes documentation](https://kubernetes.io/docs/concepts/containers/images/) +Detailed documentation about configuring images section can be found in [kubernetes documentation](https://kubernetes.io/docs/concepts/containers/images/). 
Below are the most common options: | Variable | Description | Example | @@ -13,7 +13,7 @@ Below are the most common options: ## UI section -Detailed documentation about configuring UI can be found in [Enable GUI](../gui/enable-gui.md) +Detailed documentation about configuring UI can be found in [Enable GUI](../gui/enable-gui.md). | Variable | Description | Default | |-----------------------|------------------------------------------------------------------------------------------------------|---------------------------------------| @@ -45,33 +45,35 @@ Detailed documentation about configuring UI can be found in [Enable GUI](../gui/ ## Sim section -Detailed documentation about configuring sim can be found in [Splunk Infrastructure Monitoring](../configuration/sim-configuration.md) - -| Variable | Description | Default | -|-------------------------------------------------|--------------------------------------------------------------------------------|---------| -| `enabled` | Enables sending data to Splunk Observability Cloud/ SignalFx | `false` | -| `signalfxToken` | Splunk Observability org access token | | -| `signalfxRealm` | Splunk Observability realm to send telemetry data to | | -| `resources` | CPU and memory limits and requests for pod | | -| `service.annotations` | Annotations to append under sim service | | -| `secret.create` | Option to configure `signalfxToken` and `signalfxRealm` as kubernetes secrets | `true` | -| `secret.name` | Name of existing secret in kubernetes with `signalfxToken` and `signalfxRealm` | | -| `replicaCount` | Number of created replicas when autoscaling disabled | `1` | -| `autoscaling.enabled` | Enables autoscaling for pods | `false` | -| `image` | Refer to [Image Section](./#image-section) | | -| `autoscaling.minReplicas` | Minimum number of running pods when autoscaling is enabled | | -| `autoscaling.maxReplicas` | Maximum number of running pods when autoscaling is enabled | | -| `autoscaling.targetCPUUtilizationPercentage` | CPU % threshold that must be exceeded on pods to spawn another replica | | -| `autoscaling.targetMemoryUtilizationPercentage` | Memory % threshold that must be exceeded on pods to spawn another replica | | +Detailed documentation about configuring sim can be found in [Splunk Infrastructure Monitoring](sim-configuration.md). 
+ +| Variable | Description | Default | +|-------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|---------| +| `enabled` | Enables sending data to Splunk Observability Cloud/ SignalFx | `false` | +| `signalfxToken` | Splunk Observability org access token | | +| `signalfxRealm` | Splunk Observability realm to send telemetry data to | | +| `resources` | CPU and memory limits and requests for pod | | +| `service.annotations` | Annotations to append under sim service | | +| `secret.create` | Option to configure `signalfxToken` and `signalfxRealm` as kubernetes secrets | `true` | +| `secret.name` | Name of existing secret in kubernetes with `signalfxToken` and `signalfxRealm` | | +| `replicaCount` | Number of created replicas when autoscaling is disabled | `1` | +| `autoscaling.enabled` | Enables autoscaling for pods | `false` | +| `image` | Refer to [Image Section](./#image-section) | | +| `autoscaling.minReplicas` | Minimum number of running pods when autoscaling is enabled | | +| `autoscaling.maxReplicas` | Maximum number of running pods when autoscaling is enabled | | +| `autoscaling.targetCPUUtilizationPercentage` | CPU % threshold that must be exceeded on pods to spawn another replica | | +| `autoscaling.targetMemoryUtilizationPercentage` | Memory % threshold that must be exceeded on pods to spawn another replica | | | `podAntiAffinity` | [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) | `soft` | | `nodeSelector` | [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) | | ## Scheduler -Detailed documentation about configuring scheduler can be found in [Scheduler](../configuration/scheduler-configuration.md) -Detailed documentation about configuring groups can be found in [Configuring Groups](../configuration/configuring-groups.md) -Detailed documentation about configuring profiles can be found in [Configuring Profiles](../configuration/configuring-profiles.md) +Detailed documentation about configuring: + + - scheduler can be found in [Scheduler](scheduler-configuration.md). + - groups can be found in [Configuring Groups](configuring-groups.md). + - profiles can be found in [Configuring Profiles](configuring-profiles.md). | Variable | Description | Default | |----------------------|---------------------------------------------------------------------------------------------------------------------------------|---------| @@ -88,7 +90,7 @@ Detailed documentation about configuring profiles can be found in [Configuring P ## Poller -Detailed documentation about configuring poller can be found in [Poller](../configuration/poller-configuration.md) +Detailed documentation about configuring poller can be found in [Poller](poller-configuration.md). | Variable | Description | Default | |--------------------------|---------------------------------------------------------------|---------| @@ -101,7 +103,7 @@ Detailed documentation about configuring poller can be found in [Poller](../conf ## Worker -Detailed documentation about configuring worker can be found in [Worker](../configuration/worker-configuration.md) +Detailed documentation about configuring worker can be found in [Worker](worker-configuration.md). 
| Variable | Description | Default | |------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------| @@ -138,7 +140,7 @@ Detailed documentation about configuring worker can be found in [Worker](../conf ## Inventory -Detailed documentation about configuring inventory can be found in [Poller](../configuration/poller-configuration/#configure-inventory) +Detailed documentation about configuring inventory can be found in [Poller](../poller-configuration#configure-inventory). | Variable | Description | Default | |-----------------------|-------------------------------------------------------------------------------------------------------------------|---------| @@ -151,7 +153,7 @@ Detailed documentation about configuring inventory can be found in [Poller](../c ## Traps -Detailed documentation about configuring traps can be found in [Traps](../configuration/trap-configuration.md) +Detailed documentation about configuring traps can be found in [Traps](trap-configuration.md). | Variable | Description | Default | |-------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|------------------| @@ -193,11 +195,13 @@ Detailed documentation about configuring traps can be found in [Traps](../config ## MongoDb -Detailed documentation about configuring mongodb can be found in [MongoDB](../configuration/mongo-configuration.md). It is advised to not change those settings. +Detailed documentation about configuring mongodb can be found in [MongoDB](mongo-configuration.md). It is advised to +not change those settings. ## Redis -Detailed documentation about configuring redis can be found in [Redis](../configuration/redis-configuration.md). It is advised to not change those settings. +Detailed documentation about configuring redis can be found in [Redis](redis-configuration.md). It is advised to not +change those settings. ## Others diff --git a/docs/microk8s/configuration/worker-configuration.md b/docs/microk8s/configuration/worker-configuration.md new file mode 100644 index 000000000..084d8d02c --- /dev/null +++ b/docs/microk8s/configuration/worker-configuration.md @@ -0,0 +1,373 @@ +# Worker Configuration +The `worker` is a kubernetes pod which is responsible for the actual execution of polling, processing trap messages, and sending +data to Splunk. + +### Worker types + +SC4SNMP has two base functionalities: monitoring traps and polling. These operations are handled by 3 types of workers: + +1. The `trap` worker consumes all the trap related tasks produced by the trap pod. + +2. The `poller` worker consumes all the tasks related to polling. + +3. The `sender` worker handles sending data to Splunk. You need to always have at least one sender pod running. + +### Worker configuration file + +Worker configuration is kept in the `values.yaml` file in the `worker` section. `worker` has 3 subsections: `poller`, `sender`, and `trap`, that refer to the workers types. +`values.yaml` is used during the installation process for configuring Kubernetes values. +The `worker` default configuration is the following: + +```yaml +worker: + # workers are responsible for the actual execution of polling, processing trap messages, and sending data to Splunk. 
+ # More: https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/configuration/worker-configuration/ + + # The poller worker consumes all the tasks related to polling + poller: + # number of the poller replicas when autoscaling is set to false + replicaCount: 2 + # minimum number of threads in a pod + concurrency: 4 + # how many tasks are consumed from the queue at once + prefetch: 1 + autoscaling: + # enabling autoscaling for poller worker pods + enabled: false + # minimum number of running poller worker pods when autoscaling is enabled + minReplicas: 2 + # maximum number of running poller worker pods when autoscaling is enabled + maxReplicas: 10 + # CPU % threshold that must be exceeded on poller worker pods to spawn another replica + targetCPUUtilizationPercentage: 80 + + resources: + # the resources limits for poller worker container + limits: + cpu: 500m + # the resources requests for poller worker container + requests: + cpu: 250m + + # The trap worker consumes all the trap related tasks produced by the trap pod + trap: + # number of the trap replicas when autoscaling is set to false + replicaCount: 2 + # Use reverse dns lookup of trap ip address and send the hostname to splunk + resolveAddress: + enabled: false + cacheSize: 500 # maximum number of records in cache + cacheTTL: 1800 # time to live of the cached record in seconds + # minimum number of threads in a pod + concurrency: 4 + # how many tasks are consumed from the queue at once + prefetch: 30 + autoscaling: + # enabling autoscaling for trap worker pods + enabled: false + # minimum number of running trap worker pods when autoscaling is enabled + minReplicas: 2 + # maximum number of running trap worker pods when autoscaling is enabled + maxReplicas: 10 + # CPU % threshold that must be exceeded on traps worker pods to spawn another replica + targetCPUUtilizationPercentage: 80 + resources: + # the resources limits for trap worker container + limits: + cpu: 500m + requests: + # the resources requests for trap worker container + cpu: 250m + # The sender worker handles sending data to Splunk + sender: + # number of the sender replicas when autoscaling is set to false + replicaCount: 1 + # minimum number of threads in a pod + concurrency: 4 + # how many tasks are consumed from the queue at once + prefetch: 30 + autoscaling: + # enabling autoscaling for sender worker pods + enabled: false + # minimum number of running sender worker pods when autoscaling is enabled + minReplicas: 2 + # maximum number of running sender worker pods when autoscaling is enabled + maxReplicas: 10 + # CPU % threshold that must be exceeded on sender worker pods to spawn another replica + targetCPUUtilizationPercentage: 80 + resources: + # the resources limits for sender worker container + limits: + cpu: 500m + # the resources requests for sender worker container + requests: + cpu: 250m + # Liveness probes are used in Kubernetes to know when a pod is alive or dead. + # A pod can be in a dead state for a number of reasons; + # the application could be crashed, some error in the application etc. + livenessProbe: + # whether it should be turned on or not + enabled: false + # The exec command for the liveness probe to run in the container. + exec: + command: + - sh + - -c + - test $(($(date +%s) - $(stat -c %Y /tmp/worker_heartbeat))) -lt 10 + # Number of seconds after the container has started before liveness probes are initiated. + initialDelaySeconds: 80 + # How often (in seconds) to perform the probe. 
+ periodSeconds: 10 + + # Readiness probes are used to know when a pod is ready to serve traffic. + # Until a pod is ready, it won't receive traffic from Kubernetes services. + readinessProbe: + # whether it should be turned on or not + enabled: false + # The exec command for the readiness probe to run in the container. + exec: + command: + - sh + - -c + - test -e /tmp/worker_ready + # Number of seconds after the container has started before readiness probes are initiated. + initialDelaySeconds: 30 + # How often (in seconds) to perform the probe. + periodSeconds: 5 + + + # task timeout in seconds (usually necessary when walk process takes a long time) + taskTimeout: 2400 + # maximum time interval between walk attempts + walkRetryMaxInterval: 180 + # maximum number of walk retries + walkMaxRetries: 5 + # ignoring `occurred: OID not increasing` issues for hosts specified in the array, ex: + # ignoreNotIncreasingOid: + # - "127.0.0.1:164" + # - "127.0.0.6" + ignoreNotIncreasingOid: [] + # logging level, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL + logLevel: "INFO" + podAntiAffinity: soft + # udpConnectionTimeout timeout in seconds for SNMP operations + udpConnectionTimeout: 3 + + # in case of seeing "Empty SNMP response message" this variable can be set to true + ignoreEmptyVarbinds: false +``` + +All parameters are described in the [Worker parameters](#worker-parameters) section. + + +### Worker scaling + +You can adjust worker pods in two ways: set fixed value in `replicaCount`, +or enable `autoscaling`, which scales pods automatically. + +#### Real life scenario: I use SC4SNMP for only trap monitoring, and I want to use my resources effectively. + +If you do not use polling at all, set `worker.poller.replicaCount` to `0`. +If you want to use polling in the future, you need to increase `replicaCount`. +To monitor traps, adjust `worker.trap.replicaCount` depending on your needs and `worker.sender.replicaCount` to send traps to Splunk. +Usually, you need significantly fewer sender pods than trap pods. + +The following is an example of `values.yaml` without using autoscaling: + +```yaml +worker: + trap: + replicaCount: 4 + sender: + replicaCount: 1 + poller: + replicaCount: 0 + logLevel: "WARNING" +``` + +The following is an example of `values.yaml` with autoscaling: + +```yaml +worker: + trap: + autoscaling: + enabled: true + minReplicas: 4 + maxReplicas: 10 + targetCPUUtilizationPercentage: 80 + sender: + autoscaling: + enabled: true + minReplicas: 2 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + poller: + replicaCount: 0 + logLevel: "WARNING" +``` + +In the previous example, both trap and sender pods are autoscaled. During an upgrade process, the number of pods is created through +`minReplicas`, and then new ones are created only if the CPU threshold +exceeds the `targetCPUUtilizationPercentage`, which by default is 80%. This solution helps you to keep +resources usage adjusted to what you actually need. 
+ +After the helm upgrade process, you will see `horizontalpodautoscaler` in `microk8s kubectl get all -n sc4snmp`: + +```yaml +NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE +horizontalpodautoscaler.autoscaling/snmp-mibserver Deployment/snmp-mibserver 1%/80% 1 3 1 97m +horizontalpodautoscaler.autoscaling/snmp-splunk-connect-for-snmp-worker-sender Deployment/snmp-splunk-connect-for-snmp-worker-sender 1%/80% 2 5 2 28m +horizontalpodautoscaler.autoscaling/snmp-splunk-connect-for-snmp-worker-trap Deployment/snmp-splunk-connect-for-snmp-worker-trap 1%/80% 4 10 4 28m +``` + +If you see `/80%` in the `TARGETS` section instead of the CPU percentage, you probably do not have the `metrics-server` add-on enabled. +Enable it using `microk8s enable metrics-server`. + + +#### Real life scenario: I have a significant delay in polling + +Sometimes when polling is configured to be run frequently and on many devices, workers get overloaded +and there is a delay in delivering data to Splunk. To avoid these situations, scale poller and sender pods. +Because of the walk cycles, (walk is a costly operation that is only run once in a while), poller workers require more resources +for a short time. For this reason, enabling autoscaling is recommended. + +See the following example of `values.yaml` with autoscaling: + +```yaml +worker: + trap: + autoscaling: + enabled: true + minReplicas: 4 + maxReplicas: 10 + targetCPUUtilizationPercentage: 80 + sender: + autoscaling: + enabled: true + minReplicas: 2 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + poller: + autoscaling: + enabled: true + minReplicas: 2 + maxReplicas: 20 + targetCPUUtilizationPercentage: 80 + logLevel: "WARNING" +``` + +Remember that the system will not scale itself infinitely. There is a finite amount of resources that you can allocate. +By default, every worker has configured the following resources: + +```yaml + resources: + limits: + cpu: 500m + requests: + cpu: 250m +``` + + +#### I have autoscaling enabled and experience problems with Mongo and Redis pod + +If MongoDB and Redis pods are crushing, and some of the pods are in an infinite `Pending` state, that means +you have exhausted your resources and SC4SNMP cannot scale more. You should decrease the number of `maxReplicas` in +workers, so that it is not going beyond the available CPU. + +#### I do not know how to set autoscaling parameters and how many replicas I need + +The best way to see if pods are overloaded is to run the following command: + +```yaml +microk8s kubectl top pods -n sc4snmp +``` + +```yaml +NAME CPU(cores) MEMORY(bytes) +snmp-mibserver-7f879c5b7c-nnlfj 1m 3Mi +snmp-mongodb-869cc8586f-q8lkm 18m 225Mi +snmp-redis-master-0 10m 2Mi +snmp-splunk-connect-for-snmp-scheduler-558dccfb54-nb97j 2m 136Mi +snmp-splunk-connect-for-snmp-trap-5878f89bbf-24wrz 2m 129Mi +snmp-splunk-connect-for-snmp-trap-5878f89bbf-z9gd5 2m 129Mi +snmp-splunk-connect-for-snmp-worker-poller-599c7fdbfb-cfqjm 260m 354Mi +snmp-splunk-connect-for-snmp-worker-poller-599c7fdbfb-ztf7l 312m 553Mi +snmp-splunk-connect-for-snmp-worker-sender-579f796bbd-vmw88 14m 257Mi +snmp-splunk-connect-for-snmp-worker-trap-5474db6fc6-46zhf 3m 259Mi +snmp-splunk-connect-for-snmp-worker-trap-5474db6fc6-mjtpv 4m 259Mi +``` + +Here you can see how much CPU and Memory is being used by the pods. If the CPU is close to 500m, which is the limit for one pod by default, +enable autoscaling/increase maxReplicas or increase replicaCount with autoscaling off. 
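+
+For example, if the poller workers in the output above are the ones running close to their CPU limit, a minimal
+`values.yaml` adjustment could look like the sketch below. The numbers are only an illustration and should be matched
+to the resources actually available on your nodes:
+
+```yaml
+worker:
+  poller:
+    autoscaling:
+      enabled: true
+      minReplicas: 2
+      maxReplicas: 8
+      targetCPUUtilizationPercentage: 80
+    resources:
+      limits:
+        cpu: 800m
+      requests:
+        cpu: 500m
+```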
+ + +See [Horizontal Autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to adjust the maximum replica value to the resources you have. + +See [Scaling with Microk8s](../mk8s/k8s-microk8s-scaling.md) for more information. + +### Reverse DNS lookup in trap worker + +If you want to see the hostname instead of the IP address of the incoming traps in Splunk, you can enable reverse dns lookup +for the incoming traps using the following configuration: + +```yaml +worker: + trap: + resolveAddress: + enabled: true + cacheSize: 500 # maximum number of records in cache + cacheTTL: 1800 # time to live of the cached record in seconds +``` + +Trap worker uses in memory cache to store the results of the reverse dns lookup. If you restart the worker, the cache will be cleared. + +### Worker parameters + +| Variable | Description | Default | +|----------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|-------------------| +| worker.poller.replicaCount | Number of poller worker replicas | 2 | +| worker.poller.concurrency | Minimum number of threads in a poller worker pod | 4 | +| worker.poller.prefetch | Number of tasks consumed from the queue at once | 1 | +| worker.poller.autoscaling.enabled | Enabling autoscaling for poller worker pods | false | +| worker.poller.autoscaling.minReplicas | Minimum number of running poller worker pods when autoscaling is enabled | 2 | +| worker.poller.autoscaling.maxReplicas | Maximum number of running poller worker pods when autoscaling is enabled | 10 | +| worker.poller.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on poller worker pods to spawn another replica | 80 | +| worker.poller.resources.limits | The resources limits for poller worker container | cpu: 500m | +| worker.poller.resources.requests | The requested resources for poller worker container | cpu: 250m | +| worker.trap.replicaCount | Number of trap worker replicas | 2 | +| worker.trap.concurrency | Minimum number of threads in a trap worker pod | 4 | +| worker.trap.prefetch | Number of tasks consumed from the queue at once | 30 | +| worker.trap.resolveAddress.enabled | Enable reverse dns lookup of the IP address of the processed trap | false | +| worker.trap.resolveAddress.cacheSize | Maximum number of reverse dns lookup result records stored in cache | 500 | +| worker.trap.resolveAddress.cacheTTL | Time to live of the cached reverse dns lookup record in seconds | 1800 | +| worker.trap.autoscaling.enabled | Enabling autoscaling for trap worker pods | false | +| worker.trap.autoscaling.minReplicas | Minimum number of running trap worker pods when autoscaling is enabled | 2 | +| worker.trap.autoscaling.maxReplicas | Maximum number of running trap worker pods when autoscaling is enabled | 10 | +| worker.trap.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on trap worker pods to spawn another replica | 80 | +| worker.trap.resources.limits | The resource limit for the poller worker container | cpu: 500m | +| worker.trap.resources.requests | The requested resources for the poller worker container | cpu: 250m | +| worker.sender.replicaCount | The number of sender worker replicas | 1 | +| worker.sender.concurrency | Minimum number of threads in a sender worker pod | 4 | +| worker.sender.prefetch | Number of tasks consumed from the queue at once | 30 | +| 
worker.sender.autoscaling.enabled | Enabling autoscaling for sender worker pods | false |
+| worker.sender.autoscaling.minReplicas | Minimum number of running sender worker pods when autoscaling is enabled | 2 |
+| worker.sender.autoscaling.maxReplicas | Maximum number of running sender worker pods when autoscaling is enabled | 10 |
+| worker.sender.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on sender worker pods to spawn another replica | 80 |
+| worker.sender.resources.limits | The resource limit for the sender worker container | cpu: 500m |
+| worker.sender.resources.requests | The requested resources for the sender worker container | cpu: 250m |
+| worker.livenessProbe.enabled | Whether the liveness probe is enabled | false |
+| worker.livenessProbe.exec.command | The exec command for the liveness probe to run in the container | Check values.yaml |
+| worker.livenessProbe.initialDelaySeconds | Number of seconds after the container has started before liveness probe is initiated | 80 |
+| worker.livenessProbe.periodSeconds | Frequency of performing the probe in seconds | 10 |
+| worker.readinessProbe.enabled | Whether the readiness probe should be turned on or not | false |
+| worker.readinessProbe.exec.command | The exec command for the readiness probe to run in the container | Check values.yaml |
+| worker.readinessProbe.initialDelaySeconds | Number of seconds after the container has started before readiness probe is initiated | 30 |
+| worker.readinessProbe.periodSeconds | Frequency of performing the probe in seconds | 5 |
+| worker.taskTimeout | Task timeout in seconds when a process takes a long time | 2400 |
+| worker.walkRetryMaxInterval | Maximum time interval between walk attempts | 180 |
+| worker.walkMaxRetries | Maximum number of walk retries | 5 |
+| worker.ignoreNotIncreasingOid | Ignoring `occurred: OID not increasing` issues for hosts specified in the array | [] |
+| worker.logLevel | Logging level, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL | INFO |
+| worker.podAntiAffinity | [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) | soft |
+| worker.udpConnectionTimeout | Timeout for SNMP operations in seconds | 3 |
+| worker.ignoreEmptyVarbinds | Ignores “Empty SNMP response message” in responses | false |
diff --git a/docs/gettingstarted/enable-ipv6.md b/docs/microk8s/enable-ipv6.md
similarity index 90%
rename from docs/gettingstarted/enable-ipv6.md
rename to docs/microk8s/enable-ipv6.md
index 954f7bb41..cd9a5f5f3 100644
--- a/docs/gettingstarted/enable-ipv6.md
+++ b/docs/microk8s/enable-ipv6.md
@@ -21,13 +21,14 @@ metadata:
 spec:
   natOutgoing: true
 ```
-You can check with command `microk8s kubectl get ippools -n kube-system` the default name of the ip pool for IPv6. If it differs from `default-ipv6-ippool` you need to change the name in the yaml file.
+You can check the default name of the IPv6 IP pool with the command `microk8s kubectl get ippools -n kube-system`.
+If it differs from `default-ipv6-ippool`, you need to change the name in the yaml file.
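+
+For example, if the command shows a pool called `ipv6-ippool` (a hypothetical name used here only for illustration),
+the manifest would reference that name in `metadata.name`. A minimal sketch, assuming the standard Calico
+`crd.projectcalico.org/v1` API group:
+
+```yaml
+apiVersion: crd.projectcalico.org/v1
+kind: IPPool
+metadata:
+  name: ipv6-ippool   # the name returned by `microk8s kubectl get ippools -n kube-system`
+spec:
+  natOutgoing: true
+```
+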
Then apply the configuration with the following command: ``` microk8s kubectl apply -f calico-ippool.yaml ``` -After those changes you can restart the microk8s fot the changes to be applied with the following commands: +After those changes you can restart the microk8s for the changes to be applied with the following commands: ``` microk8s stop microk8s start @@ -62,4 +63,5 @@ traps: ipFamilyPolicy: RequireDualStack ipFamilies: ["IPv4", "IPv6"] ``` -Default trap port for notifications for IPv6 is `2163`. You can change it to any other port if needed with `traps.service.ipv6Port` parameter. \ No newline at end of file +Default trap port for notifications for IPv6 is `2163`. You can change it to any other port if needed with `traps.service.ipv6Port` parameter. +The IPv6 port and IPv4 port cannot be the same. \ No newline at end of file diff --git a/docs/gui/apply-changes.md b/docs/microk8s/gui/apply-changes.md similarity index 75% rename from docs/gui/apply-changes.md rename to docs/microk8s/gui/apply-changes.md index 6530e3316..7b0c8adb0 100644 --- a/docs/gui/apply-changes.md +++ b/docs/microk8s/gui/apply-changes.md @@ -6,7 +6,7 @@ after the previous one was applied. If the `Apply changes` button is clicked ear and the following message with ETA will be displayed: -![ETA](../images/ui_docs/apply_changes/update_time.png){ style="border:2px solid; width:500px; height:auto" } +![ETA](../../images/ui_docs/apply_changes/update_time.png){ style="border:2px solid; width:500px; height:auto" } Scheduled update triggers new kubernetes job `job/snmp-splunk-connect-for-snmp-inventory`. If the ETA elapsed and the @@ -15,4 +15,4 @@ creation of the new job will be retried 10 times. If `Apply changes` is clicked will be displayed: -![Retries](../images/ui_docs/apply_changes/retries.png){ style="border:2px solid; width:500px; height:auto" } \ No newline at end of file +![Retries](../../images/ui_docs/apply_changes/retries.png){ style="border:2px solid; width:500px; height:auto" } \ No newline at end of file diff --git a/docs/microk8s/gui/enable-gui.md b/docs/microk8s/gui/enable-gui.md new file mode 100644 index 000000000..88257a113 --- /dev/null +++ b/docs/microk8s/gui/enable-gui.md @@ -0,0 +1,58 @@ +# SC4SNMP GUI + +SC4SNMP GUI is deployed in kubernetes and can be accessed through the web browser. + +## Enabling GUI + +To enable GUI, the following section must be added to `values.yaml` file and `UI.enable` variable must be set to `true`: + +```yaml +UI: + enable: true + frontEnd: + NodePort: 30001 + pullPolicy: "Always" + backEnd: + NodePort: 30002 + pullPolicy: "Always" + valuesFileDirectory: "" + valuesFileName: "" + keepSectionFiles: true +``` + +- `NodePort`: port number on which GUI will be accessible. It has to be from a range `30000-32767`. +- `pullPolicy`: [kubernetes pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy). +- `valuesFileDirectory`: this is an obligatory field if UI is used. It is an absolute directory path on the host machine +where configuration files from the GUI will be generated. It is used to keep all the changes from the GUI so that users can +easily switch back from using UI to the current sc4snmp version. It is advised to create new folder for those files, +because this directory is mounted to the Kubernetes pod and GUI application has full write access to this directory. +- `valuesFileName`: [OPTIONAL] full name of the file with configuration (e.g. `values.yaml`) that is stored inside the +`valuesFileDirectory` directory. 
If this file name is provided, and it exists in this directory, then GUI will update +appropriate sections in provided `values.yaml` file. If this file name is not provided, or provided file name cannot be +found inside `valuesFileDirectory` then inside that directory there will be created three files with the latest GUI configuration +of groups, profiles and inventory. Those configuration can be copied and pasted to the appropriate sections in the original `values.yaml` file. + + Template of initial `values.yaml`: + + ```yaml + scheduler: + profiles: | + + groups: | + + poller: + inventory: |- + ``` + + > This part of configuration can be also pasted to the `values.yaml` used for SC4SNMP installation. + +- `keepSectionFiles`: if valid `valuesFileName` was provided then by setting this variable to `true` or `false` user can +decide whether to keep additional files with configuration of groups, profiles and inventory. If valid `valuesFileName` +was NOT provided, then those files are created regardless of this variable. + + +To access the GUI, in the browser type the IP address of your Microk8s cluster followed by the NodePort number from the +frontEnd section, e.g. `192.168.123.13:30001`. + + + diff --git a/docs/gui/groups-gui.md b/docs/microk8s/gui/groups-gui.md similarity index 56% rename from docs/gui/groups-gui.md rename to docs/microk8s/gui/groups-gui.md index a07bb9e4e..1e0cdc0fb 100644 --- a/docs/gui/groups-gui.md +++ b/docs/microk8s/gui/groups-gui.md @@ -2,34 +2,34 @@ SC4SNMP [groups](../configuration/configuring-groups.md) can be configured in `Groups` tab. -![Groups tab](../images/ui_docs/groups/groups_tab.png){ style="border:2px solid" } +![Groups tab](../../images/ui_docs/groups/groups_tab.png){ style="border:2px solid" }
After pressing `Add group` button or plus sign next to the `Group`, new group can be added. -![New group](../images/ui_docs/groups/add_group.png){style="border:2px solid; width:500px; height:auto" } +![New group](../../images/ui_docs/groups/add_group.png){style="border:2px solid; width:500px; height:auto" }
Configured groups are displayed on the left-hand side, under the `Group name` label. After clicking on the group name, all devices belonging to the given group are displayed. To add a new device, click the plus sign next to the group name. -Configuration of the device is the same as in the `values.yaml` file [(check here)](../configuration/configuring-groups.md). +Configuration of the device is the same as in the `values.yaml` file. For details check [Configuring groups](../configuration/configuring-groups.md). -![Add a device](../images/ui_docs/groups/add_device.png){style="border:2px solid; width:500px; height:auto" } +![Add a device](../../images/ui_docs/groups/add_device.png){style="border:2px solid; width:500px; height:auto" }
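+
+For reference, a device added to a group through this form corresponds to an entry like the following in the
+`values.yaml` based configuration. The group name, address and port below are examples only; see the linked page for
+all available fields:
+
+```yaml
+scheduler:
+  groups: |
+    example_group:
+      - address: 10.202.4.10
+        port: 161
+```
+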
To edit a group name, click the pencil icon next to the group name. -![Edit group](../images/ui_docs/groups/edit_group.png){style="border:2px solid; width:500px; height:auto" } +![Edit group](../../images/ui_docs/groups/edit_group.png){style="border:2px solid; width:500px; height:auto" }
To edit device, click the pencil icon in the row of the given device. -![Edit device](../images/ui_docs/groups/edit_device.png){style="border:2px solid; width:500px; height:auto" } \ No newline at end of file +![Edit device](../../images/ui_docs/groups/edit_device.png){style="border:2px solid; width:500px; height:auto" } \ No newline at end of file diff --git a/docs/microk8s/gui/inventory-gui.md b/docs/microk8s/gui/inventory-gui.md new file mode 100644 index 000000000..264f2d03d --- /dev/null +++ b/docs/microk8s/gui/inventory-gui.md @@ -0,0 +1,21 @@ +# Configuring inventory in GUI + +SC4SNMP [inventory](../configuration/poller-configuration.md#poller-configuration-file) can be configured in `Inventory` tab. + +![Profiles tab](../../images/ui_docs/inventory/inventory_tab.png){ style="border:2px solid" } + +
+ +After pressing `Add device/group` button, new single device or group can be added. +Configuration of the device is the same as in the `inventory.yaml` file. For details check [Poller configuration](../configuration/poller-configuration.md#poller-configuration-file). + + +![New device/group](../../images/ui_docs/inventory/add_device.png){style="border:2px solid; width:500px; height:auto" } + +
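+
+For reference, a single device added through this form corresponds to one inventory record. A rough sketch of such a
+record in the `values.yaml` based configuration, with example values only:
+
+```yaml
+poller:
+  inventory: |
+    address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
+    10.202.4.10,161,2c,public,,,1800,example_profile,,
+```
+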
+ +To edit a device or group, click the pencil icon next in the desired row. + + +![Edit device](../../images/ui_docs/inventory/edit_device.png){style="border:2px solid; width:500px; height:auto" } +![Edit group](../../images/ui_docs/inventory/edit_group.png){style="border:2px solid; width:500px; height:auto" } \ No newline at end of file diff --git a/docs/microk8s/gui/profiles-gui.md b/docs/microk8s/gui/profiles-gui.md new file mode 100644 index 000000000..21cee93cb --- /dev/null +++ b/docs/microk8s/gui/profiles-gui.md @@ -0,0 +1,35 @@ +# Configuring profiles in GUI + +SC4SNMP [profiles](../configuration/configuring-profiles.md) can be configured in `Profiles` tab. + +![Profiles tab](../../images/ui_docs/profiles/profiles_list.png){ style="border:2px solid" } + +
+ +After pressing `Add profile` button, new profile will be added. +Configuration of the profile is the same as in the `values.yaml` file. For details check [Configuring profiles](../configuration/configuring-profiles.md). + + +![Add standard profile](../../images/ui_docs/profiles/add_standard_profile.png){style="border:2px solid; width:500px; height:auto" } + +
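+
+For reference, a standard profile created through this form corresponds to an entry like the following in the
+`values.yaml` based configuration. The profile name, frequency and varBinds below are examples only:
+
+```yaml
+scheduler:
+  profiles: |
+    example_profile:
+      frequency: 300
+      varBinds:
+        - ['IF-MIB']
+        - ['SNMPv2-MIB', 'sysName']
+```
+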
+ +Type of the profile can be changed: + + +![Profile types](../../images/ui_docs/profiles/profiles_types.png){ style="border:2px solid; width:500px; height:auto" } + +
+ +Examples of configuration of `Smart` and `Conditional` profiles: + + +![Smart profile](../../images/ui_docs/profiles/add_smart_profile.png){ style="border:2px solid; width:500px; height:auto" } +![Conditional profile](../../images/ui_docs/profiles/add_conditional.png){ style="border:2px solid; width:500px; height:auto" } + +
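+
+For reference, the forms above map to profile entries roughly like the following sketch. The field names, patterns and
+values are examples only; the linked configuration page is the authoritative reference for the exact syntax:
+
+```yaml
+scheduler:
+  profiles: |
+    example_smart_profile:
+      frequency: 300
+      condition:
+        type: "field"
+        field: "SNMPv2-MIB.sysDescr"
+        patterns:
+          - ".*linux.*"
+      varBinds:
+        - ['IF-MIB']
+    example_conditional_profile:
+      frequency: 300
+      conditions:
+        - field: IF-MIB.ifAdminStatus
+          operation: "equals"
+          value: "up"
+      varBinds:
+        - ['IF-MIB', 'ifDescr']
+```
+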
+ +All configured profiles can be edited by clicking the pencil icon: + + +![Edit confitional profile](../../images/ui_docs/profiles/edit_conditional.png){ style="border:2px solid; width:500px; height:auto" } \ No newline at end of file diff --git a/docs/gettingstarted/mk8s/k8s-microk8s-scaling.md b/docs/microk8s/mk8s/k8s-microk8s-scaling.md similarity index 96% rename from docs/gettingstarted/mk8s/k8s-microk8s-scaling.md rename to docs/microk8s/mk8s/k8s-microk8s-scaling.md index 2c1b0abc3..f917d3131 100644 --- a/docs/gettingstarted/mk8s/k8s-microk8s-scaling.md +++ b/docs/microk8s/mk8s/k8s-microk8s-scaling.md @@ -10,8 +10,8 @@ Below is the formula that can help with deciding when to scale the system. where: -* `inventory_size` - How many items we have on inventory (`values.yaml`). -* `workers_count` - How many workers for `polling` / `walk` we have (pod workers). +* `inventory_size` - Amount of item in inventory (`values.yaml`). +* `workers_count` - Amount of running workers for `polling` / `walk` (pod workers). * `task_period` - `walk` / `polling` period time (`values.yaml`). * `periodic_task_exec_time` - Execution time of `polling` / `walk` task (metrics at screenshot). @@ -70,7 +70,7 @@ i-0b27bcc06fc5c660e Ready 25h v1.30.5 1. [Install SC4SNMP](../sc4snmp-installation.md) if it is not installed yet. -2. Add `worker` section on `values.yaml`: +2. Add `worker` section in `values.yaml`: ```yaml worker: @@ -165,13 +165,13 @@ traps: microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace ``` -4. Checked that SC4SNMP scaled: +4. Check that SC4SNMP scaled: ```bash microk8s kubectl get po -n sc4snmp ``` -After scaling of each worker and trap service 5-10 instances will appear: +After applying the changes, each worker and trap service will have from 5 to 10 instances: ```bash NAME READY STATUS RESTARTS AGE diff --git a/docs/gettingstarted/mk8s/k8s-microk8s.md b/docs/microk8s/mk8s/k8s-microk8s.md similarity index 72% rename from docs/gettingstarted/mk8s/k8s-microk8s.md rename to docs/microk8s/mk8s/k8s-microk8s.md index 2df38aacb..05141de30 100644 --- a/docs/gettingstarted/mk8s/k8s-microk8s.md +++ b/docs/microk8s/mk8s/k8s-microk8s.md @@ -1,6 +1,7 @@ # Splunk Connect for SNMP using MicroK8s -See the following requirements to use any Linux deployment of Microk8s to support SC4SMP. The minimum requirements below are suitable for proof of value and small installations, and actual requirements will differ. +See the following requirements to use any Linux deployment of Microk8s to support SC4SNMP. +The minimum requirements below are suitable for proof of value and small installations, actual requirements will differ. Single node minimum: @@ -21,7 +22,7 @@ in the MicroK8s [documentation](https://microk8s.io/docs), including offline and ## Enabling IPv6 -If you plan to poll or receive trap notifications from IPv6 addresses, firstly check the instructions for [enabling +If you plan to poll or receive trap notifications from IPv6 addresses, firstly check the instruction for [enabling IPv6](../enable-ipv6.md). 
## Install MicroK8s using Snap @@ -37,14 +38,14 @@ sudo chown -f -R $USER ~/.kube su - $USER ``` -Wait for Installation of Mk8S to complete: +Wait for Installation of microk8s to complete: ```bash microk8s status --wait-ready ``` ## Install required services for SC4SNMP -The following commands can be issued from any one node in a cluster: +The following commands can be issued from any node in a cluster: ```bash sudo systemctl enable iscsid @@ -55,7 +56,7 @@ microk8s enable metrics-server microk8s status --wait-ready ``` -Install the DNS server for mk8s and configure the forwarding DNS servers. Replace the IP addressed below (opendns) with +Install the DNS server for microk8s and configure the forwarding DNS servers. Replace the IP addressed below (opendns) with the allowed values for your network: ```bash @@ -69,7 +70,7 @@ When installing Metallb, you will be prompted for one or more IPs to use as entr into the cluster. If you plan to enable clustering, this IP should not be assigned to the host (floats). If you do not plan to cluster, then this IP should be the IP of your host. -Note2: a single IP in cidr format is x.x.x.x/32. Use CIDR or range syntax for single server installations. This can be +Note: a single IP in cidr format is `x.x.x.x/32`. Use CIDR or range syntax for single server installations. This can be the same as the primary IP. ```bash @@ -79,4 +80,4 @@ microk8s status --wait-ready ## Add nodes (optional) -If you need cluster mode please use following [guide](k8s-microk8s-scaling.md#make-microk8s-cluster). \ No newline at end of file +If you need cluster mode use following [guide](k8s-microk8s-scaling.md#make-microk8s-cluster). \ No newline at end of file diff --git a/docs/offlineinstallation/offline-microk8s.md b/docs/microk8s/offlineinstallation/offline-microk8s.md similarity index 87% rename from docs/offlineinstallation/offline-microk8s.md rename to docs/microk8s/offlineinstallation/offline-microk8s.md index 0ff5c96c3..4896938cf 100644 --- a/docs/offlineinstallation/offline-microk8s.md +++ b/docs/microk8s/offlineinstallation/offline-microk8s.md @@ -1,6 +1,7 @@ # Offline Microk8s installation issues -See [install alternatives](https://microk8s.io/docs/install-alternatives#heading--offline) for offline installation of Microk8s, but there are additional steps to install microk8s offline. See the following steps to install offline: +See [install alternatives](https://microk8s.io/docs/install-alternatives#heading--offline) for offline installation of Microk8s. There are additional steps to install microk8s offline. +See the following steps to install offline: ## Importing images @@ -24,7 +25,9 @@ kube-system calico-kube-controllers-7c9c8dd885-fg8f2 0/1 Pending 0 kube-system calico-node-zg4c4 0/1 Init:0/3 0 23s ``` -The pods are in the `Pending`/`Init` state because they’re trying to download images, which is impossible to do offline. In order to make them download, you need to download all the images on a different server with an internet connection, pack it up, and import it to a microk8s image registry on your offline server. +The pods are in the `Pending`/`Init` state because they are trying to download images, which is impossible to do offline. +In order to make them download, you need to download all the images on a different server with an internet connection, +pack it up, and import it to a microk8s image registry on your offline server. 
### Packing up images for an offline environment @@ -41,7 +44,7 @@ kube-system 0s Warning Failed pod/calico-node-sc784 kube-system 0s Warning Failed pod/calico-node-sc784 Error: ErrImagePull ``` -The previous information shows you that you lack a `docker.io/calico/cni:v3.21.4` image, and need to import it in order to fix the issue. +The previous information shows that you lack a `docker.io/calico/cni:v3.21.4` image, and need to import it in order to fix the issue. The process to do this action is always the following: @@ -93,7 +96,8 @@ microk8s ctr image import pause.tar microk8s ctr image import metrics.tar ``` -NOTE: for other versions of `microk8s`, tags of images may differ. +!!! info + For other versions of `microk8s`, tags of images may differ. After running the following: @@ -115,14 +119,14 @@ kube-system metrics-server-5f8f64cb86-x7k29 1/1 Running ## Enabling DNS and Metallb -`dns` and `metallb` don’t require importing any images, so you can enable them simply through the following commands: +`dns` and `metallb` do not require importing any images, so you can enable them simply through the following commands: ```yaml microk8s enable dns microk8s enable metallb ``` -For more information on `metallb`, see [Install metallb](../gettingstarted/mk8s/k8s-microk8s.md#install-metallb). +For more information on `metallb`, see [Install metallb](../mk8s/k8s-microk8s.md#install-metallb). ## Installing helm3 diff --git a/docs/offlineinstallation/offline-sc4snmp.md b/docs/microk8s/offlineinstallation/offline-sc4snmp.md similarity index 94% rename from docs/offlineinstallation/offline-sc4snmp.md rename to docs/microk8s/offlineinstallation/offline-sc4snmp.md index 319e33e8b..2a2e3e8fa 100644 --- a/docs/offlineinstallation/offline-sc4snmp.md +++ b/docs/microk8s/offlineinstallation/offline-sc4snmp.md @@ -9,14 +9,14 @@ to the SC4SNMP installation server. Those packages are: - `dependencies-images.tar` - `splunk-connect-for-snmp-chart.tar` -Additionally, you'll need +Additionally, you will need: - `pull_mibserver.sh` script - `pull_gui_images.sh` script to easily pull and export mibserver image and GUI images. -Moreover, the SC4SNMP Docker image must be pulled, saved as a `.tar` package, and then moved to the server as well. +Moreover, the SC4SNMP docker image must be pulled, saved as a `.tar` package, and then moved to the server as well. This process requires Docker to be installed locally. Images can be pulled from the following repository: `ghcr.io/splunk/splunk-connect-for-snmp/container:`. @@ -64,7 +64,7 @@ microk8s ctr image import snmp_image.tar microk8s ctr image import mibserver.tar ``` -Afterwards, create `values.yaml`. It's a little different from `values.yaml` used in an online installation. +Afterwards, create `values.yaml`. It is a little different from `values.yaml` used in an online installation. 
The difference between the two files is the following, which is used for automatic image pulling: ```yaml diff --git a/docs/offlineinstallation/offline-sck.md b/docs/microk8s/offlineinstallation/offline-sck.md similarity index 66% rename from docs/offlineinstallation/offline-sck.md rename to docs/microk8s/offlineinstallation/offline-sck.md index 459efdbaa..71fa311c0 100644 --- a/docs/offlineinstallation/offline-sck.md +++ b/docs/microk8s/offlineinstallation/offline-sck.md @@ -37,13 +37,12 @@ microk8s helm3 install sck \ ### Variables description - -| Placeholder | Description | Example | -|---|---|---| -| splunk_endpoint | host address of splunk instance | https://endpoint.example.com:8088/services/collector | -| insecure_skip_verify | is insecure ssl allowed | false | -| splunk_token | Splunk HTTP Event Collector token | 450a69af-16a9-4f87-9628-c26f04ad3785 | -| cluster_name | name of the cluster | my-cluster | +| Placeholder | Description | Example | +|----------------------|-----------------------------------|--------------------------------------------------------| +| splunk_endpoint | host address of splunk instance | `https://endpoint.example.com:8088/services/collector` | +| insecure_skip_verify | is insecure ssl allowed | `false` | +| splunk_token | Splunk HTTP Event Collector token | `450a69af-16a9-4f87-9628-c26f04ad3785` | +| cluster_name | name of the cluster | `my-cluster` | An example of a correctly filled command is: ```bash @@ -78,13 +77,13 @@ splunk-otel-collector ### Variables description -| Placeholder | Description | Example | -|---|---|---| -| cluster_name | name of the cluster | my_cluster | -| realm | Realm obtained from the Splunk Observability Cloud environment | us0 | -| token | Token obtained from the Splunk Observability Cloud environment | BCwaJ_Ands4Xh7Nrg | -| ingest_url | Ingest URL from the Splunk Observability Cloud environment | https://ingest..signalfx.com | -| api_url | API URL from the Splunk Observability Cloud environment | https://api..signalfx.com | +| Placeholder | Description | Example | +|--------------|----------------------------------------------------------------|--------------------------------| +| cluster_name | name of the cluster | `my_cluster` | +| realm | Realm obtained from the Splunk Observability Cloud environment | `us0` | +| token | Token obtained from the Splunk Observability Cloud environment | `BCwaJ_Ands4Xh7Nrg` | +| ingest_url | Ingest URL from the Splunk Observability Cloud environment | `https://ingest..signalfx.com` | +| api_url | API URL from the Splunk Observability Cloud environment | `https://api..signalfx.com` | An example of a correctly filled command is: ```bash diff --git a/docs/gettingstarted/sc4snmp-installation.md b/docs/microk8s/sc4snmp-installation.md similarity index 93% rename from docs/gettingstarted/sc4snmp-installation.md rename to docs/microk8s/sc4snmp-installation.md index 8753c031f..7bd42cbe8 100644 --- a/docs/gettingstarted/sc4snmp-installation.md +++ b/docs/microk8s/sc4snmp-installation.md @@ -14,7 +14,7 @@ requires checking to see if the firewall is not blocking any of the [required mi ### Offline installation -For offline installation instructions see [this page](../offlineinstallation/offline-sc4snmp.md). +For offline installation instructions see [this page](offlineinstallation/offline-sc4snmp.md). ### Online installation @@ -42,7 +42,7 @@ The installation of SC4SNMP requires the creation of a `values.yaml` file, which 2. 
Review the [examples][examples_link] to determine which areas require configuration. 3. For more advanced configuration options, refer to the complete default [values.yaml](https://github.com/splunk/splunk-connect-for-snmp/blob/main/charts/splunk-connect-for-snmp/values.yaml) or download it directly from Helm using the command `microk8s helm3 show values splunk-connect-for-snmp/splunk-connect-for-snmp` -4. In order to learn more about each of the config parts, check [configuration](../configuration/deployment-configuration.md) section. +4. In order to learn more about each of the configuration parts, check [configuration](configuration/deployment-configuration.md) section. It is recommended to start by completing the base template and gradually add additional configurations as needed. @@ -71,7 +71,7 @@ microk8s helm3 install snmp -f values.yaml splunk-connect-for-snmp/splunk-connec ``` From now on, when editing SC4SNMP configuration, the configuration change must be -inserted in the corresponding section of `values.yaml`. For more details see [configuration](../configuration/deployment-configuration.md) section. +inserted in the corresponding section of `values.yaml`. For more details see [configuration](configuration/deployment-configuration.md) section. Use the following command to propagate configuration changes: ``` bash @@ -124,7 +124,7 @@ snmp-splunk-connect-for-snmp-trap LoadBalancer 10.152.183.33 10.202.9.21 ``` If you see `` communicate instead of the IP address, that means you either provided the wrong IP address -in `traps.loadBalancerIP` or there's something wrong with the `metallb` microk8s addon. +in `traps.loadBalancerIP` or there is something wrong with the `metallb` microk8s addon. In the following example, the default indexes are used, the metric data goes to `netmetrics`, and the events goes to `netops`. @@ -189,11 +189,11 @@ When the walk finishes, events appear in Splunk. ## Next Steps -A good way to start with SC4SNMP polling is to follow the [Step by Step guide for polling](../configuration/step-by-step-poll.md). -Advanced configuration of polling is available in the [Poller configuration](../configuration/poller-configuration.md) section. -The SNMP data format is explained in the [SNMP data format](../configuration/snmp-data-format.md) section. +A good way to start with SC4SNMP polling is to follow the [Step by Step guide for polling](configuration/step-by-step-poll.md). +Advanced configuration of polling is available in the [Poller configuration](configuration/poller-configuration.md) section. +The SNMP data format is explained in the [SNMP data format](configuration/snmp-data-format.md) section. -For advanced trap configuration, see the [Traps configuration](../configuration/trap-configuration.md) section. +For advanced trap configuration, see the [Traps configuration](configuration/trap-configuration.md) section. ## Uninstall Splunk Connect for SNMP To uninstall SC4SNMP run the following commands: diff --git a/docs/gettingstarted/sck-installation.md b/docs/microk8s/sck-installation.md similarity index 96% rename from docs/gettingstarted/sck-installation.md rename to docs/microk8s/sck-installation.md index aa6daf0e9..856dfb168 100644 --- a/docs/gettingstarted/sck-installation.md +++ b/docs/microk8s/sck-installation.md @@ -1,7 +1,7 @@ # Splunk OpenTelemetry Collector for Kubernetes installation Splunk OpenTelemetry Collector for Kubernetes is not required for SC4SNMP installation. 
However, Splunk OpenTelemetry Collector for Kubernetes sends logs and metrics from a k8s cluster to a Splunk instance, which makes SC4SNMP easier to debug. -You can do the same using the `microk8s kubectl logs` command on instances you're interested in, but if you're not proficient in Kubernetes, +You can do the same using the `microk8s kubectl logs` command on instances you are interested in, but if you are not proficient in Kubernetes, Splunk OpenTelemetry Collector for Kubernetes is recommended. The following steps are sufficient for a Splunk OpenTelemetry Collector installation for the SC4SNMP project with Splunk Enterprise/Enterprise Cloud. @@ -9,7 +9,7 @@ In order to learn more about Splunk OpenTelemetry Collector, visit [Splunk OpenT ### Offline installation -For offline installation instructions see [Splunk OpenTelemetry Collector for Kubernetes offline installation](../offlineinstallation/offline-sck.md). +For offline installation instructions see [Splunk OpenTelemetry Collector for Kubernetes offline installation](offlineinstallation/offline-sck.md). ### Add Splunk OpenTelemetry Collector repository to HELM diff --git a/docs/gettingstarted/splunk-requirements.md b/docs/microk8s/splunk-requirements.md similarity index 100% rename from docs/gettingstarted/splunk-requirements.md rename to docs/microk8s/splunk-requirements.md diff --git a/docs/upgrade.md b/docs/microk8s/upgrade.md similarity index 100% rename from docs/upgrade.md rename to docs/microk8s/upgrade.md diff --git a/docs/releases.md b/docs/releases.md index bdbf780a4..799252b37 100644 --- a/docs/releases.md +++ b/docs/releases.md @@ -1,11 +1,11 @@ # Base Information ## Known Issues -The list of open known issues is available under [Known issue link](https://github.com/splunk/splunk-connect-for-snmp/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) +The list of open known issues is available under [Known issue link](https://github.com/splunk/splunk-connect-for-snmp/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). ## Open issues to the product To open an issue for Splunk Connect for SNMP, go to the [github SC4SNMP](https://github.com/splunk/splunk-connect-for-snmp/issues) -project and open am issue. +project and open an issue. ## Releases To check Splunk Connect for SNMP releases, see: [SC4SNMP Releases](https://github.com/splunk/splunk-connect-for-snmp/releases) diff --git a/docs/small-environment.md b/docs/small-environment.md index 2508be1cb..bc10dc653 100644 --- a/docs/small-environment.md +++ b/docs/small-environment.md @@ -3,12 +3,11 @@ SC4SNMP can be successfully installed in small environments with 2 CPUs and 4 GB of memory. However, Splunk OpenTelemetry Collector for Kubernetes cannot be installed in a small environment along with SC4SNMP. Additionally, the `resources` limits must be set for Kubernetes -pods. See the example of `values.yaml` with the appropriate resources [here][lightweight_doc_link]. +pods or Docker containers. See the example of `values.yaml` with the appropriate resources [here][lightweight_doc_link]. -The rest of the installation is the same as [online](gettingstarted/sc4snmp-installation.md), or the -[offline](offlineinstallation/offline-sc4snmp.md) installation. +For the rest of installation process you can follow the instructions from **Getting started** section with the deployment of your choice. 
-Keep in mind that a lightweight instance of SC4SNMP won't be able to poll from many devices and may experience delays +Keep in mind that a lightweight instance of SC4SNMP will not be able to poll from many devices and may experience delays if there is frequent polling. [lightweight_doc_link]: https://github.com/splunk/splunk-connect-for-snmp/blob/main/examples/lightweight_installation.yaml diff --git a/docs/troubleshooting/configuring-logs.md b/docs/troubleshooting/configuring-logs.md index 6e74cbd3e..32c44bd16 100644 --- a/docs/troubleshooting/configuring-logs.md +++ b/docs/troubleshooting/configuring-logs.md @@ -15,11 +15,11 @@ Log level configuration can be set for `worker`, `poller`, `scheduler` and `trap ## Accessing SC4SNMP logs -SC4SNMP logs can be browsed in Splunk in `em_logs` index, provided that [sck-otel](../gettingstarted/sck-installation.md) +SC4SNMP logs can be browsed in Splunk in `em_logs` index, provided that [sck-otel](../microk8s/sck-installation.md) is installed. Logs can be also accessed directly in kubernetes using terminal. ### Accessing logs via Splunk -If [sck-otel](../gettingstarted/sck-installation.md) is installed, browse `em_logs` index. Logs can be further filtered +If [sck-otel](../microk8s/sck-installation.md) is installed, browse `em_logs` index. Logs can be further filtered for example by the sourcetype field. Example search command to get logs from poller: ``` index=em_logs sourcetype="kube:container:splunk-connect-for-snmp-worker-poller" diff --git a/docs/troubleshooting/k8s-commands.md b/docs/troubleshooting/k8s-commands.md index aea96a9f7..50515e16d 100644 --- a/docs/troubleshooting/k8s-commands.md +++ b/docs/troubleshooting/k8s-commands.md @@ -264,4 +264,4 @@ When having the issues with dual-stack configuration the `IP Family Policy` and ### Check service configuration Checking the service configuration can be useful when having issues with the traps connectivity. -For better explanation refer to: [Wrong IP or port](../traps-issues#wrong-ip-or-port) +For better explanation refer to: [Wrong IP or port](../traps-issues#wrong-ip-or-port). diff --git a/docs/troubleshooting/polling-issues.md b/docs/troubleshooting/polling-issues.md index 5a6cfb1b8..621c2ba73 100644 --- a/docs/troubleshooting/polling-issues.md +++ b/docs/troubleshooting/polling-issues.md @@ -1,7 +1,7 @@ # Identifying Polling and Walk Issues ## Check when SNMP WALK was executed last time for the device -1. [Configure Splunk OpenTelemetry Collector for Kubernetes](../gettingstarted/sck-installation.md) or [Configure Docker Logs for Splunk](../dockercompose/9-splunk-logging.md) +1. [Configure Splunk OpenTelemetry Collector for Kubernetes](../microk8s/sck-installation.md) or [Configure Docker Logs for Splunk](../dockercompose/9-splunk-logging.md). 2. Go to your Splunk and execute search: `index="em_logs" "Sending due task" "sc4snmp;;walk"` and replace with the pertinent IP Address. @@ -42,7 +42,7 @@ If you put in only the IP address (for example, `127.0.0.1`), then errors will b ## Walking a device takes too much time -See [Configure small walk profile](../../configuration/configuring-profiles/#walk-profile) to enable the small walk +See [Configure small walk profile](../../microk8s/configuration/configuring-profiles/#walk-profile) to enable the small walk functionality. 
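+
+A walk profile restricts the walk to the varBinds you actually need, so the operation finishes much faster. A minimal
+sketch of such a profile in `values.yaml`, assuming the syntax described on the linked page:
+
+```yaml
+scheduler:
+  profiles: |
+    small_walk:
+      condition:
+        type: "walk"
+      varBinds:
+        - ['IF-MIB']
+        - ['SNMPv2-MIB']
+```
+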
## An error of SNMP isWalk=True blocks traffic on the SC4SNMP instance @@ -83,11 +83,11 @@ An example for an appropriate Splunk query would be the following: ### Unknown USM user In case of polling SNMPv3 devices, `Unknown USM user` error suggests wrong username. Verify -that the kubernetes secret with the correct username has been created ([SNMPv3 configuration](../configuration/snmpv3-configuration.md)). +that the kubernetes secret with the correct username has been created ([SNMPv3 configuration](../microk8s/configuration/snmpv3-configuration.md)). ### Wrong SNMP PDU digest In case of polling SNMPv3 devices, `Wrong SNMP PDU digest` error suggests wrong authentication key. Verify -that the kubernetes secret with the correct authentication key has been created ([SNMPv3 configuration](../configuration/snmpv3-configuration.md)). +that the kubernetes secret with the correct authentication key has been created ([SNMPv3 configuration](../microk8s/configuration/snmpv3-configuration.md)). ### No SNMP response received before timeout `No SNMP response received before timeout` error might have several root causes. Some of them are: @@ -126,7 +126,8 @@ The following groups have invalid configuration and won't be used: ['group1']. P The following profiles have invalid configuration and won't be used: ['standard_profile', 'walk_profile']. Please check indentation and keywords spelling inside mentioned profiles configuration. ``` Errors above indicate, that the mentioned groups or profiles might have wrong indentation or some keywords were omitted or misspelled. Refer to: -- kubernetes: [Configuring profiles](../configuration/configuring-profiles.md) or [Configuring Groups](../configuration/configuring-groups.md) + +- kubernetes: [Configuring profiles](../microk8s/configuration/configuring-profiles.md) or [Configuring Groups](../microk8s/configuration/configuring-groups.md) - docker: [Scheduler configuration](../dockercompose/4-scheduler-configuration.md) sections to check how the correct configuration should look like. \ No newline at end of file diff --git a/docs/troubleshooting/traps-issues.md b/docs/troubleshooting/traps-issues.md index 0df1f8fd4..7b22c64d1 100644 --- a/docs/troubleshooting/traps-issues.md +++ b/docs/troubleshooting/traps-issues.md @@ -53,7 +53,7 @@ While sending SNMP v3 traps in case of wrong username or engine id configuration 2024-02-06 15:42:14,091 ERROR Security Model failure for device ('18.226.181.199', 46066): Unknown SNMP security name encountered ``` -If this error occurs, verify that the kubernetes secret with the correct username has been created ([SNMPv3 configuration](../configuration/snmpv3-configuration.md)). +If this error occurs, verify that the kubernetes secret with the correct username has been created ([SNMPv3 configuration](../microk8s/configuration/snmpv3-configuration.md)). After creating the secret, add it under `traps.usernameSecrets` in `values.yaml`. Check that the correct snmp engine id is configured under `traps.securityEngineId`. See the following example of a `values.yaml` with configured secret and engine id: ```yaml @@ -70,7 +70,7 @@ While sending SNMP v3 traps in case of wrong authentication protocol or password ``` 2024-02-06 15:42:14,642 ERROR Security Model failure for device ('18.226.181.199', 54806): Authenticator mismatched ``` -If this error occurs, verify that the kubernetes secret with the correct authentication protocol and password has been created ([SNMPv3 configuration](../configuration/snmpv3-configuration.md)). 
+If this error occurs, verify that the kubernetes secret with the correct authentication protocol and password has been created ([SNMPv3 configuration](../microk8s/configuration/snmpv3-configuration.md)). After creating the secret, add it under `traps.usernameSecrets` in `values.yaml`. See the following example of a `values.yaml` with configured secret: ```yaml traps: @@ -83,7 +83,7 @@ While sending SNMP v3 traps in case of wrong privacy protocol or password config ``` 2024-02-06 15:42:14,780 ERROR Security Model failure for device ('18.226.181.199', 48249): Ciphering services not available or ciphertext is broken ``` -If this error occurs, verify that the kubernetes secret with the correct privacy protocol and password has been created ([SNMPv3 configuration](../configuration/snmpv3-configuration.md)). +If this error occurs, verify that the kubernetes secret with the correct privacy protocol and password has been created ([SNMPv3 configuration](../microk8s/configuration/snmpv3-configuration.md)). After creating the secret, add it under `traps.usernameSecrets` in `values.yaml`. See the following example of a `values.yaml` with configured secret: ```yaml traps: diff --git a/examples/offline_installation_values.md b/examples/offline_installation_values.md index e72e7634b..02dbf2f0c 100644 --- a/examples/offline_installation_values.md +++ b/examples/offline_installation_values.md @@ -10,7 +10,7 @@ splunk: port: "###SPLUNK_PORT###" image: #Fill ###TAG## with the SC4SNMP version downloaded before with docker pull command - # according to the documentation: https://splunk.github.io/splunk-connect-for-snmp/main/offlineinstallation/offline-sc4snmp/ + # according to the documentation: https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/offlineinstallation/offline-sc4snmp/ tag: ###TAG### pullPolicy: Never traps: @@ -59,6 +59,6 @@ mibserver: pullPolicy: Never ``` -Fill `###` variables according to the description from [online installation](https://splunk.github.io/splunk-connect-for-snmp/main/gettingstarted/sc4snmp-installation/#configure-splunk-enterprise-or-splunk-cloud-connection). +Fill `###` variables according to the description from [online installation](https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/sc4snmp-installation/#configure-splunk-enterprise-or-splunk-cloud-connection). Additionally, fill `###TAG###` ith the same tag used before to `docker pull` an SC4SNMP image. 
\ No newline at end of file diff --git a/examples/polling_and_traps_v3.yaml b/examples/polling_and_traps_v3.yaml index b91ca9424..9eb76e497 100644 --- a/examples/polling_and_traps_v3.yaml +++ b/examples/polling_and_traps_v3.yaml @@ -7,7 +7,7 @@ splunk: port: "8088" traps: # Remember to create sc4snmp-homesecure-sha-aes and sc4snmp-homesecure-sha-des secrets beforehand - # this is how to do it: https://splunk.github.io/splunk-connect-for-snmp/main/configuration/snmpv3-configuration/ + # this is how to do it: https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/microk8s/configuration/snmpv3-configuration/ usernameSecrets: - sc4snmp-homesecure-sha-aes - sc4snmp-homesecure-sha-des @@ -23,7 +23,7 @@ scheduler: - ['UCD-SNMP-MIB'] poller: # Remember to create sc4snmp-hlab-sha-aes secret beforehand - # this is how to do it: https://splunk.github.io/splunk-connect-for-snmp/main/configuration/snmpv3-configuration/ + # this is how to do it: https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/configuration/snmpv3-configuration/ usernameSecrets: - sc4snmp-hlab-sha-aes inventory: | diff --git a/examples/polling_values.yaml b/examples/polling_values.yaml index f0de6e5c7..47fddf2e9 100644 --- a/examples/polling_values.yaml +++ b/examples/polling_values.yaml @@ -7,7 +7,7 @@ splunk: port: "8088" # in the worker section you can adjust scaling parameters # full description is here: -# https://splunk.github.io/splunk-connect-for-snmp/main/configuration/worker-configuration +# https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/configuration/worker-configuration worker: poller: replicaCount: 5 diff --git a/mkdocs.yml b/mkdocs.yml index d7561c56f..d41550efb 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -39,41 +39,10 @@ theme: nav: - Home: "index.md" - - Getting Started: - - Splunk Requirements: "gettingstarted/splunk-requirements.md" - - Platform Microk8s: "gettingstarted/mk8s/k8s-microk8s.md" - - Install Splunk OpenTelemetry Collector for Kubernetes: "gettingstarted/sck-installation.md" - - Install SC4SNMP: "gettingstarted/sc4snmp-installation.md" - - Enable IPv6: "gettingstarted/enable-ipv6.md" - - Scaling with Microk8s: "gettingstarted/mk8s/k8s-microk8s-scaling.md" - - Configuration: - - Deployment: "configuration/deployment-configuration.md" - - Configurable values: "configuration/values-params-description.md" - - Polling: - - Poller: "configuration/poller-configuration.md" - - Scheduler: "configuration/scheduler-configuration.md" - - Configuring Profiles: "configuration/configuring-profiles.md" - - Configuring Groups: "configuration/configuring-groups.md" - - Step by Step polling example: "configuration/step-by-step-poll.md" - - SNMP data format: "configuration/snmp-data-format.md" - - Traps: "configuration/trap-configuration.md" - - Worker: "configuration/worker-configuration.md" - - Mongo DB: "configuration/mongo-configuration.md" - - Redis: "configuration/redis-configuration.md" - - SNMPv3 configuration: "configuration/snmpv3-configuration.md" - - Splunk Infrastructure Monitoring: "configuration/sim-configuration.md" - - CoreDNS: "configuration/coredns-configuration.md" - - Offline Installation: - - Install Microk8s: "offlineinstallation/offline-microk8s.md" - - Install Splunk OpenTelemetry Collector for Kubernetes: "offlineinstallation/offline-sck.md" - - Install SC4SNMP: "offlineinstallation/offline-sc4snmp.md" - - GUI: - - Enable GUI: "gui/enable-gui.md" - - Configuring Profiles: "gui/profiles-gui.md" - - Configuring Groups: "gui/groups-gui.md" - - Configuring Inventory: 
"gui/inventory-gui.md" - - Apply changes: "gui/apply-changes.md" - - Docker compose: + - Architecture: + - High-level design: "architecture/design.md" + - Infrastructure Planning: "architecture/planning.md" + - Getting Started with Docker Compose: - Install Docker: "dockercompose/1-install-docker.md" - Download package: "dockercompose/2-download-package.md" - Inventory configuration: "dockercompose/3-inventory-configuration.md" @@ -84,20 +53,57 @@ nav: - Offline installation: "dockercompose/8-offline-installation.md" - Sending logs to Splunk: "dockercompose/9-splunk-logging.md" - Enable IPv6: "dockercompose/10-enable-ipv6.md" + - Getting Started with Microk8s: + - Installation: + - Splunk Requirements: "microk8s/splunk-requirements.md" + - Platform Microk8s: "microk8s/mk8s/k8s-microk8s.md" + - Install Splunk OpenTelemetry Collector for Kubernetes: "microk8s/sck-installation.md" + - Install SC4SNMP: "microk8s/sc4snmp-installation.md" + - Enable IPv6: "microk8s/enable-ipv6.md" + - Scaling with Microk8s: "microk8s/mk8s/k8s-microk8s-scaling.md" + - Configuration: + - Deployment: "microk8s/configuration/deployment-configuration.md" + - Configurable values: "microk8s/configuration/values-params-description.md" + - Polling: + - Poller: "microk8s/configuration/poller-configuration.md" + - Scheduler: "microk8s/configuration/scheduler-configuration.md" + - Configuring Profiles: "microk8s/configuration/configuring-profiles.md" + - Configuring Groups: "microk8s/configuration/configuring-groups.md" + - Step by Step polling example: "microk8s/configuration/step-by-step-poll.md" + - SNMP data format: "microk8s/configuration/snmp-data-format.md" + - Traps: "microk8s/configuration/trap-configuration.md" + - Worker: "microk8s/configuration/worker-configuration.md" + - MongoDB: "microk8s/configuration/mongo-configuration.md" + - Redis: "microk8s/configuration/redis-configuration.md" + - SNMPv3 configuration: "microk8s/configuration/snmpv3-configuration.md" + - Splunk Infrastructure Monitoring: "microk8s/configuration/sim-configuration.md" + - CoreDNS: "microk8s/configuration/coredns-configuration.md" + - Offline Installation: + - Install Microk8s: "microk8s/offlineinstallation/offline-microk8s.md" + - Install Splunk OpenTelemetry Collector for Kubernetes: "microk8s/offlineinstallation/offline-sck.md" + - Install SC4SNMP: "microk8s/offlineinstallation/offline-sc4snmp.md" + - GUI: + - Enable GUI: "microk8s/gui/enable-gui.md" + - Configuring Profiles: "microk8s/gui/profiles-gui.md" + - Configuring Groups: "microk8s/gui/groups-gui.md" + - Configuring Inventory: "microk8s/gui/inventory-gui.md" + - Apply changes: "microk8s/gui/apply-changes.md" + - Upgrade SC4SNMP: "microk8s/upgrade.md" + - High Availability: "ha.md" + - Improved polling performance: "improved-polling.md" - Lightweight installation: "small-environment.md" - - Architecture: - - High-level design: "architecture/design.md" - - Infrastructure Planning: "architecture/planning.md" - - Security: "security.md" + - Monitoring dashboard: "dashboard.md" + - Releases: "releases.md" - Request MIB: "mib-request.md" - - Upgrade SC4SNMP: "upgrade.md" + - Security: "security.md" - Troubleshooting: - Accessing and configuring logs: "troubleshooting/configuring-logs.md" + - Docker commands: "troubleshooting/docker-commands.md" + - Kubernetes commands: "troubleshooting/k8s-commands.md" - Polling issues: "troubleshooting/polling-issues.md" - Traps issues: "troubleshooting/traps-issues.md" - - Kubernetes commands: "troubleshooting/k8s-commands.md" - - Docker commands: 
"troubleshooting/docker-commands.md" - - Releases: "releases.md" - - High Availability: "ha.md" - - Improved polling performance: "improved-polling.md" - - Monitoring dashboard: "dashboard.md" + + + + + From feab1e201e0ca38d46be6585050c5f8a3d746fba Mon Sep 17 00:00:00 2001 From: ajasnosz <139114006+ajasnosz@users.noreply.github.com> Date: Tue, 29 Oct 2024 12:56:05 +0100 Subject: [PATCH 06/10] fix: refactor docker compose files (#1105) --- CHANGELOG.md | 2 + docker_compose/docker-compose-coredns.yaml | 15 -- .../docker-compose-dependencies.yaml | 40 --- docker_compose/docker-compose-inventory.yaml | 36 --- docker_compose/docker-compose-network.yaml | 11 - docker_compose/docker-compose-scheduler.yaml | 33 --- docker_compose/docker-compose-secrets.yaml | 2 - docker_compose/docker-compose-traps.yaml | 51 ---- .../docker-compose-worker-poller.yaml | 69 ----- .../docker-compose-worker-sender.yaml | 70 ----- .../docker-compose-worker-trap.yaml | 74 ------ docker_compose/docker-compose.yaml | 248 ++++++++++++++++++ docker_compose/manage_logs.py | 87 ++---- docker_compose/manage_secrets.py | 221 +++++++--------- docs/dockercompose/10-enable-ipv6.md | 2 +- docs/dockercompose/2-download-package.md | 8 +- docs/dockercompose/5-traps-configuration.md | 1 - docs/dockercompose/7-snmpv3-secrets.md | 11 +- docs/dockercompose/9-splunk-logging.md | 4 +- .../configuration/trap-configuration.md | 28 +- integration_tests/automatic_setup_compose.sh | 4 +- integration_tests/splunk_test_utils.py | 2 +- 22 files changed, 393 insertions(+), 626 deletions(-) delete mode 100644 docker_compose/docker-compose-coredns.yaml delete mode 100644 docker_compose/docker-compose-dependencies.yaml delete mode 100644 docker_compose/docker-compose-inventory.yaml delete mode 100644 docker_compose/docker-compose-network.yaml delete mode 100644 docker_compose/docker-compose-scheduler.yaml delete mode 100644 docker_compose/docker-compose-secrets.yaml delete mode 100644 docker_compose/docker-compose-traps.yaml delete mode 100644 docker_compose/docker-compose-worker-poller.yaml delete mode 100644 docker_compose/docker-compose-worker-sender.yaml delete mode 100644 docker_compose/docker-compose-worker-trap.yaml create mode 100644 docker_compose/docker-compose.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 28a82f404..7abd495ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,8 @@ ## Unreleased ### Changed +- general refactor of documentation +- merge docker compose files into one ### Fixed diff --git a/docker_compose/docker-compose-coredns.yaml b/docker_compose/docker-compose-coredns.yaml deleted file mode 100644 index dcabb1496..000000000 --- a/docker_compose/docker-compose-coredns.yaml +++ /dev/null @@ -1,15 +0,0 @@ -version: '3.8' -services: - coredns: - image: ${COREDNS_IMAGE}:${COREDNS_TAG:-latest} - command: ["-conf", "/Corefile"] - container_name: coredns - restart: on-failure - expose: - - '53' - - '53/udp' - volumes: - - '${COREFILE_ABS_PATH}:/Corefile' - networks: - sc4snmp_network: - ipv4_address: ${COREDNS_ADDRESS} diff --git a/docker_compose/docker-compose-dependencies.yaml b/docker_compose/docker-compose-dependencies.yaml deleted file mode 100644 index 73434af1c..000000000 --- a/docker_compose/docker-compose-dependencies.yaml +++ /dev/null @@ -1,40 +0,0 @@ -version: '3.8' -services: - snmp-mibserver: - image: ${MIBSERVER_IMAGE}:${MIBSERVER_TAG:-latest} - container_name: snmp-mibserver - environment: - - NGINX_ENTRYPOINT_QUIET_LOGS=${NGINX_ENTRYPOINT_QUIET_LOGS:-1} - volumes: - - snmp-mibserver-tmp:/tmp/ - 
depends_on: - - coredns - networks: - - sc4snmp_network - dns: - - ${COREDNS_ADDRESS} - - redis: - image: ${REDIS_IMAGE}:${REDIS_TAG:-latest} - container_name: redis - restart: always - environment: - - ALLOW_EMPTY_PASSWORD=yes - depends_on: - - coredns - networks: - - sc4snmp_network - dns: - - ${COREDNS_ADDRESS} - mongo: - image: ${MONGO_IMAGE}:${MONGO_TAG:-latest} - container_name: mongo - restart: always - depends_on: - - coredns - networks: - - sc4snmp_network - dns: - - ${COREDNS_ADDRESS} -volumes: - snmp-mibserver-tmp: null diff --git a/docker_compose/docker-compose-inventory.yaml b/docker_compose/docker-compose-inventory.yaml deleted file mode 100644 index 91cee276c..000000000 --- a/docker_compose/docker-compose-inventory.yaml +++ /dev/null @@ -1,36 +0,0 @@ -version: '3.8' -services: - inventory: - image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} - container_name: sc4snmp-inventory - command: ["inventory"] - environment: - - CONFIG_PATH=/app/config/config.yaml - - REDIS_URL=redis://redis:6379/1 - - CELERY_BROKER_URL=redis://redis:6379/0 - - MONGO_URI=mongodb://mongo:27017/ - - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ - - MIB_INDEX=http://snmp-mibserver:8000/index.csv - - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt - - # Inventory configuration - - LOG_LEVEL=${INVENTORY_LOG_LEVEL:-INFO} - - CHAIN_OF_TASKS_EXPIRY_TIME=${CHAIN_OF_TASKS_EXPIRY_TIME:-500} - - CONFIG_FROM_MONGO=${CONFIG_FROM_MONGO:-false} - depends_on: - - redis - - mongo - - coredns - volumes: - - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro - - ${INVENTORY_FILE_ABSOLUTE_PATH}:/app/inventory/inventory.csv:ro - - inventory-pysnmp-cache-volume:/.pysnmp/:rw - - inventory-tmp:/tmp/:rw - restart: on-failure - networks: - - sc4snmp_network - dns: - - ${COREDNS_ADDRESS} -volumes: - inventory-tmp: null - inventory-pysnmp-cache-volume: null diff --git a/docker_compose/docker-compose-network.yaml b/docker_compose/docker-compose-network.yaml deleted file mode 100644 index f7fa80a7e..000000000 --- a/docker_compose/docker-compose-network.yaml +++ /dev/null @@ -1,11 +0,0 @@ -version: '3.8' -networks: - sc4snmp_network: - name: sc4snmp_network - enable_ipv6: ${IPv6_ENABLED:-false} - ipam: - config: - - subnet: 172.28.0.0/16 - gateway: 172.28.0.1 - - subnet: fd02::/64 - gateway: fd02::1 \ No newline at end of file diff --git a/docker_compose/docker-compose-scheduler.yaml b/docker_compose/docker-compose-scheduler.yaml deleted file mode 100644 index f74c1e072..000000000 --- a/docker_compose/docker-compose-scheduler.yaml +++ /dev/null @@ -1,33 +0,0 @@ -version: '3.8' -services: - scheduler: - image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} - container_name: sc4snmp-scheduler - command: ["celery", "beat"] - environment: - - CONFIG_PATH=/app/config/config.yaml - - REDIS_URL=redis://redis:6379/1 - - CELERY_BROKER_URL=redis://redis:6379/0 - - MONGO_URI=mongodb://mongo:27017/ - - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ - - MIB_INDEX=http://snmp-mibserver:8000/index.csv - - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt - - # Scheduler configuration - - LOG_LEVEL=${SCHEDULER_LOG_LEVEL:-INFO} - depends_on: - - redis - - mongo - - coredns - volumes: - - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro - - scheduler-pysnmp-cache-volume:/.pysnmp/:rw - - scheduler-tmp:/tmp/:rw - restart: on-failure - networks: - - sc4snmp_network - dns: - - ${COREDNS_ADDRESS} -volumes: - scheduler-tmp: null - scheduler-pysnmp-cache-volume: null \ No newline at end of file diff --git 
a/docker_compose/docker-compose-secrets.yaml b/docker_compose/docker-compose-secrets.yaml deleted file mode 100644 index c1dae5281..000000000 --- a/docker_compose/docker-compose-secrets.yaml +++ /dev/null @@ -1,2 +0,0 @@ -secrets: {} -version: '3.8' diff --git a/docker_compose/docker-compose-traps.yaml b/docker_compose/docker-compose-traps.yaml deleted file mode 100644 index 1abba1b02..000000000 --- a/docker_compose/docker-compose-traps.yaml +++ /dev/null @@ -1,51 +0,0 @@ -services: - traps: - command: - - trap - container_name: sc4snmp-traps - depends_on: - - redis - - mongo - - coredns - dns: - - ${COREDNS_ADDRESS} - environment: - - CONFIG_PATH=/app/config/config.yaml - - REDIS_URL=redis://redis:6379/1 - - CELERY_BROKER_URL=redis://redis:6379/0 - - MONGO_URI=mongodb://mongo:27017/ - - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ - - MIB_INDEX=http://snmp-mibserver:8000/index.csv - - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt - - LOG_LEVEL=${TRAP_LOG_LEVEL:-INFO} - - SPLUNK_HEC_HOST=${SPLUNK_HEC_HOST} - - SPLUNK_HEC_SCHEME=${SPLUNK_HEC_PROTOCOL:-https} - - SPLUNK_HEC_PORT=${SPLUNK_HEC_PORT} - - SPLUNK_HEC_TOKEN=${SPLUNK_HEC_TOKEN} - - SPLUNK_HEC_INSECURESSL=${SPLUNK_HEC_INSECURESSL:-false} - - SPLUNK_HEC_PATH=${SPLUNK_HEC_PATH:-/services/collector} - - SNMP_V3_SECURITY_ENGINE_ID=${SNMP_V3_SECURITY_ENGINE_ID:-80003a8c04} - - PYSNMP_DEBUG=${PYSNMP_DEBUG} - - IPv6_ENABLED=${IPv6_ENABLED:-false} - image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} - networks: - - sc4snmp_network - ports: - - mode: host - protocol: udp - published: ${TRAPS_PORT} - target: 2162 - - mode: host - protocol: udp - published: ${IPv6_TRAPS_PORT} - target: 2163 - restart: on-failure - secrets: [] - volumes: - - ${TRAPS_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro - - traps-pysnmp-cache-volume:/.pysnmp/:rw - - traps-tmp:/tmp/:rw -version: '3.8' -volumes: - traps-pysnmp-cache-volume: null - traps-tmp: null diff --git a/docker_compose/docker-compose-worker-poller.yaml b/docker_compose/docker-compose-worker-poller.yaml deleted file mode 100644 index 8f52118cf..000000000 --- a/docker_compose/docker-compose-worker-poller.yaml +++ /dev/null @@ -1,69 +0,0 @@ -services: - worker-poller: - command: - - celery - - worker-poller - depends_on: - - redis - - mongo - - coredns - dns: - - ${COREDNS_ADDRESS} - environment: - - CONFIG_PATH=/app/config/config.yaml - - REDIS_URL=redis://redis:6379/1 - - CELERY_BROKER_URL=redis://redis:6379/0 - - MONGO_URI=mongodb://mongo:27017/ - - SC4SNMP_VERSION=${SC4SNMP_VERSION:-0.0.0} - - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ - - MIB_INDEX=http://snmp-mibserver:8000/index.csv - - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt - - SPLUNK_HEC_HOST=${SPLUNK_HEC_HOST} - - SPLUNK_HEC_SCHEME=${SPLUNK_HEC_PROTOCOL:-https} - - SPLUNK_HEC_PORT=${SPLUNK_HEC_PORT} - - SPLUNK_HEC_TOKEN=${SPLUNK_HEC_TOKEN} - - SPLUNK_HEC_INSECURESSL=${SPLUNK_HEC_INSECURESSL:-false} - - SPLUNK_SOURCETYPE_TRAPS=${SPLUNK_SOURCETYPE_TRAPS:-sc4snmp:traps} - - SPLUNK_SOURCETYPE_POLLING_EVENTS=${SPLUNK_SOURCETYPE_POLLING_EVENTS:-sc4snmp:event} - - SPLUNK_SOURCETYPE_POLLING_METRICS=${SPLUNK_SOURCETYPE_POLLING_METRICS:-sc4snmp:metric} - - SPLUNK_HEC_INDEX_EVENTS=${SPLUNK_HEC_INDEX_EVENTS:-netops} - - SPLUNK_HEC_INDEX_METRICS=${SPLUNK_HEC_INDEX_METRICS:-netmetrics} - - SPLUNK_HEC_PATH=${SPLUNK_HEC_PATH:-/services/collector} - - SPLUNK_AGGREGATE_TRAPS_EVENTS=${SPLUNK_AGGREGATE_TRAPS_EVENTS:-false} - - IGNORE_EMPTY_VARBINDS=${IGNORE_EMPTY_VARBINDS:-false} - - 
WALK_RETRY_MAX_INTERVAL=${WALK_RETRY_MAX_INTERVAL:-180} - - WALK_MAX_RETRIES=${WALK_MAX_RETRIES:-5} - - METRICS_INDEXING_ENABLED=${METRICS_INDEXING_ENABLED:-false} - - POLL_BASE_PROFILES=${POLL_BASE_PROFILES:-true} - - IGNORE_NOT_INCREASING_OIDS=${IGNORE_NOT_INCREASING_OIDS:-} - - LOG_LEVEL=${WORKER_LOG_LEVEL:-INFO} - - UDP_CONNECTION_TIMEOUT=${UDP_CONNECTION_TIMEOUT:-3} - - MAX_OID_TO_PROCESS=${MAX_OID_TO_PROCESS:-70} - - PROFILES_RELOAD_DELAY=${PROFILES_RELOAD_DELAY:-60} - - WORKER_CONCURRENCY=${WORKER_POLLER_CONCURRENCY:-2} - - PREFETCH_COUNT=${PREFETCH_POLLER_COUNT:-1} - - PYSNMP_DEBUG=${PYSNMP_DEBUG} - - IPv6_ENABLED=${IPv6_ENABLED:-false} - image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} - networks: - - sc4snmp_network - restart: on-failure - secrets: [] - volumes: - - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro - - worker-poller-pysnmp-cache-volume:/.pysnmp/:rw - - worker-poller-tmp:/tmp/:rw - deploy: - mode: replicated - replicas: ${WORKER_POLLER_REPLICAS:-2} - resources: - limits: - cpus: ${WORKER_POLLER_CPU_LIMIT:-0.50} - memory: ${WORKER_POLLER_MEMORY_LIMIT:-500M} - reservations: - cpus: ${WORKER_POLLER_CPU_RESERVATIONS:-0.25} - memory: ${WORKER_POLLER_MEMORY_RESERVATIONS:-250M} -version: '3.8' -volumes: - worker-poller-pysnmp-cache-volume: null - worker-poller-tmp: null diff --git a/docker_compose/docker-compose-worker-sender.yaml b/docker_compose/docker-compose-worker-sender.yaml deleted file mode 100644 index f0040e6ae..000000000 --- a/docker_compose/docker-compose-worker-sender.yaml +++ /dev/null @@ -1,70 +0,0 @@ -version: '3.8' -services: - worker-sender: - image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} - command: ["celery", "worker-sender"] - environment: - - CONFIG_PATH=/app/config/config.yaml - - REDIS_URL=redis://redis:6379/1 - - CELERY_BROKER_URL=redis://redis:6379/0 - - MONGO_URI=mongodb://mongo:27017/ - - SC4SNMP_VERSION=${SC4SNMP_VERSION:-0.0.0} - - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ - - MIB_INDEX=http://snmp-mibserver:8000/index.csv - - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt - #- OTEL_METRICS_URL= #If sim enabled - - # Splunk instance configuration - - SPLUNK_HEC_HOST=${SPLUNK_HEC_HOST} - - SPLUNK_HEC_SCHEME=${SPLUNK_HEC_PROTOCOL:-https} - - SPLUNK_HEC_PORT=${SPLUNK_HEC_PORT} - - SPLUNK_HEC_TOKEN=${SPLUNK_HEC_TOKEN} - - SPLUNK_HEC_INSECURESSL=${SPLUNK_HEC_INSECURESSL:-false} - - SPLUNK_SOURCETYPE_TRAPS=${SPLUNK_SOURCETYPE_TRAPS:-sc4snmp:traps} - - SPLUNK_SOURCETYPE_POLLING_EVENTS=${SPLUNK_SOURCETYPE_POLLING_EVENTS:-sc4snmp:event} - - SPLUNK_SOURCETYPE_POLLING_METRICS=${SPLUNK_SOURCETYPE_POLLING_METRICS:-sc4snmp:metric} - - SPLUNK_HEC_INDEX_EVENTS=${SPLUNK_HEC_INDEX_EVENTS:-netops} - - SPLUNK_HEC_INDEX_METRICS=${SPLUNK_HEC_INDEX_METRICS:-netmetrics} - - SPLUNK_HEC_PATH=${SPLUNK_HEC_PATH:-/services/collector} - - SPLUNK_AGGREGATE_TRAPS_EVENTS=${SPLUNK_AGGREGATE_TRAPS_EVENTS:-false} - - IGNORE_EMPTY_VARBINDS=${IGNORE_EMPTY_VARBINDS:-false} - - # Workers configuration - - WALK_RETRY_MAX_INTERVAL=${WALK_RETRY_MAX_INTERVAL:-180} - - WALK_MAX_RETRIES=${WALK_MAX_RETRIES:-5} - - METRICS_INDEXING_ENABLED=${METRICS_INDEXING_ENABLED:-false} - - POLL_BASE_PROFILES=${POLL_BASE_PROFILES:-true} - - IGNORE_NOT_INCREASING_OIDS=${IGNORE_NOT_INCREASING_OIDS:-} - - LOG_LEVEL=${WORKER_LOG_LEVEL:-INFO} - - UDP_CONNECTION_TIMEOUT=${UDP_CONNECTION_TIMEOUT:-3} - - MAX_OID_TO_PROCESS=${MAX_OID_TO_PROCESS:-70} - - PROFILES_RELOAD_DELAY=${PROFILES_RELOAD_DELAY:-60} - - WORKER_CONCURRENCY=${WORKER_SENDER_CONCURRENCY:-2} - - 
PREFETCH_COUNT=${PREFETCH_SENDER_COUNT:-1} - - PYSNMP_DEBUG=${PYSNMP_DEBUG} - depends_on: - - redis - - mongo - - coredns - volumes: - - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro - - worker-sender-pysnmp-cache-volume:/.pysnmp/:rw - - worker-sender-tmp:/tmp/:rw - restart: on-failure - networks: - - sc4snmp_network - dns: - - ${COREDNS_ADDRESS} - deploy: - mode: replicated - replicas: ${WORKER_SENDER_REPLICAS:-1} - resources: - limits: - cpus: ${WORKER_SENDER_CPU_LIMIT:-0.50} - memory: ${WORKER_SENDER_MEMORY_LIMIT:-500M} - reservations: - cpus: ${WORKER_SENDER_CPU_RESERVATIONS:-0.25} - memory: ${WORKER_SENDER_MEMORY_RESERVATIONS:-250M} -volumes: - worker-sender-tmp: null - worker-sender-pysnmp-cache-volume: null \ No newline at end of file diff --git a/docker_compose/docker-compose-worker-trap.yaml b/docker_compose/docker-compose-worker-trap.yaml deleted file mode 100644 index 89f61e54f..000000000 --- a/docker_compose/docker-compose-worker-trap.yaml +++ /dev/null @@ -1,74 +0,0 @@ -version: '3.8' -services: - worker-trap: - image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} - command: ["celery", "worker-trap"] - environment: - - CONFIG_PATH=/app/config/config.yaml - - REDIS_URL=redis://redis:6379/1 - - CELERY_BROKER_URL=redis://redis:6379/0 - - MONGO_URI=mongodb://mongo:27017/ - - SC4SNMP_VERSION=${SC4SNMP_VERSION:-0.0.0} - - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ - - MIB_INDEX=http://snmp-mibserver:8000/index.csv - - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt - #- OTEL_METRICS_URL= #If sim enabled - - # Splunk instance configuration - - SPLUNK_HEC_HOST=${SPLUNK_HEC_HOST} - - SPLUNK_HEC_SCHEME=${SPLUNK_HEC_PROTOCOL:-https} - - SPLUNK_HEC_PORT=${SPLUNK_HEC_PORT} - - SPLUNK_HEC_TOKEN=${SPLUNK_HEC_TOKEN} - - SPLUNK_HEC_INSECURESSL=${SPLUNK_HEC_INSECURESSL:-false} - - SPLUNK_SOURCETYPE_TRAPS=${SPLUNK_SOURCETYPE_TRAPS:-sc4snmp:traps} - - SPLUNK_SOURCETYPE_POLLING_EVENTS=${SPLUNK_SOURCETYPE_POLLING_EVENTS:-sc4snmp:event} - - SPLUNK_SOURCETYPE_POLLING_METRICS=${SPLUNK_SOURCETYPE_POLLING_METRICS:-sc4snmp:metric} - - SPLUNK_HEC_INDEX_EVENTS=${SPLUNK_HEC_INDEX_EVENTS:-netops} - - SPLUNK_HEC_INDEX_METRICS=${SPLUNK_HEC_INDEX_METRICS:-netmetrics} - - SPLUNK_HEC_PATH=${SPLUNK_HEC_PATH:-/services/collector} - - SPLUNK_AGGREGATE_TRAPS_EVENTS=${SPLUNK_AGGREGATE_TRAPS_EVENTS:-false} - - IGNORE_EMPTY_VARBINDS=${IGNORE_EMPTY_VARBINDS:-false} - - # Workers configuration - - WALK_RETRY_MAX_INTERVAL=${WALK_RETRY_MAX_INTERVAL:-180} - - WALK_MAX_RETRIES=${WALK_MAX_RETRIES:-5} - - METRICS_INDEXING_ENABLED=${METRICS_INDEXING_ENABLED:-false} - - POLL_BASE_PROFILES=${POLL_BASE_PROFILES:-true} - - IGNORE_NOT_INCREASING_OIDS=${IGNORE_NOT_INCREASING_OIDS:-} - - LOG_LEVEL=${WORKER_LOG_LEVEL:-INFO} - - UDP_CONNECTION_TIMEOUT=${UDP_CONNECTION_TIMEOUT:-3} - - MAX_OID_TO_PROCESS=${MAX_OID_TO_PROCESS:-70} - - PROFILES_RELOAD_DELAY=${PROFILES_RELOAD_DELAY:-60} - - WORKER_CONCURRENCY=${WORKER_TRAP_CONCURRENCY:-2} - - PREFETCH_COUNT=${PREFETCH_TRAP_COUNT:-1} - - RESOLVE_TRAP_ADDRESS=${RESOLVE_TRAP_ADDRESS:-false} - - MAX_DNS_CACHE_SIZE_TRAPS=${MAX_DNS_CACHE_SIZE_TRAPS:-500} - - TTL_DNS_CACHE_TRAPS=${TTL_DNS_CACHE_TRAPS:-1800} - - PYSNMP_DEBUG=${PYSNMP_DEBUG} - - IPv6_ENABLED=${IPv6_ENABLED:-false} - depends_on: - - redis - - mongo - - coredns - volumes: - - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro - - worker-trap-pysnmp-cache-volume:/.pysnmp/:rw - - worker-trap-tmp:/tmp/:rw - restart: on-failure - networks: - - sc4snmp_network - dns: - - ${COREDNS_ADDRESS} - deploy: - 
mode: replicated - replicas: ${WORKER_TRAP_REPLICAS:-2} - resources: - limits: - cpus: ${WORKER_TRAP_CPU_LIMIT:-0.50} - memory: ${WORKER_TRAP_MEMORY_LIMIT:-500M} - reservations: - cpus: ${WORKER_TRAP_CPU_RESERVATIONS:-0.25} - memory: ${WORKER_TRAP_MEMORY_RESERVATIONS:-250M} -volumes: - worker-trap-tmp: null - worker-trap-pysnmp-cache-volume: null \ No newline at end of file diff --git a/docker_compose/docker-compose.yaml b/docker_compose/docker-compose.yaml new file mode 100644 index 000000000..7fd14e719 --- /dev/null +++ b/docker_compose/docker-compose.yaml @@ -0,0 +1,248 @@ +x-general_sc4snmp_data: &general_sc4snmp_data + CONFIG_PATH: /app/config/config.yaml + REDIS_URL: redis://redis:6379/1 + CELERY_BROKER_URL: redis://redis:6379/0 + MONGO_URI: mongodb://mongo:27017/ + MIB_SOURCES: http://snmp-mibserver:8000/asn1/@mib@ + MIB_INDEX: http://snmp-mibserver:8000/index.csv + MIB_STANDARD: http://snmp-mibserver:8000/standard.txt + +x-splunk_general_setup: &splunk_general_setup + SPLUNK_HEC_HOST: ${SPLUNK_HEC_HOST} + SPLUNK_HEC_SCHEME: ${SPLUNK_HEC_PROTOCOL:-https} + SPLUNK_HEC_PORT: ${SPLUNK_HEC_PORT} + SPLUNK_HEC_TOKEN: ${SPLUNK_HEC_TOKEN} + SPLUNK_HEC_INSECURESSL: ${SPLUNK_HEC_INSECURESSL:-false} + SPLUNK_HEC_PATH: ${SPLUNK_HEC_PATH:-/services/collector} + +x-splunk_extended_setup: &splunk_extended_setup + SPLUNK_SOURCETYPE_TRAPS: ${SPLUNK_SOURCETYPE_TRAPS:-sc4snmp:traps} + SPLUNK_SOURCETYPE_POLLING_EVENTS: ${SPLUNK_SOURCETYPE_POLLING_EVENTS:-sc4snmp:event} + SPLUNK_SOURCETYPE_POLLING_METRICS: ${SPLUNK_SOURCETYPE_POLLING_METRICS:-sc4snmp:metric} + SPLUNK_HEC_INDEX_EVENTS: ${SPLUNK_HEC_INDEX_EVENTS:-netops} + SPLUNK_HEC_INDEX_METRICS: ${SPLUNK_HEC_INDEX_METRICS:-netmetrics} + SPLUNK_AGGREGATE_TRAPS_EVENTS: ${SPLUNK_AGGREGATE_TRAPS_EVENTS:-false} + +x-workers_general_setup: &workers_general_setup + SC4SNMP_VERSION: ${SC4SNMP_VERSION:-latest} + IGNORE_EMPTY_VARBINDS: ${IGNORE_EMPTY_VARBINDS:-false} + WALK_RETRY_MAX_INTERVAL: ${WALK_RETRY_MAX_INTERVAL:-180} + WALK_MAX_RETRIES: ${WALK_MAX_RETRIES:-5} + METRICS_INDEXING_ENABLED: ${METRICS_INDEXING_ENABLED:-false} + POLL_BASE_PROFILES: ${POLL_BASE_PROFILES:-true} + IGNORE_NOT_INCREASING_OIDS: ${IGNORE_NOT_INCREASING_OIDS:-} + LOG_LEVEL: ${WORKER_LOG_LEVEL:-INFO} + UDP_CONNECTION_TIMEOUT: ${UDP_CONNECTION_TIMEOUT:-3} + MAX_OID_TO_PROCESS: ${MAX_OID_TO_PROCESS:-70} + PROFILES_RELOAD_DELAY: ${PROFILES_RELOAD_DELAY:-60} + +x-ipv6: &ipv6 + IPv6_ENABLED: ${IPv6_ENABLED:-false} + +x-pysnmp_debug: &pysnmp_debug + PYSNMP_DEBUG: ${PYSNMP_DEBUG} + +x-dns_and_networks: &dns_and_networks + networks: + - sc4snmp_network + dns: + - ${COREDNS_ADDRESS} + +x-dependency_and_restart_policy: &dependency_and_restart_policy + depends_on: + - redis + - mongo + - coredns + restart: on-failure + +x-dependend_on_core_dns: &dependend_on_core_dns + depends_on: + - coredns + +secrets: {} + +networks: + sc4snmp_network: + name: sc4snmp_network + enable_ipv6: ${IPv6_ENABLED:-false} + ipam: + config: + - subnet: 172.28.0.0/16 + gateway: 172.28.0.1 + - subnet: fd02::/64 + gateway: fd02::1 + +services: + coredns: + image: ${COREDNS_IMAGE}:${COREDNS_TAG:-latest} + command: [-conf, /Corefile] + container_name: coredns + restart: on-failure + expose: + - '53' + - 53/udp + volumes: + - ${COREFILE_ABS_PATH}:/Corefile + networks: + sc4snmp_network: + ipv4_address: ${COREDNS_ADDRESS} + snmp-mibserver: + <<: [*dns_and_networks, *dependend_on_core_dns] + image: ${MIBSERVER_IMAGE}:${MIBSERVER_TAG:-latest} + container_name: snmp-mibserver + environment: + NGINX_ENTRYPOINT_QUIET_LOGS: 
${NGINX_ENTRYPOINT_QUIET_LOGS:-1} + volumes: + - snmp-mibserver-tmp:/tmp/ + redis: + <<: [*dns_and_networks, *dependend_on_core_dns] + image: ${REDIS_IMAGE}:${REDIS_TAG:-latest} + container_name: redis + environment: + ALLOW_EMPTY_PASSWORD: yes + mongo: + <<: [*dns_and_networks, *dependend_on_core_dns] + image: ${MONGO_IMAGE}:${MONGO_TAG:-latest} + container_name: mongo + inventory: + <<: [*dns_and_networks, *dependency_and_restart_policy] + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + container_name: sc4snmp-inventory + command: [inventory] + environment: + <<: *general_sc4snmp_data + LOG_LEVEL: ${INVENTORY_LOG_LEVEL:-INFO} + CHAIN_OF_TASKS_EXPIRY_TIME: ${CHAIN_OF_TASKS_EXPIRY_TIME:-500} + CONFIG_FROM_MONGO: ${CONFIG_FROM_MONGO:-false} + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro + - ${INVENTORY_FILE_ABSOLUTE_PATH}:/app/inventory/inventory.csv:ro + - inventory-pysnmp-cache-volume:/.pysnmp/:rw + - inventory-tmp:/tmp/:rw + scheduler: + <<: [*dns_and_networks, *dependency_and_restart_policy] + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + container_name: sc4snmp-scheduler + command: [celery, beat] + environment: + <<: *general_sc4snmp_data + LOG_LEVEL: ${SCHEDULER_LOG_LEVEL:-INFO} + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro + - scheduler-pysnmp-cache-volume:/.pysnmp/:rw + - scheduler-tmp:/tmp/:rw + traps: + <<: [*dns_and_networks, *dependency_and_restart_policy] + command: + - trap + container_name: sc4snmp-traps + environment: + <<: [*general_sc4snmp_data, *splunk_general_setup, *pysnmp_debug, *ipv6] + LOG_LEVEL: ${TRAP_LOG_LEVEL:-INFO} + SNMP_V3_SECURITY_ENGINE_ID: ${SNMP_V3_SECURITY_ENGINE_ID:-80003a8c04} + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + ports: + - mode: host + protocol: udp + published: ${TRAPS_PORT} + target: 2162 + - mode: host + protocol: udp + published: ${IPv6_TRAPS_PORT} + target: 2163 + volumes: + - ${TRAPS_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro + - traps-pysnmp-cache-volume:/.pysnmp/:rw + - traps-tmp:/tmp/:rw + worker-poller: + <<: [*dns_and_networks, *dependency_and_restart_policy] + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + command: + - celery + - worker-poller + deploy: + mode: replicated + replicas: ${WORKER_POLLER_REPLICAS:-2} + resources: + limits: + cpus: ${WORKER_POLLER_CPU_LIMIT:-0.50} + memory: ${WORKER_POLLER_MEMORY_LIMIT:-500M} + reservations: + cpus: ${WORKER_POLLER_CPU_RESERVATIONS:-0.25} + memory: ${WORKER_POLLER_MEMORY_RESERVATIONS:-250M} + environment: + <<: [*general_sc4snmp_data, *splunk_general_setup, *splunk_extended_setup, *workers_general_setup, + *pysnmp_debug, *ipv6] + WORKER_CONCURRENCY: ${WORKER_POLLER_CONCURRENCY:-2} + PREFETCH_COUNT: ${PREFETCH_POLLER_COUNT:-1} + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro + - worker-poller-pysnmp-cache-volume:/.pysnmp/:rw + - worker-poller-tmp:/tmp/:rw + worker-sender: + <<: [*dns_and_networks, *dependency_and_restart_policy] + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + command: [celery, worker-sender] + environment: + <<: [*general_sc4snmp_data, *splunk_general_setup, *splunk_extended_setup, *workers_general_setup, + *pysnmp_debug] + # OTEL_METRICS_URL: #If sim enabled + # Workers configuration + WORKER_CONCURRENCY: ${WORKER_SENDER_CONCURRENCY:-2} + PREFETCH_COUNT: ${PREFETCH_SENDER_COUNT:-1} + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro + - worker-sender-pysnmp-cache-volume:/.pysnmp/:rw + - worker-sender-tmp:/tmp/:rw + 
deploy: + mode: replicated + replicas: ${WORKER_SENDER_REPLICAS:-1} + resources: + limits: + cpus: ${WORKER_SENDER_CPU_LIMIT:-0.50} + memory: ${WORKER_SENDER_MEMORY_LIMIT:-500M} + reservations: + cpus: ${WORKER_SENDER_CPU_RESERVATIONS:-0.25} + memory: ${WORKER_SENDER_MEMORY_RESERVATIONS:-250M} + worker-trap: + <<: [*dns_and_networks, *dependency_and_restart_policy] + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + command: [celery, worker-trap] + environment: + <<: [*general_sc4snmp_data, *splunk_general_setup, *splunk_extended_setup, *workers_general_setup, + *pysnmp_debug, *ipv6] + # OTEL_METRICS_URL: #If sim enabled + # Workers configuration + WORKER_CONCURRENCY: ${WORKER_TRAP_CONCURRENCY:-2} + PREFETCH_COUNT: ${PREFETCH_TRAP_COUNT:-1} + RESOLVE_TRAP_ADDRESS: ${RESOLVE_TRAP_ADDRESS:-false} + MAX_DNS_CACHE_SIZE_TRAPS: ${MAX_DNS_CACHE_SIZE_TRAPS:-500} + TTL_DNS_CACHE_TRAPS: ${TTL_DNS_CACHE_TRAPS:-1800} + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro + - worker-trap-pysnmp-cache-volume:/.pysnmp/:rw + - worker-trap-tmp:/tmp/:rw + deploy: + mode: replicated + replicas: ${WORKER_TRAP_REPLICAS:-2} + resources: + limits: + cpus: ${WORKER_TRAP_CPU_LIMIT:-0.50} + memory: ${WORKER_TRAP_MEMORY_LIMIT:-500M} + reservations: + cpus: ${WORKER_TRAP_CPU_RESERVATIONS:-0.25} + memory: ${WORKER_TRAP_MEMORY_RESERVATIONS:-250M} +volumes: + snmp-mibserver-tmp: + inventory-tmp: + inventory-pysnmp-cache-volume: + scheduler-tmp: + scheduler-pysnmp-cache-volume: + traps-pysnmp-cache-volume: + traps-tmp: + worker-poller-pysnmp-cache-volume: + worker-poller-tmp: + worker-sender-tmp: + worker-sender-pysnmp-cache-volume: + worker-trap-tmp: + worker-trap-pysnmp-cache-volume: diff --git a/docker_compose/manage_logs.py b/docker_compose/manage_logs.py index a7c28f675..7fee46eb1 100644 --- a/docker_compose/manage_logs.py +++ b/docker_compose/manage_logs.py @@ -1,12 +1,10 @@ import argparse import os -import re from typing import Union import ruamel.yaml -DEPENDENCIES = ["snmp-mibserver", "redis", "mongo"] -DOCKER_COMPOSE_DEPENDENCIES = "docker-compose-dependencies.yaml" +DOCKER_COMPOSE = "docker-compose.yaml" def human_bool(flag: Union[str, bool], default: bool = False) -> bool: @@ -74,90 +72,35 @@ def load_template(environment: dict, service_name: str) -> dict: def create_logs(environment, path_to_compose_files): - files_list = os.listdir(path_to_compose_files) - compose_files = [ - f - for f in files_list - if re.match(r"docker-compose-(?!dependencies|network|secrets).*.yaml", f) - ] - - for filename in compose_files: - service_name = filename.removeprefix("docker-compose-").removesuffix(".yaml") - template_yaml = load_template(environment, service_name) - try: - yaml = ruamel.yaml.YAML() - with open(os.path.join(path_to_compose_files, filename)) as file: - yaml_file = yaml.load(file) - yaml_file["services"][service_name].update(template_yaml) - - with open(os.path.join(path_to_compose_files, filename), "w") as file: - yaml.dump(yaml_file, file) - except Exception as e: - print( - f"Problem with editing docker-compose-{service_name}.yaml. 
Error: {e}" - ) - try: - yaml2 = ruamel.yaml.YAML() - with open( - os.path.join(path_to_compose_files, DOCKER_COMPOSE_DEPENDENCIES) - ) as file: - yaml_file = yaml2.load(file) + yaml = ruamel.yaml.YAML() + with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE)) as file: + yaml_file = yaml.load(file) - for service_name in DEPENDENCIES: + for service_name in yaml_file["services"].keys(): template_yaml = load_template(environment, service_name) yaml_file["services"][service_name].update(template_yaml) - with open( - os.path.join(path_to_compose_files, DOCKER_COMPOSE_DEPENDENCIES), "w" - ) as file: - yaml2.dump(yaml_file, file) + with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE), "w") as file: + yaml.dump(yaml_file, file) except Exception as e: - print(f"Problem with editing docker-compose-dependencies.yaml. Error: {e}") + print(f"Problem with editing docker-compose.yaml. Error: {e}") def delete_logs(path_to_compose_files): - files_list = os.listdir(path_to_compose_files) - compose_files = [ - f - for f in files_list - if re.match(r"docker-compose-(?!dependencies|network|secrets).*.yaml", f) - ] - - for filename in compose_files: - service_name = filename.removeprefix("docker-compose-").removesuffix(".yaml") - try: - with open(os.path.join(path_to_compose_files, filename)) as file: - yaml = ruamel.yaml.YAML() - yaml_file = yaml.load(file) - - yaml_file["services"][service_name]["logging"]["driver"] = "json-file" - yaml_file["services"][service_name]["logging"].pop("options") - - with open(os.path.join(path_to_compose_files, filename), "w") as file: - yaml.dump(yaml_file, file) - except Exception as e: - print( - f"Problem with editing docker-compose-{service_name}.yaml. Error: {e}" - ) - try: - with open( - os.path.join(path_to_compose_files, DOCKER_COMPOSE_DEPENDENCIES) - ) as file: - yaml2 = ruamel.yaml.YAML() - yaml_file = yaml2.load(file) + yaml = ruamel.yaml.YAML() + with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE)) as file: + yaml_file = yaml.load(file) - for service_name in DEPENDENCIES: + for service_name in yaml_file["services"].keys(): yaml_file["services"][service_name]["logging"]["driver"] = "json-file" yaml_file["services"][service_name]["logging"].pop("options") - with open( - os.path.join(path_to_compose_files, DOCKER_COMPOSE_DEPENDENCIES), "w" - ) as file: - yaml2.dump(yaml_file, file) + with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE), "w") as file: + yaml.dump(yaml_file, file) except Exception as e: - print(f"Problem with editing docker-compose-dependencies.yaml. Error: {e}") + print(f"Problem with editing docker-compose.yaml. Error: {e}") def main(): diff --git a/docker_compose/manage_secrets.py b/docker_compose/manage_secrets.py index 89d1e274a..079d8890f 100644 --- a/docker_compose/manage_secrets.py +++ b/docker_compose/manage_secrets.py @@ -2,11 +2,10 @@ import os from typing import Union -import yaml +import ruamel.yaml -DOCKER_COMPOSE_SECRETS = "docker-compose-secrets.yaml" -DOCKER_COMPOSE_WORKER_POLLER = "docker-compose-worker-poller.yaml" -DOCKER_COMPOSE_TRAPS = "docker-compose-traps.yaml" +SERVICE_SECRETS = ["worker-poller", "traps"] +DOCKER_COMPOSE = "docker-compose.yaml" def human_bool(flag: Union[str, bool], default: bool = False) -> bool: @@ -80,125 +79,97 @@ def create_secrets( try: # Load docker-compose-secrets.yaml to a dictionary and update "secrets" section. If the same secret # has been already configured, stop processing further. 
- with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE_SECRETS)) as file: - secrets_file = yaml.load(file, Loader=yaml.FullLoader) - if secrets_file["secrets"] is None or "secrets" not in secrets_file: - secrets_file["secrets"] = {} + yaml = ruamel.yaml.YAML() + with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE)) as file: + yaml_file = yaml.load(file) + if yaml_file["secrets"] is None or "secrets" not in yaml_file: + yaml_file["secrets"] = {} for new_secret in new_secrets: - if new_secret["secret_name"] in secrets_file["secrets"]: + if new_secret["secret_name"] in yaml_file["secrets"]: print(f"Secret {secret_name} already configured. New secret not added.") return - secrets_file["secrets"][new_secret["secret_name"]] = new_secret[ + yaml_file["secrets"][new_secret["secret_name"]] = new_secret[ "secret_config" ] - secrets_file_ready = True - except Exception: - print("Problem with editing docker-compose-secrets.yaml. Secret not added.") - secrets_file_ready = False + secrets_ready = True - if make_change_in_worker_poller: - worker_poller_file, worker_poller_file_ready = load_compose_worker_poller( - new_secrets_in_workers, path_to_compose_files - ) - else: - worker_poller_file = {} - worker_poller_file_ready = True + if make_change_in_worker_poller: + yaml_file, worker_poller_ready = load_compose_worker_poller( + new_secrets_in_workers, yaml_file + ) + else: + worker_poller_ready = True - if make_change_in_traps: - traps_file, traps_file_ready = load_compose_traps( - new_secrets_in_workers, path_to_compose_files + if make_change_in_traps: + yaml_file, traps_ready = load_compose_traps( + new_secrets_in_workers, yaml_file + ) + else: + traps_ready = True + + save_to_compose_files( + path_to_compose_files, + secret_name, + yaml_file, + secrets_ready, + traps_ready, + variables, + worker_poller_ready, ) - else: - traps_file = {} - traps_file_ready = True - - save_to_compose_files( - make_change_in_traps, - make_change_in_worker_poller, - path_to_compose_files, - secret_name, - secrets_file, - secrets_file_ready, - traps_file, - traps_file_ready, - variables, - worker_poller_file, - worker_poller_file_ready, - ) + except Exception as e: + print(f"Problem with adding secrets. Error: {e}") def save_to_compose_files( - make_change_in_traps, - make_change_in_worker_poller, path_to_compose_files, secret_name, - secrets_file, - secrets_file_ready, - traps_file, - traps_file_ready, + yaml_file, + secrets_ready, + traps_ready, variables, - worker_poller_file, - worker_poller_file_ready, + worker_poller_ready, ): - if secrets_file_ready and worker_poller_file_ready and traps_file_ready: + if secrets_ready and worker_poller_ready and traps_ready: # If all three files were loaded into dictionary and updated successfully, # save the latest configuration to files. 
- save_to_yaml_file(path_to_compose_files, DOCKER_COMPOSE_SECRETS, secrets_file) with open(os.path.join(path_to_compose_files, ".env"), "a") as file: for k, v in variables.items(): if v: file.write(f"\n{secret_name}_{k}={v}") - if make_change_in_worker_poller: - save_to_yaml_file( - path_to_compose_files, DOCKER_COMPOSE_WORKER_POLLER, worker_poller_file - ) - if make_change_in_traps: - save_to_yaml_file(path_to_compose_files, DOCKER_COMPOSE_TRAPS, traps_file) - -def save_to_yaml_file(file_path, file_name, file_content): - with open(os.path.join(file_path, file_name), "w") as file: - yaml.dump(file_content, file, default_flow_style=False) + yaml = ruamel.yaml.YAML() + with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE), "w") as file: + yaml.dump(yaml_file, file) -def load_compose_traps(new_secrets_in_workers, path_to_compose_files): +def load_compose_traps(new_secrets_in_workers, yaml_file): # If the secret should be added to traps, load docker-compose-traps.yaml to a dictionary and # update "secrets" section. try: - with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE_TRAPS)) as file: - traps_file = yaml.load(file, Loader=yaml.FullLoader) - if "secrets" not in traps_file["services"]["traps"]: - traps_file["services"]["traps"]["secrets"] = [] - traps_file["services"]["traps"]["secrets"].extend(new_secrets_in_workers) - traps_file_ready = True - except Exception: - print("Problem with editing docker-compose-traps.yaml. Secret not added.") - traps_file = {} - traps_file_ready = False - return traps_file, traps_file_ready + if "secrets" not in yaml_file["services"]["traps"]: + yaml_file["services"]["traps"]["secrets"] = [] + yaml_file["services"]["traps"]["secrets"].extend(new_secrets_in_workers) + traps_ready = True + except Exception as e: + print(f"Problem with editing traps. Secret not added. Error {e}") + yaml_file = {} + traps_ready = False + return yaml_file, traps_ready -def load_compose_worker_poller(new_secrets_in_workers, path_to_compose_files): +def load_compose_worker_poller(new_secrets_in_workers, yaml_file): # If the secret should be added to worker poller, load docker-compose-worker-poller.yaml to a dictionary and # update "secrets" section. try: - with open( - os.path.join(path_to_compose_files, DOCKER_COMPOSE_WORKER_POLLER) - ) as file: - worker_poller_file = yaml.load(file, Loader=yaml.FullLoader) - if "secrets" not in worker_poller_file["services"]["worker-poller"]: - worker_poller_file["services"]["worker-poller"]["secrets"] = [] - worker_poller_file["services"]["worker-poller"]["secrets"].extend( - new_secrets_in_workers - ) - worker_poller_file_ready = True + if "secrets" not in yaml_file["services"]["worker-poller"]: + yaml_file["services"]["worker-poller"]["secrets"] = [] + yaml_file["services"]["worker-poller"]["secrets"].extend(new_secrets_in_workers) + worker_poller_ready = True except Exception: - print( - "Problem with editing docker-compose-worker-poller.yaml. Secret not added." - ) - worker_poller_file = {} - worker_poller_file_ready = False - return worker_poller_file, worker_poller_file_ready + print("Problem with editing worker-poller. 
Secret not added.") + yaml_file = {} + worker_poller_ready = False + return yaml_file, worker_poller_ready def store_secrets(secret_name, variables): @@ -242,45 +213,41 @@ def delete_secrets( for key in variables.keys(): secrets.append(f"{secret_name}_{key}") - secrets_file = load_compose_secrets(path_to_compose_files, secrets) + yaml = ruamel.yaml.YAML() + try: + with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE)) as file: + yaml_file = yaml.load(file) - # Save the updated docker-compose-secrets.yaml configuration - save_to_yaml_file(path_to_compose_files, DOCKER_COMPOSE_SECRETS, secrets_file) + yaml_file = load_compose_secrets(yaml_file, secrets) + # Save the updated docker-compose-secrets.yaml configuration - # Delete secrets from .env - delete_secrets_from_env(path_to_compose_files, secrets) + if make_change_in_worker_poller: + # filter out secrets destined for deletion - if make_change_in_worker_poller: - # Load docker-compose-worker-poller.yaml to dictionary and filter out secrets destined for deletion - with open( - os.path.join(path_to_compose_files, DOCKER_COMPOSE_WORKER_POLLER) - ) as file: - worker_poller_file = yaml.load(file, Loader=yaml.FullLoader) - worker_poller_file["services"]["worker-poller"]["secrets"] = list( - filter( - lambda el: el["source"] not in secrets, - worker_poller_file["services"]["worker-poller"]["secrets"], + yaml_file["services"]["worker-poller"]["secrets"] = list( + filter( + lambda el: el["source"] not in secrets, + yaml_file["services"]["worker-poller"]["secrets"], + ) ) - ) - # Save updated docker-compose-worker-poller.yaml configuration - save_to_yaml_file( - path_to_compose_files, DOCKER_COMPOSE_WORKER_POLLER, worker_poller_file - ) - - if make_change_in_traps: - # Load docker-compose-traps.yaml to dictionary and filter out secrets destined for deletion - with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE_TRAPS)) as file: - traps_file = yaml.load(file, Loader=yaml.FullLoader) - traps_file["services"]["traps"]["secrets"] = list( - filter( - lambda el: el["source"] not in secrets, - traps_file["services"]["traps"]["secrets"], + if make_change_in_traps: + # Load docker-compose-traps.yaml to dictionary and filter out secrets destined for deletion + yaml_file["services"]["traps"]["secrets"] = list( + filter( + lambda el: el["source"] not in secrets, + yaml_file["services"]["traps"]["secrets"], + ) ) - ) - # Save updated docker-compose-traps.yaml configuration - save_to_yaml_file(path_to_compose_files, DOCKER_COMPOSE_TRAPS, traps_file) + except Exception as e: + print(f"Problem with editing secrets section. Secret not added. 
Error: {e}") + + with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE), "w") as file: + yaml.dump(yaml_file, file) + + # Delete secrets from .env + delete_secrets_from_env(path_to_compose_files, secrets) def delete_secrets_from_env(path_to_compose_files, secrets): @@ -307,14 +274,12 @@ def delete_secrets_from_env(path_to_compose_files, secrets): print(f"Error: {e}") -def load_compose_secrets(path_to_compose_files, secrets): +def load_compose_secrets(yaml_file, secrets): # Load docker-compose-secrets.yaml file to a dictionary and delete desired secrets - with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE_SECRETS)) as file: - secrets_file = yaml.load(file, Loader=yaml.FullLoader) for secret in secrets: - if secret in secrets_file["secrets"]: - del secrets_file["secrets"][secret] - return secrets_file + if secret in yaml_file["secrets"]: + del yaml_file["secrets"][secret] + return yaml_file def main(): diff --git a/docs/dockercompose/10-enable-ipv6.md b/docs/dockercompose/10-enable-ipv6.md index 93e9f305a..82ff73301 100644 --- a/docs/dockercompose/10-enable-ipv6.md +++ b/docs/dockercompose/10-enable-ipv6.md @@ -10,7 +10,7 @@ To avoid any problem with configuring the network, it is recommended to use the To enable IPv6 for SC4SNMP, set `IPv6_ENABLED` variable to `true` in `.env` file. The default subnet used for SC4SNMP network in docker is `fd02::/64`, this and other network configuration can be -changed in the `docker-compose-network.yml` file. +changed in the `docker-compose.yaml` file in `networks` section. Default trap port for notifications for IPv6 is `2163`. You can change it to any other port if needed with `IPv6_TRAPS_PORT` parameter in `.env` file. The IPv6 port and IPv4 port cannot be the same. diff --git a/docs/dockercompose/2-download-package.md b/docs/dockercompose/2-download-package.md index cd23b953a..6a00101aa 100644 --- a/docs/dockercompose/2-download-package.md +++ b/docs/dockercompose/2-download-package.md @@ -13,9 +13,13 @@ After configuration, application can be deployed by running the following command inside the `docker_compose` directory: ```shell -sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d +sudo docker compose up -d ``` +!!! info + The installation process changed from version **1.12.1**. For lower version refer to the corresponding + documentation. + The same command can be run to apply any updated configuration changes. ## Uninstall the app @@ -23,5 +27,5 @@ The same command can be run to apply any updated configuration changes. To uninstall the app, run the following command inside the `docker_compose` directory: ```shell -sudo docker compose $(find docker* | sed -e 's/^/-f /') down +sudo docker compose down ``` \ No newline at end of file diff --git a/docs/dockercompose/5-traps-configuration.md b/docs/dockercompose/5-traps-configuration.md index 9ec5edf14..e601c44e0 100644 --- a/docs/dockercompose/5-traps-configuration.md +++ b/docs/dockercompose/5-traps-configuration.md @@ -1,4 +1,3 @@ -# Traps configuration Scheduler configuration is stored in the `traps-config.yaml` file. This file has the following sections: diff --git a/docs/dockercompose/7-snmpv3-secrets.md b/docs/dockercompose/7-snmpv3-secrets.md index 714f8ea39..e29e8ed65 100644 --- a/docs/dockercompose/7-snmpv3-secrets.md +++ b/docs/dockercompose/7-snmpv3-secrets.md @@ -3,6 +3,13 @@ Creating a secret requires updating configuration of several docker compose files. 
To simplify this process, inside the `docker_compose` package there is a `manage_secrets.py` file which will automatically manage secrets. +## Prerequisites + +Running script requires installation of `ruamel.yaml` package for python. It can be done with command: +``` +pip3 install ruamel.yaml +``` + ## Creating a new secret To create a new secret, `manage_secrets.py` must be run with the following flags: @@ -23,7 +30,7 @@ To create a new secret, `manage_secrets.py` must be run with the following flags This script, apart from updating configuration files, creates environmental variables with values of the secret at the end of the `.env` file in the `docker_compose` directory. To apply those secrets run the -`sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d` command inside the `docker_compose` directory. After execution of the command, plain text secrets +`sudo docker compose up -d` command inside the `docker_compose` directory. After execution of the command, plain text secrets from the `.env` file can be deleted. > **_NOTE:_** In case of any changes in `.env`, the secrets must be recreated by [deleting](#deleting-a-secret) any > previously existing secrets and creating them once again. Changes in `.env` include creating new secrets. @@ -43,7 +50,7 @@ python3 --path_to_compose \ Inside `docker_compose` directory run: ```shell -sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d +sudo docker compose up -d ``` Now, the following lines from the `.env` can be deleted: diff --git a/docs/dockercompose/9-splunk-logging.md b/docs/dockercompose/9-splunk-logging.md index 7de23c2e3..05fc2e461 100644 --- a/docs/dockercompose/9-splunk-logging.md +++ b/docs/dockercompose/9-splunk-logging.md @@ -37,7 +37,7 @@ python3 manage_logs.py --path_to_compose /home/ubuntu/docker_compose --enable_lo The script will add required configuration for logging under services in docker compose files. To apply the changes run the: ``` -sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d +sudo docker compose up -d ``` command inside the `docker_compose` directory. @@ -59,7 +59,7 @@ python3 manage_logs.py --path_to_compose /home/ubuntu/docker_compose --disable_l To apply the changes run the: ``` -sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d +sudo docker compose up -d ``` command inside the `docker_compose` directory. diff --git a/docs/microk8s/configuration/trap-configuration.md b/docs/microk8s/configuration/trap-configuration.md index e12a20879..cc36c28e6 100644 --- a/docs/microk8s/configuration/trap-configuration.md +++ b/docs/microk8s/configuration/trap-configuration.md @@ -85,6 +85,19 @@ The following is an example of an SNMPv3 trap: snmptrap -v3 -e 80003a8c04 -l authPriv -u snmp-poller -a SHA -A PASSWORD1 -x AES -X PASSWORD1 10.202.13.233 '' 1.3.6.1.2.1.2.2.1.1.1 ``` +### Updating trap configuration +If you need to update part of the traps configuration, you can do it by editing the `values.yaml` and then running the following command to restart the pod deployment: +``` +microk8s kubectl rollout restart deployment snmp-splunk-connect-for-snmp-trap -n sc4snmp +``` + +!!! info + The name of the deployment can differ based on the helm installation name. + This can be checked with the following command: + ``` + microk8s kubectl get deployments -n sc4snmp + ``` + ### Define external gateway for traps If you use SC4SNMP on a single machine, configure `loadBalancerIP`. 
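For reference, the `manage_logs.py` and `manage_secrets.py` rewrites in this patch share one pattern: load the merged `docker-compose.yaml` with a round-trip `ruamel.yaml` parser, mutate the `services` (or `secrets`) mapping in memory, and dump it back to the same file. A minimal sketch of that round trip is shown below; the path and the injected logging key are placeholders for illustration, not the scripts' real templates.

```python
# Illustrative sketch of the load -> update -> dump round trip used by the
# refactored helper scripts; not part of this patch.
import ruamel.yaml

yaml = ruamel.yaml.YAML()  # round-trip mode keeps key order and comments intact
compose_path = "docker_compose/docker-compose.yaml"  # placeholder path

with open(compose_path) as f:
    compose = yaml.load(f)

# Touch every service in the single compose file, as create_logs()/delete_logs() now do.
for name, service in compose["services"].items():
    service.setdefault("logging", {})["driver"] = "json-file"  # placeholder template

with open(compose_path, "w") as f:
    yaml.dump(compose, f)
```

Both scripts now depend on `ruamel.yaml` (`manage_secrets.py` previously used PyYAML), which is what the new `pip3 install ruamel.yaml` prerequisite in the docs above covers.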
@@ -150,17 +163,4 @@ In case you want to see traps events collected as one event inside Splunk, you c traps: aggregateTrapsEvents: "true" ``` -After that run the upgrade command. - -### Updating trap configuration -If you need to update part of the traps configuration that changes the configmap, you can do it by editing the `values. -yaml` and then running the following command to restart the pod deployment: -``` -microk8s kubectl rollout restart deployment snmp-splunk-connect-for-snmp-trap -n sc4snmp -``` - -!!! info - The name of the deployment can differ based on the helm installation name. This can be checked with the following command: - ``` - microk8s kubectl get deployments -n sc4snmp - ``` +Then the upgrade command can be executed. \ No newline at end of file diff --git a/integration_tests/automatic_setup_compose.sh b/integration_tests/automatic_setup_compose.sh index 0bc47fcfc..54463c5fe 100755 --- a/integration_tests/automatic_setup_compose.sh +++ b/integration_tests/automatic_setup_compose.sh @@ -182,6 +182,7 @@ awk -v scheduler_path="$SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH" \ mv "$TEMP_ENV_FILE" .env # Create snmpv3 secret +python3 -m pip install ruamel.yaml python3 $(realpath "manage_secrets.py") --path_to_compose $(pwd) \ --secret_name sv3poller \ --userName r-wuser \ @@ -201,9 +202,8 @@ sudo docker run -d -p 1164:161/udp tandrup/snmpsim sudo docker run -d -p 1165:161/udp tandrup/snmpsim sudo docker run -d -p 1166:161/udp -v $(pwd)/snmpsim/data:/usr/local/snmpsim/data -e EXTRA_FLAGS="--variation-modules-dir=/usr/local/snmpsim/variation --data-dir=/usr/local/snmpsim/data" tandrup/snmpsim - echo $(green "Running up Docker Compose environment") -sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d +sudo docker compose up -d wait_for_containers_to_be_up sudo docker ps diff --git a/integration_tests/splunk_test_utils.py b/integration_tests/splunk_test_utils.py index f48f1efac..4705538cc 100644 --- a/integration_tests/splunk_test_utils.py +++ b/integration_tests/splunk_test_utils.py @@ -137,7 +137,7 @@ def update_traps_secrets_compose(secrets): def upgrade_docker_compose(): - os.system("sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d") + os.system("sudo docker compose up -d") def create_v3_secrets_compose( From 5537f0283a99dff27e87b65c4aad49e477658dd9 Mon Sep 17 00:00:00 2001 From: srv-rr-github-token <94607705+srv-rr-github-token@users.noreply.github.com> Date: Tue, 29 Oct 2024 12:01:13 +0000 Subject: [PATCH 07/10] chore(release): 1.12.1-beta.2 ## [1.12.1-beta.2](https://github.com/splunk/splunk-connect-for-snmp/compare/v1.12.1-beta.1...v1.12.1-beta.2) (2024-10-29) ### Bug Fixes * refactor docker compose files ([#1105](https://github.com/splunk/splunk-connect-for-snmp/issues/1105)) ([feab1e2](https://github.com/splunk/splunk-connect-for-snmp/commit/feab1e201e0ca38d46be6585050c5f8a3d746fba)) --- charts/splunk-connect-for-snmp/Chart.yaml | 4 ++-- docker_compose/.env | 4 ++-- pyproject.toml | 2 +- splunk_connect_for_snmp/__init__.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/charts/splunk-connect-for-snmp/Chart.yaml b/charts/splunk-connect-for-snmp/Chart.yaml index 1d02088c3..d94f0c719 100644 --- a/charts/splunk-connect-for-snmp/Chart.yaml +++ b/charts/splunk-connect-for-snmp/Chart.yaml @@ -14,12 +14,12 @@ type: application # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.12.1-beta.1 +version: 1.12.1-beta.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "1.12.1-beta.1" +appVersion: "1.12.1-beta.2" # dependencies: - name: mongodb diff --git a/docker_compose/.env b/docker_compose/.env index 8a177ec38..a37e6c01e 100644 --- a/docker_compose/.env +++ b/docker_compose/.env @@ -1,12 +1,12 @@ # Deployment configuration SC4SNMP_IMAGE=ghcr.io/splunk/splunk-connect-for-snmp/container -SC4SNMP_TAG="1.12.1-beta.1" +SC4SNMP_TAG="1.12.1-beta.2" SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH= TRAPS_CONFIG_FILE_ABSOLUTE_PATH= INVENTORY_FILE_ABSOLUTE_PATH= COREFILE_ABS_PATH= COREDNS_ADDRESS=172.28.0.255 -SC4SNMP_VERSION="1.12.1-beta.1" +SC4SNMP_VERSION="1.12.1-beta.2" IPv6_ENABLED=false # Dependencies images diff --git a/pyproject.toml b/pyproject.toml index 7d37a98bd..08dbca53f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "splunk-connect-for-snmp" -version = "1.12.1-beta.1" +version = "1.12.1-beta.2" description = "" authors = ["omrozowicz-splunk "] license = "Apache-2.0" diff --git a/splunk_connect_for_snmp/__init__.py b/splunk_connect_for_snmp/__init__.py index 97ee6073c..e803d46da 100644 --- a/splunk_connect_for_snmp/__init__.py +++ b/splunk_connect_for_snmp/__init__.py @@ -15,4 +15,4 @@ # -__version__ = "1.12.1-beta.1" +__version__ = "1.12.1-beta.2" From 8209ba819c8cf9bcecf30a91f3c1b194f87c486c Mon Sep 17 00:00:00 2001 From: ajasnosz <139114006+ajasnosz@users.noreply.github.com> Date: Tue, 5 Nov 2024 10:14:42 +0100 Subject: [PATCH 08/10] fix: add docker dns for ipv6 (#1115) --- docker_compose/.env | 1 + docker_compose/Corefile | 2 +- docker_compose/docker-compose.yaml | 2 ++ docs/dockercompose/6-env-file-configuration.md | 1 + .../microk8s/configuration/poller-configuration.md | 3 +++ integration_tests/.env | 6 ++++-- integration_tests/automatic_setup_compose.sh | 1 + splunk_connect_for_snmp/common/inventory_record.py | 6 +++--- splunk_connect_for_snmp/snmp/auth.py | 14 +++++++++++++- 9 files changed, 29 insertions(+), 7 deletions(-) diff --git a/docker_compose/.env b/docker_compose/.env index a37e6c01e..4ac820460 100644 --- a/docker_compose/.env +++ b/docker_compose/.env @@ -6,6 +6,7 @@ TRAPS_CONFIG_FILE_ABSOLUTE_PATH= INVENTORY_FILE_ABSOLUTE_PATH= COREFILE_ABS_PATH= COREDNS_ADDRESS=172.28.0.255 +COREDNS_ADDRESS_IPv6=fd02:0:0:0:7fff:ffff:ffff:ffff SC4SNMP_VERSION="1.12.1-beta.2" IPv6_ENABLED=false diff --git a/docker_compose/Corefile b/docker_compose/Corefile index 7ea43e1b2..23823b42d 100644 --- a/docker_compose/Corefile +++ b/docker_compose/Corefile @@ -3,5 +3,5 @@ errors auto reload - forward . 8.8.8.8 + forward . 
8.8.8.8 2001:4860:4860::8888 } \ No newline at end of file diff --git a/docker_compose/docker-compose.yaml b/docker_compose/docker-compose.yaml index 7fd14e719..4b9727134 100644 --- a/docker_compose/docker-compose.yaml +++ b/docker_compose/docker-compose.yaml @@ -47,6 +47,7 @@ x-dns_and_networks: &dns_and_networks - sc4snmp_network dns: - ${COREDNS_ADDRESS} + - ${COREDNS_ADDRESS_IPv6} x-dependency_and_restart_policy: &dependency_and_restart_policy depends_on: @@ -86,6 +87,7 @@ services: networks: sc4snmp_network: ipv4_address: ${COREDNS_ADDRESS} + ipv6_address: ${COREDNS_ADDRESS_IPv6} snmp-mibserver: <<: [*dns_and_networks, *dependend_on_core_dns] image: ${MIBSERVER_IMAGE}:${MIBSERVER_TAG:-latest} diff --git a/docs/dockercompose/6-env-file-configuration.md b/docs/dockercompose/6-env-file-configuration.md index 9921db035..6dd5d3d76 100644 --- a/docs/dockercompose/6-env-file-configuration.md +++ b/docs/dockercompose/6-env-file-configuration.md @@ -13,6 +13,7 @@ Inside the directory with the docker compose files, there is a `.env`. Variables | `INVENTORY_FILE_ABSOLUTE_PATH` | Absolute path to [inventory.csv](./3-inventory-configuration.md) file | | `COREFILE_ABS_PATH` | Absolute path to Corefile used by coreDNS. Default Corefile can be found inside the `docker_compose` | | `COREDNS_ADDRESS` | IP address of the coredns inside docker network. Should not be changed | +| `COREDNS_ADDRESS_IPv6` | IPv6 address of the coredns inside docker network. Should not be changed | | `SC4SNMP_VERSION` | Version of SC4SNMP | | `IPv6_ENABLED` | Enable receiving traps and polling from IPv6 devices | diff --git a/docs/microk8s/configuration/poller-configuration.md b/docs/microk8s/configuration/poller-configuration.md index 376d191ba..bd98047c6 100644 --- a/docs/microk8s/configuration/poller-configuration.md +++ b/docs/microk8s/configuration/poller-configuration.md @@ -31,6 +31,9 @@ poller: !!! info The header's line (`address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete`) is necessary for the correct execution of SC4SNMP. Do not remove it. +### IPv6 hostname resolution +When IPv6 is enabled and device is dual stack, the hostname resolution will try to resolve the name to the IPv6 address first, then to the IPv4 address. + ### Define log level The log level for poller can be set by changing the value for the key `logLevel`. The allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` or `FATAL`. The default value is `INFO`. 
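The poller documentation added above states that, with IPv6 enabled, hostname resolution tries the IPv6 address first and falls back to IPv4 for dual-stack devices. In practice this comes down to a `socket.getaddrinfo()` lookup that can return both AAAA and A answers; the sketch below makes the IPv6 preference explicit. It is illustrative only, the project's own helper (shown later in this patch) simply takes the first `getaddrinfo` result.

```python
# Standalone sketch of a dual-stack lookup that prefers IPv6; illustrative only.
import socket


def resolve_preferring_ipv6(host: str, port: int = 161) -> str:
    """Return an IPv6 literal when the host has one, otherwise an IPv4 literal."""
    infos = socket.getaddrinfo(host, port, type=socket.SOCK_DGRAM)
    for family in (socket.AF_INET6, socket.AF_INET):
        for af, _kind, _proto, _canon, sockaddr in infos:
            if af == family:
                return sockaddr[0]  # e.g. "2001:db8::1" or "192.0.2.1"
    raise OSError(f"no usable address for {host}")


print(resolve_preferring_ipv6("localhost"))
```

Because `getaddrinfo` result ordering is resolver and OS dependent, the sketch loops over the address families explicitly instead of relying on the order of the returned list.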
diff --git a/integration_tests/.env b/integration_tests/.env index 14fe6d9b5..8cabcf4d9 100644 --- a/integration_tests/.env +++ b/integration_tests/.env @@ -6,8 +6,9 @@ TRAPS_CONFIG_FILE_ABSOLUTE_PATH= INVENTORY_FILE_ABSOLUTE_PATH= COREFILE_ABS_PATH= COREDNS_ADDRESS=172.28.0.255 -SC4SNMP_VERSION="1.11.0-beta.9" - +COREDNS_ADDRESS_IPv6=fd02:0:0:0:7fff:ffff:ffff:ffff +SC4SNMP_VERSION=latest +IPv6_ENABLED=false # Dependencies images COREDNS_IMAGE=coredns/coredns @@ -81,6 +82,7 @@ CHAIN_OF_TASKS_EXPIRY_TIME=500 # Traps configuration SNMP_V3_SECURITY_ENGINE_ID=80003a8c04 TRAPS_PORT=162 +IPv6_TRAPS_PORT=2163 TRAP_LOG_LEVEL=INFO # Scheduler configuration diff --git a/integration_tests/automatic_setup_compose.sh b/integration_tests/automatic_setup_compose.sh index 54463c5fe..6d6831630 100755 --- a/integration_tests/automatic_setup_compose.sh +++ b/integration_tests/automatic_setup_compose.sh @@ -48,6 +48,7 @@ deploy_poetry() { } wait_for_containers_to_be_up() { + echo $(sudo docker ps) while true; do CONTAINERS_SC4SNMP=$(sudo docker ps | grep "sc4snmp\|worker-poller\|worker-sender\|worker-trap" | grep -v "Name" | wc -l) if [ "$CONTAINERS_SC4SNMP" -gt 0 ]; then diff --git a/splunk_connect_for_snmp/common/inventory_record.py b/splunk_connect_for_snmp/common/inventory_record.py index 41bcdc47c..3757da153 100644 --- a/splunk_connect_for_snmp/common/inventory_record.py +++ b/splunk_connect_for_snmp/common/inventory_record.py @@ -33,8 +33,8 @@ class InventoryRecord(BaseModel): - address: InventoryStr port: InventoryInt = 161 + address: InventoryStr version: InventoryStr community: InventoryStr secret: InventoryStr @@ -53,7 +53,7 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @validator("address", pre=True) - def address_validator(cls, value): + def address_validator(cls, value, values): if value is None: raise ValueError("field address cannot be null") if value.startswith("#"): @@ -63,7 +63,7 @@ def address_validator(cls, value): ip_address(value) except ValueError: try: - socket.gethostbyname_ex(value) + socket.getaddrinfo(value, values["port"]) except socket.gaierror: raise ValueError( f"field address must be an IP or a resolvable hostname {value}" diff --git a/splunk_connect_for_snmp/snmp/auth.py b/splunk_connect_for_snmp/snmp/auth.py index bb762d4fb..96ee4d7f9 100644 --- a/splunk_connect_for_snmp/snmp/auth.py +++ b/splunk_connect_for_snmp/snmp/auth.py @@ -14,6 +14,8 @@ # limitations under the License. 
# import os +import socket +from ipaddress import ip_address from typing import Any, Dict, Union from pysnmp.hlapi import ( @@ -28,11 +30,13 @@ from pysnmp.proto.api.v2c import OctetString from pysnmp.smi.rfc1902 import ObjectIdentity, ObjectType +from splunk_connect_for_snmp.common.hummanbool import human_bool from splunk_connect_for_snmp.common.inventory_record import InventoryRecord from splunk_connect_for_snmp.snmp.const import AuthProtocolMap, PrivProtocolMap from splunk_connect_for_snmp.snmp.exceptions import SnmpActionError UDP_CONNECTION_TIMEOUT = int(os.getenv("UDP_CONNECTION_TIMEOUT", 1)) +IPv6_ENABLED = human_bool(os.getenv("IPv6_ENABLED", False)) def get_secret_value( @@ -87,7 +91,8 @@ def get_security_engine_id(logger, ir: InventoryRecord, snmp_engine: SnmpEngine) def setup_transport_target(ir): - if ":" in ir.address: + ip = get_ip_from_socket(ir) if IPv6_ENABLED else ir.address + if ip_address(ip).version == 6: transport = Udp6TransportTarget( (ir.address, ir.port), timeout=UDP_CONNECTION_TIMEOUT ) @@ -98,6 +103,13 @@ def setup_transport_target(ir): return transport +def get_ip_from_socket(ir): + # Example of response from getaddrinfo + # [(< AddressFamily.AF_INET6: 10 >, < SocketKind.SOCK_STREAM: 1 >, 6, '', ('2607:f8b0:4004:c09::64', 161, 0, 0)), + # (< AddressFamily.AF_INET: 2 >, < SocketKind.SOCK_STREAM: 1 >, 6, '', ('142.251.16.139', 161))] + return socket.getaddrinfo(ir.address, ir.port)[0][4][0] + + def fetch_security_engine_id(observer_context, error_indication, ipaddress): if "securityEngineId" in observer_context: return observer_context["securityEngineId"] From 3de43cfdf4b6fd3fa541a107fa9fb25e81a45f77 Mon Sep 17 00:00:00 2001 From: srv-rr-github-token <94607705+srv-rr-github-token@users.noreply.github.com> Date: Tue, 5 Nov 2024 09:19:50 +0000 Subject: [PATCH 09/10] chore(release): 1.12.1-beta.3 ## [1.12.1-beta.3](https://github.com/splunk/splunk-connect-for-snmp/compare/v1.12.1-beta.2...v1.12.1-beta.3) (2024-11-05) ### Bug Fixes * add docker dns for ipv6 ([#1115](https://github.com/splunk/splunk-connect-for-snmp/issues/1115)) ([8209ba8](https://github.com/splunk/splunk-connect-for-snmp/commit/8209ba819c8cf9bcecf30a91f3c1b194f87c486c)) --- charts/splunk-connect-for-snmp/Chart.yaml | 4 ++-- docker_compose/.env | 4 ++-- pyproject.toml | 2 +- splunk_connect_for_snmp/__init__.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/charts/splunk-connect-for-snmp/Chart.yaml b/charts/splunk-connect-for-snmp/Chart.yaml index d94f0c719..684339989 100644 --- a/charts/splunk-connect-for-snmp/Chart.yaml +++ b/charts/splunk-connect-for-snmp/Chart.yaml @@ -14,12 +14,12 @@ type: application # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.12.1-beta.2 +version: 1.12.1-beta.3 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "1.12.1-beta.2" +appVersion: "1.12.1-beta.3" # dependencies: - name: mongodb diff --git a/docker_compose/.env b/docker_compose/.env index 4ac820460..e8b2261c3 100644 --- a/docker_compose/.env +++ b/docker_compose/.env @@ -1,13 +1,13 @@ # Deployment configuration SC4SNMP_IMAGE=ghcr.io/splunk/splunk-connect-for-snmp/container -SC4SNMP_TAG="1.12.1-beta.2" +SC4SNMP_TAG="1.12.1-beta.3" SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH= TRAPS_CONFIG_FILE_ABSOLUTE_PATH= INVENTORY_FILE_ABSOLUTE_PATH= COREFILE_ABS_PATH= COREDNS_ADDRESS=172.28.0.255 COREDNS_ADDRESS_IPv6=fd02:0:0:0:7fff:ffff:ffff:ffff -SC4SNMP_VERSION="1.12.1-beta.2" +SC4SNMP_VERSION="1.12.1-beta.3" IPv6_ENABLED=false # Dependencies images diff --git a/pyproject.toml b/pyproject.toml index 08dbca53f..77865a8be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "splunk-connect-for-snmp" -version = "1.12.1-beta.2" +version = "1.12.1-beta.3" description = "" authors = ["omrozowicz-splunk "] license = "Apache-2.0" diff --git a/splunk_connect_for_snmp/__init__.py b/splunk_connect_for_snmp/__init__.py index e803d46da..cb432b42c 100644 --- a/splunk_connect_for_snmp/__init__.py +++ b/splunk_connect_for_snmp/__init__.py @@ -15,4 +15,4 @@ # -__version__ = "1.12.1-beta.2" +__version__ = "1.12.1-beta.3" From bbd75fa904126a831aca141f72c2e1f353d58bb6 Mon Sep 17 00:00:00 2001 From: ajasnosz <139114006+ajasnosz@users.noreply.github.com> Date: Tue, 5 Nov 2024 10:49:59 +0100 Subject: [PATCH 10/10] chore: update version (#1117) --- CHANGELOG.md | 7 +++++++ integration_tests/automatic_setup_compose.sh | 1 - 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7abd495ad..316333459 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,11 +2,18 @@ ## Unreleased +### Changed + +### Fixed + +## [1.12.1] + ### Changed - general refactor of documentation - merge docker compose files into one ### Fixed +- dns resolution for ipv6 ## [1.12.0] diff --git a/integration_tests/automatic_setup_compose.sh b/integration_tests/automatic_setup_compose.sh index 6d6831630..54463c5fe 100755 --- a/integration_tests/automatic_setup_compose.sh +++ b/integration_tests/automatic_setup_compose.sh @@ -48,7 +48,6 @@ deploy_poetry() { } wait_for_containers_to_be_up() { - echo $(sudo docker ps) while true; do CONTAINERS_SC4SNMP=$(sudo docker ps | grep "sc4snmp\|worker-poller\|worker-sender\|worker-trap" | grep -v "Name" | wc -l) if [ "$CONTAINERS_SC4SNMP" -gt 0 ]; then