From 8fffe514fbbd30f8303e45c3ceb3892caebc40fc Mon Sep 17 00:00:00 2001 From: Sergey Petrov Date: Fri, 23 Feb 2024 16:45:06 -0800 Subject: [PATCH] chore: release v1.35.0 --- .gitlab-ci.yml | 56 +- CHANGELOG.md | 9 + SUPPORT.md | 1 + contributing/README.md | 2 +- contributing/docs_release.md | 51 + contributing/process_release.md | 7 +- docs/avr.rst | 9 + docs/index.html | 4 - docs/logging.rst | 37 +- docs/memory-monitor.rst | 289 +++ docs/pull-consumers.rst | 2 + docs/revision-history.rst | 12 +- docs/setting-up-consumer.rst | 16 + docs/telemetry-system.rst | 3 +- docs/troubleshooting.rst | 107 +- docs/using-ts.rst | 1 + docs/validate.rst | 2 +- examples/declarations/all_properties.json | 14 +- .../consumers/default_consumer.json | 15 + package-lock.json | 4 +- package.json | 2 +- scripts/build/buildRpm.sh | 5 +- src/lib/constants.js | 55 +- src/lib/dataPipeline.js | 101 +- src/lib/eventListener/index.js | 59 + src/lib/eventListener/networkService.js | 149 +- src/lib/eventListener/parser.js | 468 ++++- src/lib/eventListener/streamService.js | 5 +- src/lib/logger.js | 17 + src/lib/resourceMonitor/index.js | 457 +++++ src/lib/resourceMonitor/memoryMonitor.js | 438 +++++ src/lib/resourceMonitor/utils.js | 110 ++ src/lib/runtimeConfig/index.js | 256 +++ src/lib/runtimeConfig/task.js | 440 +++++ src/lib/runtimeConfig/updater.js | 432 +++++ src/lib/systemPoller.js | 85 +- src/lib/utils/device.js | 52 +- src/lib/utils/misc.js | 32 +- src/lib/utils/monitor.js | 198 -- src/lib/utils/structures/circularArray.js | 527 ++++- .../utils/structures/circularLinkedList.js | 398 +++- src/lib/utils/structures/index.js | 6 +- src/nodejs/restWorker.js | 37 +- src/schema/1.35.0/actions_schema.json | 187 ++ src/schema/1.35.0/base_schema.json | 310 +++ src/schema/1.35.0/consumer_schema.json | 1490 ++++++++++++++ src/schema/1.35.0/controls_schema.json | 166 ++ src/schema/1.35.0/endpoints_schema.json | 190 ++ src/schema/1.35.0/ihealth_poller_schema.json | 238 +++ src/schema/1.35.0/listener_schema.json | 85 + src/schema/1.35.0/namespace_schema.json | 92 + src/schema/1.35.0/pull_consumer_schema.json | 101 + src/schema/1.35.0/shared_schema.json | 50 + src/schema/1.35.0/system_poller_schema.json | 242 +++ src/schema/1.35.0/system_schema.json | 121 ++ src/schema/latest/base_schema.json | 4 +- src/schema/latest/controls_schema.json | 97 +- test/unit/constantsTests.js | 51 +- test/unit/dataPipelineTests.js | 113 +- test/unit/declaration/classControlsTests.js | 347 ++-- .../eventListener/data/parserTestsData.js | 732 ++++++- test/unit/eventListener/messageStreamTests.js | 538 ----- .../unit/eventListener/networkServiceTests.js | 9 +- test/unit/eventListener/parserTests.js | 468 ++++- test/unit/eventListener/streamTests.js | 174 +- test/unit/loggerTests.js | 2 +- .../resourceMonitor/memoryMonitorTests.js | 594 ++++++ .../resourceMonitor/resourceMonitorTests.js | 1448 ++++++++++++++ test/unit/resourceMonitor/utilsTests.js | 119 ++ test/unit/runtimeConfig/bigstart_restnode | 7 + test/unit/runtimeConfig/runtimeConfigTests.js | 1168 +++++++++++ test/unit/runtimeConfig/updaterTests.js | 837 ++++++++ test/unit/shared/stubs.js | 122 +- test/unit/systemPollerTests.js | 74 +- test/unit/utils/deviceTests.js | 72 - test/unit/utils/miscTests.js | 29 +- test/unit/utils/monitorTests.js | 293 --- .../utils/structures/circularArrayTests.js | 1725 ++++++++++++----- .../structures/circularLinkedListTests.js | 1239 +++++++++--- versions.json | 2 +- 80 files changed, 15835 insertions(+), 2671 deletions(-) create mode 100644 
contributing/docs_release.md create mode 100644 docs/memory-monitor.rst create mode 100644 examples/declarations/consumers/default_consumer.json create mode 100644 src/lib/resourceMonitor/index.js create mode 100644 src/lib/resourceMonitor/memoryMonitor.js create mode 100644 src/lib/resourceMonitor/utils.js create mode 100644 src/lib/runtimeConfig/index.js create mode 100644 src/lib/runtimeConfig/task.js create mode 100644 src/lib/runtimeConfig/updater.js delete mode 100644 src/lib/utils/monitor.js create mode 100644 src/schema/1.35.0/actions_schema.json create mode 100644 src/schema/1.35.0/base_schema.json create mode 100644 src/schema/1.35.0/consumer_schema.json create mode 100644 src/schema/1.35.0/controls_schema.json create mode 100644 src/schema/1.35.0/endpoints_schema.json create mode 100644 src/schema/1.35.0/ihealth_poller_schema.json create mode 100644 src/schema/1.35.0/listener_schema.json create mode 100644 src/schema/1.35.0/namespace_schema.json create mode 100644 src/schema/1.35.0/pull_consumer_schema.json create mode 100644 src/schema/1.35.0/shared_schema.json create mode 100644 src/schema/1.35.0/system_poller_schema.json create mode 100644 src/schema/1.35.0/system_schema.json delete mode 100644 test/unit/eventListener/messageStreamTests.js create mode 100644 test/unit/resourceMonitor/memoryMonitorTests.js create mode 100644 test/unit/resourceMonitor/resourceMonitorTests.js create mode 100644 test/unit/resourceMonitor/utilsTests.js create mode 100644 test/unit/runtimeConfig/bigstart_restnode create mode 100644 test/unit/runtimeConfig/runtimeConfigTests.js create mode 100644 test/unit/runtimeConfig/updaterTests.js delete mode 100644 test/unit/utils/monitorTests.js diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b0bd388d..f85ba514 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -19,6 +19,14 @@ include: - project: automation-toolchain/atg-shared-templates file: security/base.gitlab-ci.yml +variables: + FORCE_DOCS_STAGGING: + value: "false" + options: + - "false" + - "true" + description: "Force docs to be build and published to GitLab on non-docs branches" + ############################################################## # # @@ -26,6 +34,16 @@ include: # # ############################################################## +.rules_config: + docs_only: + rules: + - if: $CI_COMMIT_BRANCH =~ /^docs/ + - if: $FORCE_DOCS_STAGGING == "true" + not_docs: + rules: + - if: $CI_COMMIT_BRANCH !~ /^docs/ + + .run_unittest_cmd: &run_unittest_cmd - npm run test-only @@ -51,6 +69,9 @@ include: .job_definition: &job_definition tags: - docker-executor + rules: + - !reference [.rules_config, not_docs, rules] + .test_job_definition: &test_job_definition extends: @@ -228,6 +249,8 @@ coverage: build_rpm: image: ${ATG_ARTIFACTORY_PUBLISH_URL}/${ATG_ARTIFACTORY_DOCKER_REPO}/f5-telemetry-streaming-rpm-builder-image:v1.2 + extends: + - .job_definition stage: build script: - npm run build @@ -242,6 +265,7 @@ build_rpm: build_docs: image: ${CONTAINTHEDOCS_IMAGE} stage: build + needs: [] script: # create 'dist' folder because it probably not exists yet - mkdir -p dist @@ -249,7 +273,7 @@ build_docs: # - node scripts/schema-check.js - node scripts/schema-to-rst.js - node scripts/poller-default-output-doc-gen.js - - if [ "$CI_COMMIT_REF_NAME" = "docs-staging" ] || [ "$CI_COMMIT_REF_NAME" = "docs-latest" ]; then + - if [ "$CI_COMMIT_REF_NAME" = "docs-latest" ]; then - rm -rf docs/_templates - fi - make html @@ -262,6 +286,8 @@ build_docs: paths: - docs/_build/html expire_in: 1 month + rules: + - !reference 
[.rules_config, docs_only, rules] ############################################################## # BEGIN VIO # @@ -420,6 +446,7 @@ teardown_env_aws: pages: image: ${CONTAINTHEDOCS_IMAGE} stage: doc + needs: [build_docs] environment: name: staging url: https://${CI_PROJECT_NAMESPACE}.${PAGES_DOMAIN}/${CI_PROJECT_NAME} @@ -436,37 +463,18 @@ pages: - PUBLIC_DOCS=${PUBLIC_DIR}/public-docs - mkdir -p ${PUBLIC_DOCS} - cp -R docs/_build/html/* ${PUBLIC_DOCS} - #### place code coverage docs under: /coverage-docs #### - - COVERAGE_DOCS=${PUBLIC_DIR}/coverage-docs - - mkdir -p ${COVERAGE_DOCS} - - cp -R coverage/* ${COVERAGE_DOCS} - #### place code contribution docs (mainly for presentation) under: /contribute-docs #### - - CONTRIBUTE_DOCS=${PUBLIC_DIR}/contribute-docs - - CONTRIBUTE_DIR=contributing - # install presentation site dependencies - - cd ${CONTRIBUTE_DIR} && npm install && cd .. - - mkdir -p ${CONTRIBUTE_DOCS} - - cp -R ${CONTRIBUTE_DIR}/* ${CONTRIBUTE_DOCS} - # make relative links absolute - this could be better... - # ![diagram](../test/README.md) -> ![diagram](https://base.url/../test/README.md) - - BASE_URL_FOR_LINKS=${CI_PROJECT_URL}/tree/${CI_COMMIT_REF_NAME}/${CONTRIBUTE_DIR}/ - - README=${CONTRIBUTE_DOCS}/README.md - # make absolute URL(s) for relative URL(s) outside current directory '../' - - sed -i -E 's/\[.*\]\(\.\./&SED_TEMP/' ${README} && sed -i "s|..SED_TEMP|${BASE_URL_FOR_LINKS}..|" ${README} artifacts: paths: - public - only: - # only update on designated, stable branch - - develop - - doc-release-branch - - joes-new-ts-wip - - debs-doc-wip + rules: + - !reference [.rules_config, docs_only, rules] + # Publish docs to clouddocs.f5.com publish_docs_to_production: image: ${CONTAINTHEDOCS_IMAGE} stage: doc + needs: [build_docs] environment: name: production url: https://clouddocs.f5.com/products/extensions/f5-telemetry-streaming/latest diff --git a/CHANGELOG.md b/CHANGELOG.md index 5d2d365c..95984958 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,15 @@ # Changelog Changes to this project are documented in this file. More detail and links can be found in the Telemetry Streaming [Document Revision History](https://clouddocs.f5.com/products/extensions/f5-telemetry-streaming/latest/revision-history.html). +## 1.35.0 +### Added +- NEXTACC-414: Add Resource Monitor +- Runtime configuration options +### Fixed +- Event Listener performance and memory usage improvements +### Changed +### Removed + ## 1.34.0 ### Added - NEXTACC-387: Update Event Listener to utilize memory and CPU more efficient diff --git a/SUPPORT.md b/SUPPORT.md index 922f9bad..2c3cf7f4 100644 --- a/SUPPORT.md +++ b/SUPPORT.md @@ -25,6 +25,7 @@ Currently supported versions: |------------------|------------------------|---------------------|------------------| | TS 1.33.0 | LTS | 22-Mar-2023 | Maintenance mode | | TS 1.34.0 | Feature | 16-Jan-2024 | 16-Apr-2024 | +| TS 1.35.0 | Feature | 23-Feb-2024 | 23-May-2024 | Versions no longer supported: diff --git a/contributing/README.md b/contributing/README.md index 887fd181..a05fc9d3 100644 --- a/contributing/README.md +++ b/contributing/README.md @@ -108,7 +108,7 @@ How does the project handle a typical `POST` request? 
"trace": false, "format": "default" }, - "schemaVersion": "1.34.0" + "schemaVersion": "1.35.0" } } ``` diff --git a/contributing/docs_release.md b/contributing/docs_release.md new file mode 100644 index 00000000..52d3b79d --- /dev/null +++ b/contributing/docs_release.md @@ -0,0 +1,51 @@ +# TS Docs Release Process + +Docs branches are: +- docs-staging +- docs-latest + +`docs-latest` has the most recent version of publicly available documentation on **clouddocs** + +`docs-staging` has the most recent version of privately available documentation on **GitLab Pages**. (for more info see [GitLab docs](https://docs.gitlab.com/ee/user/project/pages/)) + +Workflow is following: + +If you are working on new feature or fix: +- create `feature` (any name allowed) branch off `develop` +- do docs changes along with the feature in the same feature branch. +- once the work done, create MR to merge `feature` branch to `develop` + +If you need to update docs for not published release (aka 'in progress'): +- create `docs` (any name allowed) branch off `develop` +- do docs changes +- once the work done, create MR to merge `docs` branch to `develop` + +If you need to update docs for the most recent publicly available release: +- create `docs` (any name allowed) branch off `docs-staging` +- do docs changes +- once the work done, create MR to merge `docs` branch to `docs-staging` +- review your changes once deployed to GitLab Pages +- if everything is OK then create MR to merge `docs-staging` to `docs-latest` +- merge and review your changes once deployed to the clouddocs +- merge `docs-latest` back to `develop` + +If you need to update docs for the LST release: +- create `docs` (any name allowed) branch off `docs-X.Y.Z`, where X.Y.Z is the LTS release version +- do docs changes +- once the work done, create MR to merge `docs` branch to `docs-X.Y.Z-staging` +- review your changes once deployed to GitLab Pages +- if everything is OK then create MR to merge `docs-X.Y.Z-staging` to `docs-X.Y.Z-latest` +- merge and review your changes once deployed to the clouddocs + +If you need to release docs from `develop` branch: +- merge `develop` to `docs-staging` +- review your changes once deployed to GitLab Pages + - do not forget bump version number in `versions.json` and `docs/conf.py` files +- if everything is OK then create MR to merge `docs-staging` to `docs-latest` +- merge and review your changes once deployed to the clouddocs +- merge `docs-latest` back to `develop` + +NOTE: + +You can published docs to internal GitLab Page at any time to review your changes by kicking-off CI/CD pipeline and setting `FORCE_DOCS_STAGGING` env variable to `true` + diff --git a/contributing/process_release.md b/contributing/process_release.md index 1de34067..caecdff0 100644 --- a/contributing/process_release.md +++ b/contributing/process_release.md @@ -23,12 +23,8 @@ * Make sure RC branch has actual release version and build numbers. **Note:** atg-build bumps the build number (e.g. to get first build vX.Y.Z-1, you must set package and package-lock to vX.Y.Z-0). 
Check the following files and do corrections if needed: * [package.json](package.json) * [package-lock.json](package-lock.json) - * [project.spec](project.spec) (not required starting from 1.5) - * [versions.json](versions.json) - * [src/lib/constants.js](src/lib/constants.js) (not required starting from 1.10) * [src/schema/latest/base_schema.json](src/schema/latest/base_schema.json) * [contributing/README.md](contributing/README.md) (example of response, optional) - * [docs/conf.py](docs/conf.py) * do simple `grep` in repository to ensure that no unexpected files with old version left * A new directory should be added for the new release version (same files that are in [src/schema/latest](src/schema/latest) go here) * There should be exact same files across following directories: @@ -70,6 +66,7 @@ * 1.32.0 - 20.5 MB * 1.33.0 - 22.1 MB * 1.34.0 - 18.4 MB + * 1.35.0 - 18.4 MB * Install build to BIG-IP, navigate to folder `/var/config/rest/iapps/f5-telemetry/` and check following: * Run `du -sh` and check that folder's size (shouldn't be much greater than previous versions): * 1.4.0 - 65 MB @@ -102,7 +99,7 @@ * 1.31.0 - 153 MB (NOTE: inclusion of OpenTelemetry and grpc-js libraries) * 1.32.0 - 154 MB * 1.33.0 - 164 MB - * 1.34.0 - 136 MB + * 1.35.0 - 164 MB * Check `node_modules` folder - if you see `eslint`, `mocha` or something else from [package.json](package.json) `devDependencies` section - something wrong with build process. Probably some `npm` flags are work as not expected and it MUST BE FIXED before publishing. * Ensure that all tests (unit tests and functional tests passed) * Optional: Ensure that your local tags match remote. If not, remove all and re-fetch: diff --git a/docs/avr.rst b/docs/avr.rst index b62411d5..a26b0eba 100644 --- a/docs/avr.rst +++ b/docs/avr.rst @@ -25,11 +25,20 @@ To use AVR with BIG-IP Telemetry Streaming, you must modify the AVR logging conf Use the following TMSH command, but be sure to change **telemetry_publisher** to the name of your Log Publisher if your publisher has a different name. +AS3 compatible command for TMSH: + .. code-block:: bash modify analytics global-settings { external-logging-publisher /Common/Shared/telemetry_publisher offbox-protocol hsl use-offbox enabled } +TMSH command: + +.. code-block:: bash + + modify analytics global-settings { external-logging-publisher /Common/telemetry_publisher offbox-protocol hsl use-offbox enabled } + + | diff --git a/docs/index.html b/docs/index.html index b577ae78..94d6b16f 100644 --- a/docs/index.html +++ b/docs/index.html @@ -2,7 +2,6 @@ - @@ -24,9 +23,6 @@
diff --git a/docs/logging.rst b/docs/logging.rst index 553b480d..556ce83a 100644 --- a/docs/logging.rst +++ b/docs/logging.rst @@ -4,7 +4,7 @@ Logging ======= F5 BIG-IP Telemetry Streaming logs to **/var/log/restnoded.log**. -The logging level is set in the "controls" class with possible values of "debug", "info", and "error". The default value is **info**. To change the logging level, submit the declaration with logLevel set to the preferred value. +The logging level is set in the "controls" class with possible values of allowed log levels (in increasing order of verbosity) are **error**, **info**, **debug**, **verbose**. The default value is **info**. To change the logging level, submit the declaration with logLevel set to the preferred value. .. code-block:: json :linenos: @@ -21,6 +21,20 @@ The logging level is set in the "controls" class with possible values of "debug" Example log entries for different levels ---------------------------------------- +error +````` +The error value will log only errors. + +.. code-block:: bash + + Thu, 24 Jan 2019 02:22:03 GMT - info: [telemetry] Global logLevel set to 'error' + Thu, 24 Jan 2019 02:22:08 GMT - severe: [telemetry] validateAndApply error: [{"keyword":"enum","dataPath":"['controls'].logLevel","schemaPath":"controls_schema.json#/allOf/0/then/properties/logLevel/enum","params":{"allowedValues":["debug","info","error"]},"message":"should be equal to one of the allowed values"}] + Traceback: + Error: [{"keyword":"enum","dataPath":"['controls'].logLevel","schemaPath":"controls_schema.json#/allOf/0/then/properties/logLevel/enum","params":{"allowedValues":["debug","info","error"]},"message":"should be equal to one of the allowed values"}] + at validator.then.catch (/var/config/rest/iapps/f5-telemetry/nodejs/config.js:237:41) + at + at process._tickCallback (internal/process/next_tick.js:188:7) + info ```` The info value will log information and errors. @@ -34,7 +48,7 @@ The info value will log information and errors. debug ````` -The debug value will log everything. +The debug value will log debug messages, debug errors as well as everything above. .. code-block:: bash @@ -47,18 +61,17 @@ The debug value will log everything. Thu, 24 Jan 2019 02:18:56 GMT - info: [telemetry] 0 consumer plug-in(s) loaded -error -````` -The error value will log only errors. + +verbose +```` +The verbose value will log everything. .. 
code-block:: bash - Thu, 24 Jan 2019 02:22:03 GMT - info: [telemetry] Global logLevel set to 'error' - Thu, 24 Jan 2019 02:22:08 GMT - severe: [telemetry] validateAndApply error: [{"keyword":"enum","dataPath":"['controls'].logLevel","schemaPath":"controls_schema.json#/allOf/0/then/properties/logLevel/enum","params":{"allowedValues":["debug","info","error"]},"message":"should be equal to one of the allowed values"}] - Traceback: - Error: [{"keyword":"enum","dataPath":"['controls'].logLevel","schemaPath":"controls_schema.json#/allOf/0/then/properties/logLevel/enum","params":{"allowedValues":["debug","info","error"]},"message":"should be equal to one of the allowed values"}] - at validator.then.catch (/var/config/rest/iapps/f5-telemetry/nodejs/config.js:237:41) - at - at process._tickCallback (internal/process/next_tick.js:188:7) + Thu, 24 Jan 2019 02:20:19 GMT - info: [telemetry] Global logLevel set to 'debug' + Thu, 24 Jan 2019 02:20:19 GMT - info: [telemetry] Loading consumer specific plug-ins from ./consumers + Thu, 24 Jan 2019 02:20:19 GMT - finest: [telemetry] new connection - "localhost" port 34567 + Thu, 24 Jan 2019 02:24:19 GMT - finest: [telemetry] removing connection - "localhost" port 34567 + diff --git a/docs/memory-monitor.rst b/docs/memory-monitor.rst new file mode 100644 index 00000000..506e3a92 --- /dev/null +++ b/docs/memory-monitor.rst @@ -0,0 +1,289 @@ +.. _memorymanagement: + +Memory Mamangement - BETA +========================== +.. NOTE:: Using F5 BIG-IP Telemetry Streaming **Memory Monitor** is supported as of BIG-IP TS 1.35. + +F5 BIG-IP Telemetry Streaming v1.35 and later allows you to specify memory usage limit. + + +Overview of the "memoryMonitor" property of Controls class +---------------------------------------------------------- +The "memoryMonitor" property of Controls class is where you define your memory usage limits. + + +.. list-table:: + :widths: 25 25 200 + :header-rows: 1 + + * - Property + - Required + - Description + + * - **interval** + - No + - Defines how often **Memory Monitor** should check memory usage. The acceptable values are **default** (the default), or **aggressive** (forces **Memory Monitor** to check memory usage more often). + + * - **logFrequency** + - No + - Defines how often **Memory Monitor** should log memory usage data. The default value set to **10** seconds. The minimal value is **1**. + + * - **logLevel** + - No + - Defines the logging level **Memory Monitor** should use to log memory usage data. The acceptable values are **verbose**, **debug** (the default), **info** or **error** + + * - **memoryThresholdPercent** + - No + - Defines the threshold value for memory usage. Once limit is exceeded the data processing will be temporarily ceased until the level returns below the threshold. The default value set to **90**. The minimal value is **1** and the maximum **100**. + + * - **osFreeMemory** + - No + - Defines the threshold value for OS free memory. Once amount of OS free memory becomes below the threshold value then data processing will be temporarily ceased until the level returns above the threshold. The default value set to **30** MB. The minimal value is **1**. + + * - **provisionedMemory** + - No + - Defines the total amount of memory available for application. The **allowed** amount of memory is calculated by multiplying **provisionedMemory** and **memoryThresholdPercent**. The default is **1400** MB. The minimal value is **1** and maximum **1400**. 
+ + * - **thresholdReleasePercent** + - No + - Defines amount of memory (in %) once memory utilization is equal or below that value the data processing will be enabled. The default value set to **90**. The minimal value is **1** and the maximum **100**. For more info see :ref:`memorystateflapping`. + + +For example, your declaration could add the following snippet, which contains **Memory Monitor** configuration: + +.. code-block:: bash + + { + ... + "controls": { + "class": "Controls", + "memoryMonitor": { + "interval": "aggressive", + "logFrequency": 60, + "logLevel": "debug", + "memoryThresholdPercent": 80, + "osFreeMemory": 100, + "provisionedMemory": 500, + "thresholdReleasePercent": 95 + } + } + } + + +**memoryThresholdPercent** as part of Controls and "memoryMonitor" +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +F5 BIG-IP Telemetry Streaming v1.35 and later allows to specify **memoryThresholdPercent** twice: + +.. code-block:: bash + + { + ... + "controls": { + "class": "Controls", + "memoryThresholdPercent": 80, + "memoryMonitor": { + "logLevel": "debug", + "memoryThresholdPercent": 90, + "provisionedMemory": 500 + } + } + } + +For this particular case the property **controls/memoryMonitor/memoryThresholdPercent** with value **90**% has more priority than **controls/memoryThresholdPercent** with value **80**% and as result the last one will be ignored. +If the property **controls/memoryMonitor/memoryThresholdPercent** would not be specified then **controls/memoryThresholdPercent** would be used. In other words - whenever **controls/memoryMonitor/memoryThresholdPercent** +specified then **controls/memoryThresholdPercent** ignored despite the value. + +.. code-block:: bash + + { + ... + "controls": { + "class": "Controls", + "memoryMonitor": { + "logLevel": "debug", + "memoryThresholdPercent": 90, + "provisionedMemory": 500 + } + } + } + +**Memory Monitor** will be configured with **memoryThresholdPercent** set to **90**%. + + +Using the "memoryMonitor" property of Controls class +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The good starting point of using **memoryMonitor** may looks like following: + +.. code-block:: bash + + { + ... + "controls": { + "class": "Controls", + "memoryMonitor": { + "memoryThresholdPercent": 90 + } + } + } + +Simply limit memory usage by applying 90% threshold. + + +Default behavior when the "memoryMonitor" property not configured +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +If the **memoryMonitor** property is not specified, then default values will be used. It is equal to following declaration: + +.. code-block:: bash + + { + ... + "controls": { + "class": "Controls", + "memoryMonitor": { + "interval": "default", + "logFrequency": 10, + "logLevel": "debug", + "memoryThresholdPercent": 90, + "osFreeMemory": 30, + "provisionedMemory": 1400, + "thresholdReleasePercent": 90 + } + } + } + + +Default behavior when no active components configured +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Let's say your declaration look like following: + + +.. 
code-block:: bash
+
+    {
+        "class": "Telemetry",
+        "controls": {
+            "class": "Controls",
+            "memoryMonitor": {
+                "interval": "default",
+                "logFrequency": 10,
+                "logLevel": "debug",
+                "memoryThresholdPercent": 90,
+                "osFreeMemory": 30,
+                "provisionedMemory": 1400,
+                "thresholdReleasePercent": 90
+            }
+        },
+        "listener": {
+            "class": "Telemetry_Listener",
+            "enable": false
+        }
+    }
+
+Once the declaration is applied, F5 BIG-IP Telemetry Streaming checks whether any active components are enabled at all.
+For this declaration, **Memory Monitor** will be disabled because there are no active components.
+
+
+.. _memorystateflapping:
+
+How to avoid processing state "flapping" behavior
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Once the limit defined by **memoryThresholdPercent** is exceeded, data processing is temporarily ceased until memory usage returns below the threshold.
+F5 BIG-IP Telemetry Streaming allows you to specify a recovery level that, once reached, re-enables data processing. **thresholdReleasePercent** is the amount of memory (in %);
+once memory utilization is equal to or below that value, data processing is re-enabled.
+
+Let's say your declaration looks like the following:
+
+.. code-block:: bash
+
+    {
+        "class": "Telemetry",
+        "controls": {
+            "class": "Controls",
+            "memoryMonitor": {
+                "memoryThresholdPercent": 90,
+                "provisionedMemory": 1000,
+                "thresholdReleasePercent": 90
+            }
+        },
+        "listener": {
+            "class": "Telemetry_Listener",
+            "enable": false
+        }
+    }
+
+**provisionedMemory** is set to 1000 MB, so the threshold value is **provisionedMemory** * **memoryThresholdPercent** = *1000 MB* * *90%* = *900 MB*.
+Once memory usage exceeds *900 MB*, data processing is temporarily ceased until memory usage drops back to the recovery limit defined by **thresholdReleasePercent**.
+The recovery limit is calculated using the following formula: **threshold** * **thresholdReleasePercent**, where **threshold** = **provisionedMemory** * **memoryThresholdPercent**.
+In our example it is *900 MB* * *90%* = *810 MB*. Once memory usage returns below or equal to *810 MB*, data processing
+is re-enabled and resumes its activity.
+
+.. NOTE:: It is not recommended to set **thresholdReleasePercent** to **100** because it may result in **flapping** behavior: the processing state would switch rapidly without any delay.
+
+
+Runtime Configuration options - BETA
+------------------------------------
+The "runtime" property of the Controls class is where you define your runtime configuration.
+
+.. NOTE:: The F5 BIG-IP Telemetry Streaming **runtime** property is supported as of BIG-IP TS 1.35 (currently experimental).
+
+.. IMPORTANT:: **THESE CONFIGURATION OPTIONS SHOULD BE USED ONLY WHEN YOU ARE OBSERVING OR EXPERIENCING MEMORY USAGE ISSUES**
+
+.. list-table::
+   :widths: 25 25 200
+   :header-rows: 1
+
+   * - Property
+     - Required
+     - Description
+
+   * - **enableGC**
+     - No
+     - **EXPERIMENTAL**: Enables the built-in Garbage Collector and makes it available for F5 BIG-IP Telemetry Streaming to clean up freed memory blocks. The default is **false**.
+
+   * - **maxHeapSize**
+     - No
+     - **EXPERIMENTAL**: Defines the upper limit of V8's heap size, allowing F5 BIG-IP Telemetry Streaming to utilize more memory before being killed due to a Heap-Out-Of-Memory error. The default value is **1400** MB. The minimal value is **1400**.
+
+.. IMPORTANT:: Changes in the runtime configuration may require the **restnoded** service to be restarted. F5 BIG-IP Telemetry Streaming will schedule the **restnoded** restart when changes in configuration are made.
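+
+The snippet below is an editorial illustration rather than part of this documentation set: it shows, at the plain Node.js level, what the two options roughly correspond to. The V8 heap size limit reported here is what **maxHeapSize** is meant to raise, and the exposed ``global.gc`` hook is what **enableGC** is meant to make available; exactly how F5 BIG-IP Telemetry Streaming applies these options to the **restnoded** process is not shown in this patch and is assumed here.
+
+.. code-block:: javascript
+
+    'use strict';
+
+    // Illustrative only: run with `node check.js`, then with
+    // `node --expose-gc --max-old-space-size=1400 check.js` and compare the output.
+    const v8 = require('v8');
+
+    const heapLimitMB = v8.getHeapStatistics().heap_size_limit / (1024 * 1024);
+    console.log(`V8 heap size limit: ${heapLimitMB.toFixed(0)} MB`);
+
+    if (typeof global.gc === 'function') {
+        global.gc(); // exposed only when the process was started with --expose-gc
+        console.log('built-in garbage collector is exposed and was invoked');
+    } else {
+        console.log('built-in garbage collector is not exposed');
+    }
+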
+ +The good starting point of using **runtime** may looks like following: + +.. code-block:: bash + + { + "class": "Telemetry", + "controls": { + "class": "Controls", + "runtime": { + "enableGC": true + } + }, + "listener": { + "class": "Telemetry_Listener", + "enable": false + } + } + +It enables the garbage collection function that F5 BIG-IP Telemetry Streaming will use to free memory. + +Declaration with all **runtime** properties specified: + +.. code-block:: bash + + { + "class": "Telemetry", + "controls": { + "class": "Controls", + "runtime": { + "enableGC": false, + "maxHeapSize": 1400 + } + }, + "listener": { + "class": "Telemetry_Listener", + "enable": false + } + } diff --git a/docs/pull-consumers.rst b/docs/pull-consumers.rst index f7bf42aa..51e84c2d 100644 --- a/docs/pull-consumers.rst +++ b/docs/pull-consumers.rst @@ -70,6 +70,8 @@ Default Pull Consumer This example shows how to use the default pull consumer. For the default pull consumer, the type must be **default** in the Pull Consumer class as shown. +The primary use case of such type of pull consumer is troubleshooting. + Example declaration: .. literalinclude:: ../examples/declarations/consumers/Pull_Consumer/default_pull_consumer.json diff --git a/docs/revision-history.rst b/docs/revision-history.rst index 44536161..95fc21d4 100644 --- a/docs/revision-history.rst +++ b/docs/revision-history.rst @@ -21,12 +21,16 @@ There is no plan to deprecate this product. - Description - Date + * - 1.35.0 + - Updated the documentation for Telemetry Streaming v1.35.0. This release contains the following changes: |br| * Added "memoryMonitor" (see :ref:`Memory Mamangement`). + - 01-19-24 + + * - 1.34.0 + - Updated the documentation for Telemetry Streaming v1.34.0. This release contains the following changes: |br| * Added "verbose" option for "logLevel". |br| * Event Listener bugfixes, performance and memory usage improvements. |br| * Updated description for "default" consumers. |br| * More troubleshooting entries. |br| |br| Changed: |br| * Update npm packages + - 01-19-24 + * - 1.33.0 -<<<<<<< HEAD - Updated the documentation for Telemetry Streaming v1.33.0. This release contains the following changes: |br| * Added allowing user provided endpoints for the Azure consumers |br| |br| Changed: |br| * Update npm packages -======= - - Released Telemetry Streaming v1.33.0 as a LTS (Long Term Support) version. See the Telemetry Streaming |supportmd| for information about the Telemetry Streaming support policy. |br|Updated the documentation for Telemetry Streaming v1.33.0. This release contains the following changes: |br| * Added allowing user provided endpoints for the Azure consumers |br| |br| Changed: |br| * Update npm packages ->>>>>>> develop - 03-17-23 * - 1.32.0 diff --git a/docs/setting-up-consumer.rst b/docs/setting-up-consumer.rst index 492258ad..61873d3e 100644 --- a/docs/setting-up-consumer.rst +++ b/docs/setting-up-consumer.rst @@ -11,6 +11,22 @@ Use the index on the right to locate a specific consumer. | +.. _push: + +Default Push Consumer +--------------------- + +This example shows how to use the default push consumer. For the default push consumer, the type must be **default** in the Consumer class as shown. + +The primary use case of such type of push consumer is troubleshooting. + +Example declaration: + +.. literalinclude:: ../examples/declarations/consumers/default_consumer.json + :language: json + +| + .. 
_splunk-ref: Splunk diff --git a/docs/telemetry-system.rst b/docs/telemetry-system.rst index fa7177b8..de699dfe 100644 --- a/docs/telemetry-system.rst +++ b/docs/telemetry-system.rst @@ -27,7 +27,8 @@ The system poller collects and normalizes statistics from a system, such as BIG- +--------------------+--------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+ | Parameter | Options | Description/Notes | +====================+================================+============================================================================================================================================+ -| interval | 60 - 6000, **300** | This value determines the polling period in seconds. By default, Telemetry Streaming collects statistics every 300 seconds. | +| interval | 0, 60 - 6000, **300** | This value determines the polling period in seconds. By default, Telemetry Streaming collects statistics every 300 seconds. | +| | | When value set to 0 then interval polling is disabled, useful when you want to configure :ref:`pullconsumer-ref` | +--------------------+--------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+ .. _ihealthpoller: diff --git a/docs/troubleshooting.rst b/docs/troubleshooting.rst index ec23573d..cd537470 100644 --- a/docs/troubleshooting.rst +++ b/docs/troubleshooting.rst @@ -18,7 +18,7 @@ Logging ------- F5 BIG-IP Telemetry Streaming writes log output to the file **/var/log/restnoded/restnoded.log** on the BIG-IP. The verbosity of the log output can be adjusted by submitting a BIG-IP Telemetry Streaming declaration with a Controls class. -The allowed log levels (in increasing order of verbosity) are **error**, **info**, and **debug**. +The allowed log levels (in increasing order of verbosity) are **error**, **info**, **debug** and **verbose*. The following is an example declaration containing a Controls class that sets the logging level to debug. .. code-block:: json @@ -297,11 +297,10 @@ BIG-IP Telemetry Streaming sends this JSON payload to the Event Listener you spe | - .. _trace: How can I write an Event Listener's incoming raw data to a trace file? ----------------------------------------------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. sidebar:: :fonticon:`fa fa-info-circle fa-lg` Version Notice: Support for writing an Event Listener's incoming raw data to a trace file is available in BIG-IP TS v1.20 and later @@ -369,6 +368,8 @@ Increase the memory allocated for the restjavad daemon (e.g. 2 GB), by running t .. IMPORTANT:: You should not exceed 2500MB +.. NOTE:: The configuration above does not affect F5 BIG-IP Telemetry Streaming. It does not increse amount of memory available for application. For more information see :doc:`memory-monitor`. + | .. _memory: @@ -377,7 +378,7 @@ Where can I find the BIG-IP TS memory threshold information? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This section contains guidance how to configure the F5 BIG-IP Telemetry Streaming memory usage threshold to help prevent **restnoded** from restarting when too much memory is used. When **restnoded** restarts, the BIG-IP Telemetry Streaming consumer is unavailable. -F5 BIG-IP Telemetry Streaming v1.18 introduced a change in behavior by adding monitor checks that run by default. 
Memory usage is monitored to prevent **restnoded** from crashing and restarting if memory usage becomes too high. By default (without user configuration), this translates to 90% of total memory allocated for restnoded (1433 MB by default, unless you set the db variables as noted in the workaround section of :ref:`restjavad`). +F5 BIG-IP Telemetry Streaming v1.18 introduced a change in behavior by adding monitor checks that run by default. Memory usage is monitored to prevent **restnoded** from crashing and restarting if memory usage becomes too high. By default (without user configuration), this translates to 90% of total memory allocated for restnoded (1433 MB by default). You can configure your memory threshold using the new **memoryThresholdPercent** property in the **Controls** class. For example, to set the memory threshold to 65%, you use: @@ -385,16 +386,17 @@ You can configure your memory threshold using the new **memoryThresholdPercent** :emphasize-lines: 6 { - "class": "Telemetry", - "controls": { - "class": "Controls", - "logLevel": "info", - "memoryThresholdPercent": 65 + "class": "Telemetry", + "controls": { + "class": "Controls", + "logLevel": "info", + "memoryThresholdPercent": 65 } } .. NOTE:: You can disable monitor checks by setting **memoryThresholdPercent** value to 100. +For more information see :doc:`memory-monitor`. Monitor checks run by default on intervals depending on %memory usage: @@ -404,20 +406,28 @@ Monitor checks run by default on intervals depending on %memory usage: * - % of total memory usage - Interval - * - 0 - 24 - - 30 seconds + * - 0 - 50 + - 1.5 seconds + + * - 50 - 60 + - 1 seconds - * - 25 - 49 - - 15 seconds + * - 60 - 70 + - 0.8 seconds - * - 50 - 74 - - 10 seconds + * - 70 - 80 + - 0.5 seconds + + * - 80 - 90 + - 0.2 seconds + + * - 90 - 100 + - 0.1 second + + * - 100+ + - 1 second (data processing disabled already) - * - 75 - 89 - - 5 seconds - * - 90+ - - 3 seconds | @@ -430,7 +440,66 @@ By default, BIG-IP Telemetry Streaming compresses data before sending it to Splu F5 BIG-IP Telemetry Streaming 1.19 and later includes the **compressionType** property in the |telemetryconsumer| class. You can set this property to **none** (**gzip** is the default) to help reduce memory usage. +| + +.. _bigucstimeout: + +Why is BIG-IP TS not showing up in UCS archive? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Due the fact that F5 BIG-IP TS has a quite high number of dependencies and sub-dependecies the BIG-IP unable to include it to UCS archive. You may see following log entries in **/var/log/ltm**: + +.. code-block:: bash + + err iAppsLX_save_pre[]: Failed to get task response within timeout for: /shared/iapp/build-package/16d78253-a7fb-449c-8c90-1c04a57a3af3 + err iAppsLX_save_pre[]: Failed to get getRPM build response within timeout for f5-telemetry + +Or you trying to save UCS from the CLI and it will run indefinitely and cancelling the operation with CTRL+C produces output similar to the following: + +.. code-block:: bash + + ^CTraceback (most recent call last): + File "/usr/libexec/iAppsLX_save_pre", line 158, in + taskResult = getFinishedTask(taskUri, 1.0, subprocess.check_output("getdb iapplxrpm.timeout", shell=True)) + File "/usr/libexec/iAppsLX_save_pre", line 86, in getFinishedTask + time.sleep(delay) + KeyboardInterrupt + ^CError executing 'pre-save' configsync script /var/tmp/cs_save_pre_script. + ^C/var/tmp/configsync.spec: Error creating package + + WARNING:There are error(s) during saving. + Not everything was saved. 
+ Be very careful when using this saved file! + +**Workaround** |br| +Increase the value of **sys db iapplxrpm.timeout**: + +``tmsh modify sys db iapplxrpm.timeout value 600`` |br| +``tmsh restart sys service restjavad`` |br| + +For more information see `K51300313 `_ and `Bug ID 796605 `_. + +| + +.. _bigucshasync: + +Why is BIG-IP TS not syncing across HA group? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Due the fact that F5 BIG-IP TS has a quite high number of dependencies and sub-dependecies the BIG-IP unable to sync it across all devices in HA group. + +For more details and workaround see :ref:`bigucshasync` + +| + +.. _bigucsupgrade: + +Why is BIG-IP TS not showing up after BIG-IP upgrade? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Due the fact that F5 BIG-IP TS has a quite high number of dependencies and sub-dependecies the BIG-IP unable to back it up then restore to new volume with upgraded version of software. +For more details and workaround see :ref:`bigucshasync` .. |br| raw:: html diff --git a/docs/using-ts.rst b/docs/using-ts.rst index 6ea8291e..8faa8b20 100644 --- a/docs/using-ts.rst +++ b/docs/using-ts.rst @@ -23,4 +23,5 @@ Using F5 BIG-IP Telemetry Streaming Index data-modification customizing-data advanced-options + memory-monitor deleting-ts-config diff --git a/docs/validate.rst b/docs/validate.rst index 636e56ed..6d1b9cea 100644 --- a/docs/validate.rst +++ b/docs/validate.rst @@ -7,7 +7,7 @@ In this section, we show you how to validate a F5 BIG-IP Telemetry Streaming dec For more information on editing JSON with Visual Studio Code, see |json|. -To validate a declaration +How to validate a declaration ~~~~~~~~~~~~~~~~~~~~~~~~~ Use the following procedure to validate a declaration. diff --git a/examples/declarations/all_properties.json b/examples/declarations/all_properties.json index 57798873..90e80256 100644 --- a/examples/declarations/all_properties.json +++ b/examples/declarations/all_properties.json @@ -4,7 +4,19 @@ "class": "Controls", "logLevel": "info", "debug": false, - "memoryThresholdPercent": 90 + "memoryThresholdPercent": 90, + "memoryMonitor": { + "interval": "aggressive", + "logFrequency": 60, + "logLevel": "debug", + "memoryThresholdPercent": 90, + "osFreeMemory": 100, + "provisionedMemory": 500 + }, + "runtime": { + "enableGC": true, + "maxHeapSize": 1400 + } }, "My_System": { "class": "Telemetry_System", diff --git a/examples/declarations/consumers/default_consumer.json b/examples/declarations/consumers/default_consumer.json new file mode 100644 index 00000000..080737a9 --- /dev/null +++ b/examples/declarations/consumers/default_consumer.json @@ -0,0 +1,15 @@ +{ + "class": "Telemetry", + "My_Poller": { + "class": "Telemetry_System_Poller" + }, + "My_System": { + "class": "Telemetry_System", + "enable": "true", + "systemPoller": ["My_Poller"] + }, + "My_Push_Consumer": { + "class": "Telemetry_Consumer", + "type": "default" + } +} \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index 99b7c170..8feb9bce 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "f5-telemetry", - "version": "1.34.0-1", + "version": "1.35.0-1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "f5-telemetry", - "version": "1.34.0-0", + "version": "1.35.0-0", "license": "Apache-2.0", "dependencies": { "@f5devcentral/f5-teem": "^1.6.1", diff --git a/package.json b/package.json index 2671343a..5376f1eb 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": 
"f5-telemetry", - "version": "1.34.0-1", + "version": "1.35.0-1", "author": "F5, Inc.", "license": "Apache-2.0", "repository": { diff --git a/scripts/build/buildRpm.sh b/scripts/build/buildRpm.sh index ad785642..be230ee9 100755 --- a/scripts/build/buildRpm.sh +++ b/scripts/build/buildRpm.sh @@ -5,8 +5,8 @@ set -evx # RPM template: --..rpm # For DEV === ... -# DEV RPM: f5-telemetry-1.34.0-0.20240107071243.28507f40.dev_build_info.noarch.rpm -# Release RPM: f5-telemetry-1.34.0-0.noarch.rpm +# DEV RPM: f5-telemetry-1.35.0-0.20240107071243.28507f40.dev_build_info.noarch.rpm +# Release RPM: f5-telemetry-1.35.0-0.noarch.rpm is_release_tag () {( node -e "process.exit(+!(/^(v[0-9]+\.[0-9]+\.[0-9]+|latest)$/.test('$1')));" @@ -49,7 +49,6 @@ elif ! is_release_tag "${CI_COMMIT_TAG}"; then RELEASE="${RELEASE}.${BUILD_TIMESTAMP}.${GIT_COMMIT_SHA}.${GIT_REF_NAME_SLUG}" fi - rpmbuild -bb \ --define "BUILD_TIMESTAMP ${BUILD_TIMESTAMP}" \ --define "GIT_REF_NAME ${GIT_REF_NAME}" \ diff --git a/src/lib/constants.js b/src/lib/constants.js index b36b3f96..20904c50 100644 --- a/src/lib/constants.js +++ b/src/lib/constants.js @@ -60,15 +60,57 @@ module.exports = { }, APP_NAME: 'Telemetry Streaming', APP_THRESHOLDS: { - MONITOR_DISABLED: 'MONITOR_DISABLED', + MONITOR_DISABLED: 'MONITOR_DISABLED', // TODO: delete MEMORY: { - // node default max is 1.4 GB - // assume this is the default provisioning - // value can vary according to db variables, handled by restjavad + /** TODO: DELETE */ DEFAULT_MB: 1433, - DEFAULT_LIMIT_PERCENT: 90, OK: 'MEMORY_USAGE_OK', - NOT_OK: 'MEMORY_USAGE_HIGH' + NOT_OK: 'MEMORY_USAGE_HIGH', + /** TODO: DELETE END */ + + ARGRESSIVE_CHECK_INTERVALS: [ + { usage: 50, interval: 0.5 }, + { usage: 60, interval: 0.4 }, + { usage: 70, interval: 0.3 }, + { usage: 80, interval: 0.2 }, + { usage: 90, interval: 0.2 }, + { usage: 100, interval: 0.1 } + ], + DEFAULT_CHECK_INTERVALS: [ + { usage: 50, interval: 1.5 }, + { usage: 60, interval: 1.0 }, + { usage: 70, interval: 0.8 }, + { usage: 80, interval: 0.5 }, + { usage: 90, interval: 0.2 }, + { usage: 100, interval: 0.1 } + ], + // default GC call interval in seconds + DEFAULT_GC_INTERVAL: 60, + // default check interval in seconds + DEFAULT_INTERVAL: 5, + // node.js default heap size + DEFAULT_HEAP_SIZE: 1400, + // 90% should be enough for everyone + DEFAULT_LIMIT_PERCENT: 90, + DEFAULT_LOG_FREQ: 10 * 1000, + DEFAULT_LOG_LEVEL: 'debug', + // min amount of system's free memory + DEFAULT_MIN_FREE_MEM: 30, + // default minimal check interval in seconds when mem usage is >= 100% + DEFAULT_MIN_INTERVAL: 0.1, + // default percent, when exceed that value app will disable processing + DEFAULT_OK_USAGE_PERCENT: 100, + // 90% should be enough to avoid processing state flapping + DEFAULT_RELEASE_PERCENT: 90, + STATE: { + OK: 'MEMORY_USAGE_BELOW_THRESHOLD', + NOT_OK: 'MEMORY_USAGE_ABOVE_THRESHOLD' + }, + TREND: { + DOWN: 'MEMORY_USAGE_GOES_DOWN', + NO_CHANGE: 'MEMORY_USAGE_NO_CHANGE', + UP: 'MEMORY_USAGE_GOES_UP' + } } }, CONFIG_CLASSES: { @@ -116,6 +158,7 @@ module.exports = { EVENT_LISTENER: { PARSER_MODE: 'buffer', // default parsing mode PARSER_MAX_ITERS_PER_CHECK: 1000, // how often to check the time spent on data processing + PARSER_MAX_KV_PAIRS: 2000, // max number of key=value pairs per message PARSER_MAX_MSG_SIZE: 16 * 1024, // max message size in chars (string) or bytes (buffer) PARSER_PREALLOC: 1000, // preallocated buffer size NETWORK_SERVICE_RESTART_DELAY: 10 * 1000, // 10 sec. delay before restart (units - ms.) 
diff --git a/src/lib/dataPipeline.js b/src/lib/dataPipeline.js index bcd8baa3..e900e415 100644 --- a/src/lib/dataPipeline.js +++ b/src/lib/dataPipeline.js @@ -17,54 +17,11 @@ 'use strict'; const actionProcessor = require('./actionProcessor'); -const constants = require('./constants'); -const consumersHandler = require('./consumers'); const EVENT_TYPES = require('./constants').EVENT_TYPES; const forwarder = require('./forwarder'); const logger = require('./logger'); -const monitor = require('./utils/monitor'); const util = require('./utils/misc'); -const EVENT_CUSTOM_TIMESTAMP_KEY = constants.EVENT_CUSTOM_TIMESTAMP_KEY; -const APP_THRESHOLDS = constants.APP_THRESHOLDS; - -let processingEnabled = true; - -/** - * Check if dataPipeline is running - * Toggled by monitor checks - * - * @returns {Boolean} - whether or not processing is enabled - */ -function isEnabled() { - return processingEnabled; -} - -/** - * Build log entry for data that we do not process and include details to help users troubleshoot - * - * @param {Object} dataCtx - the data context - * @returns {String} - the assembled log entry - */ -function buildSkippedDataLog(dataCtx) { - let timestampInfo = ''; - // best effort to log some known timestamp obj/fields - const timestampKeys = ['telemetryServiceInfo', EVENT_CUSTOM_TIMESTAMP_KEY, 'EOCTimestamp', 'event_timestamp']; - // need just one field to match - timestampKeys.some((key) => { - if (dataCtx.data[key]) { - timestampInfo = `"${key}": ${JSON.stringify(dataCtx.data[key])}`; - return true; - } - return false; - }); - - const consumers = consumersHandler.getConsumers() - .filter((c) => dataCtx.destinationIds.indexOf(c.id) > -1) - .map((c) => c.name); - return `Skipped Data - Category: "${dataCtx.data.telemetryEventCategory}" | Consumers: ${JSON.stringify(consumers)} | Addtl Info: ${timestampInfo}`; -} - /** * Pipeline to process data * @@ -81,7 +38,6 @@ function buildSkippedDataLog(dataCtx) { */ function process(dataCtx, options) { if (!isEnabled()) { - logger.warning(buildSkippedDataLog(dataCtx)); return Promise.resolve(); } @@ -119,19 +75,56 @@ function process(dataCtx, options) { }); } -monitor.on('check', (status) => new Promise((resolve) => { - const monitorChecksOk = status === APP_THRESHOLDS.MEMORY.OK; - // only log on status change to minimize entries - if (processingEnabled !== monitorChecksOk) { - logger.warning(`${status}. ${monitorChecksOk ? 'Resuming data pipeline processing.' 
: 'Incoming data will not be forwarded.'}`); +/** + * TEMP BLOCK OF CODE, REMOVE AFTER REFACTORING + */ +let processingEnabled = true; +let processingState = null; + +/** @param {restWorker.ApplicationContext} appCtx - application context */ +function initialize(appCtx) { + if (appCtx.resourceMonitor) { + if (processingState) { + logger.debug('Destroying existing ProcessingState instance'); + processingState.destroy(); + } + processingState = appCtx.resourceMonitor.initializePState( + onResourceMonitorUpdate.bind(null, true), + onResourceMonitorUpdate.bind(null, false) + ); + processingEnabled = processingState.enabled; + onResourceMonitorUpdate(processingEnabled); + } else { + logger.error('Unable to subscribe to Resource Monitor updates!'); + } +} + +/** @param {boolean} enabled - true if processing enabled otherwise false */ +function onResourceMonitorUpdate(enabled) { + processingEnabled = enabled; + if (enabled) { + logger.warning('Resuming data pipeline processing.'); + } else { + logger.warning('Incoming data will not be forwarded.'); } - processingEnabled = monitorChecksOk; - resolve(); -}).catch((err) => { - logger.exception('Unexpected error in data pipeline (monitor check handler).', err); -})); +} + +/** + * Check if systemPoller(s) are running + * Toggled by monitor checks + * + * @returns {Boolean} - whether or not processing is enabled + */ + +function isEnabled() { + return processingEnabled; +} +/** + * TEMP BLOCK OF CODE END + */ module.exports = { process, + initialize, isEnabled }; diff --git a/src/lib/eventListener/index.js b/src/lib/eventListener/index.js index dd59ebbc..0cf341d7 100644 --- a/src/lib/eventListener/index.js +++ b/src/lib/eventListener/index.js @@ -87,6 +87,14 @@ class ReceiversManager { }); } + disableIngress() { + Object.keys(this.registered).forEach((key) => this.registered[key].disableDataFlow()); + } + + enableIngress() { + Object.keys(this.registered).forEach((key) => this.registered[key].enableDataFlow()); + } + /** * All registered receivers * @@ -425,4 +433,55 @@ onApplicationExit(() => { EventListener.receiversManager.destroyAll().then(() => logger.info('All Event Listeners and Data Receivers destroyed')); }); +/** + * TEMP BLOCK OF CODE, REMOVE AFTER REFACTORING + */ +let processingEnabled = true; +let processingState = null; + +/** @param {restWorker.ApplicationContext} appCtx - application context */ +EventListener.initialize = function initialize(appCtx) { + if (appCtx.resourceMonitor) { + if (processingState) { + logger.debug('Destroying existing ProcessingState instance'); + processingState.destroy(); + } + processingState = appCtx.resourceMonitor.initializePState( + onResourceMonitorUpdate.bind(null, true), + onResourceMonitorUpdate.bind(null, false) + ); + processingEnabled = processingState.enabled; + onResourceMonitorUpdate(processingEnabled); + } else { + logger.error('Unable to subscribe to Resource Monitor updates!'); + } +}; + +/** @param {boolean} enabled - true if processing enabled otherwise false */ +function onResourceMonitorUpdate(enabled) { + processingEnabled = enabled; + if (enabled) { + logger.warning('Restriction ceased.'); + EventListener.receiversManager.enableIngress(); + } else { + logger.warning('Applying restrictions to incomming data.'); + EventListener.receiversManager.disableIngress(); + } +} + +/** + * Check if systemPoller(s) are running + * Toggled by monitor checks + * + * @returns {Boolean} - whether or not processing is enabled + */ + +EventListener.isEnabled = function isEnabled() { + return 
processingEnabled; +}; + +/** + * TEMP BLOCK OF CODE END + */ + module.exports = EventListener; diff --git a/src/lib/eventListener/networkService.js b/src/lib/eventListener/networkService.js index a0c2e6da..69d65074 100644 --- a/src/lib/eventListener/networkService.js +++ b/src/lib/eventListener/networkService.js @@ -16,6 +16,8 @@ 'use strict'; +/* eslint-disable no-multi-assign, no-var */ + const dgram = require('dgram'); const net = require('net'); @@ -34,8 +36,8 @@ class SocketServiceError extends Error {} * @see module:utils/service.Service * * @property {string} address - address to listen on - * @property {logger.Logger} logger - logger instance * @property {ReceiverCallback} callback - `connection` callback + * @property {logger.Logger} logger - logger instance * @property {integer} port - port to listen on * * NOTE: running instance should be restarted if `address` or `port` updated @@ -52,12 +54,25 @@ class BaseNetworkService extends Service { super(); options = options || {}; - this.address = options.address; - this.callback = callback; - this.port = port; - this.restartsEnabled = true; - this.logger = options.logger || logger.getChild(`${this.constructor.name}::${this.address}::${port}`); + /** define static read-only props that should not be overriden */ + Object.defineProperties(this, { + address: { + value: options.address + }, + callback: { + value: callback + }, + port: { + value: port + } + }); + Object.defineProperties(this, { + logger: { + value: options.logger || logger.getChild(`${this.constructor.name}::${this.address}::${port}`) + } + }); + this.restartsEnabled = true; } /** @@ -92,6 +107,7 @@ class TCPService extends BaseNetworkService { super(callback, port, options); this._connections = null; + this._handleConnection = handleTcpConnection.bind(this); this._socket = null; } @@ -108,6 +124,12 @@ class TCPService extends BaseNetworkService { if (this._socket) { reject(new SocketServiceError('_socket exists already!')); } else { + const srvOpts = this.getReceiverOptions(); + this.logger.debug(`starting listen using following options ${JSON.stringify(srvOpts)}`); + + // reset connections registry + this._connections = []; + this._socket = net.createServer({ allowHalfOpen: false, pauseOnConnect: false @@ -135,23 +157,8 @@ class TCPService extends BaseNetworkService { } }); - this._socket.on('connection', (conn) => { - const dst = this.callback({ - address: conn.remoteAddress, - family: conn.remoteFamily, - port: conn.remotePort - }); - addTcpConnection.call(this, conn, dst); - conn.on('data', dst.push.bind(dst)) - .on('error', () => conn.destroy()) // destroy emits 'close' event - .on('close', () => removeTcpConnection.call(this, conn)) - .on('end', () => {}); // allowHalfOpen is false, no need to call 'end' explicitly - }); - - const options = this.getReceiverOptions(); - this.logger.debug(`starting listen using following options ${JSON.stringify(options)}`); - this._connections = []; - this._socket.listen(options); + this._socket.on('connection', this._handleConnection); + this._socket.listen(srvOpts); } }); } @@ -200,8 +207,9 @@ class UDPService extends BaseNetworkService { } }); - this._connections = null; this._cleanupID = null; + this._connections = null; + this._handleConnection = handleUdpConnection.bind(this); this._socket = null; } @@ -218,6 +226,12 @@ class UDPService extends BaseNetworkService { if (this._socket) { reject(new SocketServiceError('_socket exists already!')); } else { + const srvOpts = this.getReceiverOptions(); + this.logger.debug(`starting 
listen using following options ${JSON.stringify(srvOpts)}`); + + // reset connections registry + this._connections = {}; + this._socket = dgram.createSocket({ type: this.family, ipv6Only: this.family === 'udp6', // available starting from node 11+ only @@ -246,15 +260,8 @@ class UDPService extends BaseNetworkService { } }); - this._socket.on('message', (data, remoteInfo) => { - const key = `${remoteInfo.address}-${remoteInfo.port}`; - (this._connections[key] || addUdpConnection.call(this, key, remoteInfo)).push(data); - }); - - const options = this.getReceiverOptions(); - this.logger.debug(`starting listen using following options ${JSON.stringify(options)}`); - this._connections = {}; - this._socket.bind(options); + this._socket.on('message', this._handleConnection); + this._socket.bind(srvOpts); this._cleanupID = setInterval(() => { Object.keys(this._connections).forEach((key) => { @@ -314,9 +321,7 @@ class DualUDPService extends BaseNetworkService { if (this._services) { reject(new SocketServiceError('_services exists already!')); } else { - // should never happen - this._onFailCb = onFatalError; - this.ee.on('failed', this._onFailCb); + this._listenerOnFailed = this.ee.on('failed', onFatalError, { objectify: true }); this._services = ['udp4', 'udp6'].map((family) => { const service = new UDPService( @@ -353,8 +358,7 @@ class DualUDPService extends BaseNetworkService { if (!this._services) { resolve(); } else { - this.ee.removeListener('failed', this._onFailCb); - this._onFailCb = null; + this._listenerOnFailed.off(); this.ee.stopListeningTo(); promiseUtil.allSettled(this._services.map((srv) => srv.destroy())) @@ -371,32 +375,6 @@ class DualUDPService extends BaseNetworkService { /** * PRIVATE METHODS */ -/** - * Add connection to the list of opened connections - * - * @this TCPService - * @param {net.Socket} conn - connection to add - * @param {MessageStream} receiver - data receiver - */ -function addTcpConnection(conn, receiver) { - this.logger.verbose(`new connection - "${conn.remoteAddress}" port "${conn.remotePort}"`); - this._connections.push([conn, receiver]); -} - -/** - * Add connection to the list of opened connections - * - * @this UDPService - * @param {string} connKey - connection to add - * @param {ConnInfo} connInfo - connection info - * - * @returns {MessageStream} data receiver - */ -function addUdpConnection(connKey, connInfo) { - this.logger.verbose(`new connection - "${connInfo.address}" port "${connInfo.port}"`); - // eslint-disable-next-line no-return-assign - return this._connections[connKey] = this.callback(connInfo); -} /** * Close all opened client connections @@ -425,11 +403,53 @@ function closeAllUdpConnections() { .forEach((connKey) => removeUdpConnection.call(this, connKey)); this._connections = {}; } +/** + * Add connection to the list of opened connections + * + * @this TCPService + * + * @param {net.Socket} conn - connection to add + */ +function handleTcpConnection(conn) { + var receiver = this.callback({ + address: conn.remoteAddress, + family: conn.remoteFamily, + port: conn.remotePort + }); + this.logger.verbose(`new connection - "${conn.remoteAddress}" port "${conn.remotePort}"`); + this._connections.push([conn, receiver]); + + conn.on('data', receiver.push.bind(receiver)) + .on('error', () => conn.destroy()) // destroy emits 'close' event + .on('close', () => removeTcpConnection.call(this, conn)) + .on('end', () => {}); // allowHalfOpen is false, no need to call 'end' explicitly +} + +/** + * Add connection to the list of opened connections + * + * 
@this UDPService + * + * @param {Buffer} data - data to process + * @param {object} remoteInfo - connection info + */ +function handleUdpConnection(data, remoteInfo) { + var key = `${remoteInfo.address}-${remoteInfo.port}`; + var stream = this._connections[key]; + + if (stream === undefined) { + stream = this._connections[key] = this.callback(remoteInfo); + this.logger.verbose(`new connection - "${remoteInfo.address}" port "${remoteInfo.port}"`); + } + + stream.push(data); +} /** * Remove connection from the list of opened connections * * @this TCPService + * * @param {net.Socket} conn - connection to remove */ function removeTcpConnection(conn) { @@ -450,6 +470,7 @@ function removeTcpConnection(conn) { * Remove connection from the list of opened connections * * @this UDPService + * * @param {string} connKey - unique connection key */ function removeUdpConnection(connKey) { diff --git a/src/lib/eventListener/parser.js b/src/lib/eventListener/parser.js index cd7ed063..187b3167 100644 --- a/src/lib/eventListener/parser.js +++ b/src/lib/eventListener/parser.js @@ -17,7 +17,8 @@ 'use strict'; /* eslint-disable no-continue, no-multi-assign, no-plusplus, no-unused-expressions */ -/* eslint-disable no-use-before-define, no-var, vars-on-top */ +/* eslint-disable no-use-before-define, no-var, vars-on-top, no-bitwise */ +/* eslint-disable no-nested-ternary, no-cond-assign, no-return-assign */ const assignDefaults = require('../utils/misc').assignDefaults; const constants = require('../constants').EVENT_LISTENER; @@ -26,24 +27,32 @@ const CircularArray = require('../utils/structures').CircularArray; /** @module eventListener/parser */ +/** + * TODO: perf tests + * - Linked List vs Array + * - Slow Buffer vs Buffer + */ + /** * DEV NOTES: * - * THIS IS THE CORE OF EVENT LISTNER MODULE BE CAREFUL WITH CHANGING/UPDATING IT + * THIS IS THE CORE OF EVENT LISTNER MODULE. BE CAREFUL WITH CHANGING/UPDATING IT * EVEN A SMALL CHANGE MAY RESULT IN SIGNIFICANT SLOWDOWN DUE V8 NATURE * - * - buffer is more performant than string + * - original naive implementation takes about 4+ seconds to parse 145MB + * + * - buffer is more performant than string (Apple M1 Pro) * - buffer may result in external memory grow/fragmentation if held for too long * - Parser's perf stats: - * - v4.8.0 - 1.6sec, - 91 MByte/s - * - v8.11.1 - 14.x - 750ms, - 194 MByte/s - * - v16 - 21.x - 550ms - 264 MByte/s - * - string is slower (x1.5-2 times) but no external memory grow/fragmentation + * - v4.8.0 - 1.5sec, - 100 MByte/s + * - v8.11.1 - 14.x - 600, - 245 MByte/s + * - v16 - 21.x - 441ms - 334 MByte/s + * - string is slower (x1.5-2 times) but no external memory grow/fragmentation (Apple M1 Pro) * - Parser's perf stats: - * - v4.8.0 - 1.6sec, - 91 MByte/s - * - v8.11.1 - 1.6sec, - 91 MByte/s - * - v12.x - 14.x - 121 MByte/s - * - v16 - 21.x - 550ms - 182 MByte/s + * - v4.8.0 - 1.5sec, - 96 MByte/s + * - v8.11.1 - 1.1sec, - 132 MByte/s + * - v12.x - 14.x - 173 MByte/s + * - v16 - 21.x - 441ms - 334 MByte/s * - V8 optimizations: * - monomorphic structures * - instances re-use @@ -51,29 +60,89 @@ const CircularArray = require('../utils/structures').CircularArray; * - pre-compute if possible * - less `this` (??) * + * `=,` search and `$F5` check adds +0.2-0.3 sec. but it saves more time + * down the pipeline + * * Update this file/code only in case of bug or beter solution/optiimzation found. 
* Run benchmark(s) and see opt/deopt logs by using following node.js flags: * --turbo_profiling --print_deopt_stress --code_comments --trace_opt --trace_deopt - */ - -/** - * Character codes * - * @type {{string: integer}} + * Even small function may help to gain perf - try to move code to funcs: + * smaller funcs are easier to optimize */ -const CC_BS = '\\'.charCodeAt(0); -const CC_CR = '\r'.charCodeAt(0); -const CC_DQ = '"'.charCodeAt(0); -const CC_EM = '\0'.charCodeAt(0); -const CC_NL = '\n'.charCodeAt(0); -const CC_SQ = '\''.charCodeAt(0); + +// Character codes +var CC_BS = '\\'.charCodeAt(0); +var CC_CM = ','.charCodeAt(0); +var CC_CR = '\r'.charCodeAt(0); +var CC_DQ = '"'.charCodeAt(0); +var CC_EM = '\0'.charCodeAt(0); +var CC_EQ = '='.charCodeAt(0); +var CC_NL = '\n'.charCodeAt(0); +var CC_SQ = '\''.charCodeAt(0); + +// ASCII len === UTF-8 len +var F5_FCAT_LEN = Buffer.from('$F5TelemetryEventCategory').length; // pre-compute for `if` statement down below +var F5_SCAT_DS = '$'.charCodeAt(0); +var F5_SCAT_LF = 'F'.charCodeAt(0); +var F5_SCAT_NOT_FOUND = 0b001; // 1 - nothing found yet +var F5_SCAT_FOUND = 0b000; // 0 - $F found +var F5_ECAT_DEFAULT_STATE_IDX = 2; +var F5_ECAT_STATE_IDX = 0; +var F5_ECAT_OFFSET_IDX = 1; + +// KV Pairs states and variables +var KV_DISABLED = 0b0000; // 0b0000 - 0 - feature disabled +var KV_FULL = 0b0001; // 0b0001 - 1 - number of pairs exceeded the limit +var KV_NOT_FOUND = 0b0100; // 0b0100 - 4 - next pair not found +var KV_FOUND = 0b0110; // 0b0110 - 6 - `=` found +var KV_INVALID_SEQ = 0b1000; // 0b1000 - 8 - invalid sequence + +var KV_DEFAULT_STATE_IDX = 0; // item at index stores default state +var KV_STATE_IDX = 1; // item at index stores current state +var KV_OFFSET_IDX = 2; // item at index stores current offset +var KV_MAX_OFFSET_IDX = 8; // item at index stores max allowed offset +var KV_RESERVED_IDX = 3; // item at index stores number of revserved elems + +// max unsiged 16 bit int +var MAX_UINT16 = 64 * 1024; + +// parser flag features +var FEAT_NONE = 0b000; // 0 - no features +var FEAT_F5_EVT_CAT = 0b001; // 1 - $F scan +var FEAT_KV_PAIRS = 0b010; // 2 - key-value pairs scan +var FEAT_ALL = FEAT_NONE + | FEAT_F5_EVT_CAT + | FEAT_KV_PAIRS; + +// NOTE: everything above is ASCII -> no issues with UTF-8 /** * Parser Class * * Parses messages separated by new line chars. + * Also does non-strict matching for $F5TelemetryEventCategory keyword. + * Also does colloecting info about `=` and `,` symbols outside of quotes. * - * NOTE: data may contain multiple events separated by newline + * NOTE: + * Parser uses only ASCII chars to search for data, so any encoding + * with first 127-ASCII chars should work. + * ASCII strings (1-bytes) are faster than UTF-8 strings (2+ bytes) + * + * NOTE: + * due the way how node.js encode/decode UTF-8 I was not able to + * find a reliable way to calculate number of UxFFFD substitutions + * for invalid bytes. As result offsets for `=` and etc. in UTF-8 + * strings may be incorrect. + * - use `buffer` mode or `ascii` or `binary` encoding for strings + * - update Pointer class with appropriate logic for UxFFFD calculations + * + * there is no sense to collect any info about offsets, because it may change + * after conversion from buffer to string (requires to calculate + * all UTF-8 bytes and check for errors) + * + * NOTE: + * data may contain multiple events separated by newline * however newline chars may also show up inside a given event * so split only on newline with preceding double quote. 
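 * e.g. (hypothetical sample, for illustration only): in the chunk 'key="part1\npart2",x="1"\nnext event' * the first \n sits between an open and a closing quote and stays inside the message, * while the split happens at the \n that follows the closing quote.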
* Expected behavior is that every channel (TCP connection) @@ -86,10 +155,12 @@ const CC_SQ = '\''.charCodeAt(0); */ class Parser { /** - * @param {function(Buffer[]|string[])} callback - callback + * @param {OnLineFoundCb} callback - callback * @param {object} [options] - options * @param {integer} [options.bufferPrealloc = PARSER_PREALLOC] - number of buffer's items to preallocate * @param {integer} [options.bufferSize = PARSER_MAX_MSG_SIZE + 1] - number of max buffer's items + * @param {integer} [options.features = FEAT_ALL] - processing features, by default all enabled + * @param {integer} [options.maxKVPairs = PARSER_MAX_KV_PAIRS] - max number of key=value pairs per message * @param {integer} [options.maxSize = PARSER_MAX_MSG_SIZE] - max message size (bytes (buffer) or chars (string)) * @param {'buffer' | 'string'} [options.mode = 'buffer'] - processing mode */ @@ -101,6 +172,8 @@ class Parser { * in case when input Buffers has 1 char/byte only */ bufferSize: constants.PARSER_MAX_MSG_SIZE + 1, + features: FEAT_ALL, + maxKVPairs: constants.PARSER_MAX_KV_PAIRS, maxSize: constants.PARSER_MAX_MSG_SIZE, mode: constants.PARSER_MODE }); @@ -109,6 +182,12 @@ class Parser { /** read-only static properties */ Object.defineProperties(this, { + features: { + value: options.features + }, + maxKVPairs: { + value: options.maxKVPairs + }, maxSize: { value: options.maxSize }, @@ -124,7 +203,7 @@ class Parser { size: options.bufferSize }); this._cb = callback; - this._state = new State(pointerCls); + this._state = new State(pointerCls, this); } /** @returns {integer} number of pending buffers */ @@ -132,13 +211,21 @@ class Parser { return this._buffers.length; } - /** - * @returns {integer} size of pending data in bytes - */ + /** @returns {integer} size of pending data in bytes */ get bytes() { return this._bytes; } + /** @returns {boolean} true if $F scan enabled */ + get featF5EvtCategory() { + return !!(this.features & FEAT_F5_EVT_CAT); + } + + /** @returns {boolean} true if key=value scan enabled */ + get featKVPairs() { + return !!((this.features & FEAT_KV_PAIRS) && this.maxKVPairs); + } + /** @returns {number} number of free buffers */ get freeBuffers() { return this._buffers.size - this._buffers.length; @@ -151,6 +238,7 @@ class Parser { return this._length; } + /** Erase all data, can not be used after that anymore */ erase() { this._buffers.erase({ size: 1 }); this._state.erase(); @@ -220,20 +308,53 @@ class Parser { */ class State { /** @param {Object} PointerCls - class to use to create pointers */ - constructor(PointerCls) { + constructor(PointerCls, parser) { this.backSlash = false; + + // Uint16Array allows to store indexes up to 65535 + // Uint32Array allows to store indexes for long lines + + this.eventCategory = new ( + parser.maxSize <= MAX_UINT16 ? Uint16Array : Uint32Array + )(3); + this.eventCategory[F5_ECAT_OFFSET_IDX] = 0; + this.eventCategory[F5_ECAT_DEFAULT_STATE_IDX] = parser.featF5EvtCategory ? F5_SCAT_NOT_FOUND : F5_SCAT_FOUND; + this.eventCategory[F5_ECAT_STATE_IDX] = this.eventCategory[F5_ECAT_DEFAULT_STATE_IDX]; + + // - buffer to store positions of special symbols that + // speed up data processing later down the pipeline + // - uint16 for small buffers + // - uint32 for large buffers to be able to store values 2^16 ++ + // - mult. by 2 because `=` and `,` need to be stored for each pair + var maxKVSize = (parser.featKVPairs ? 
(parser.maxKVPairs * 2) : 0) + 9; + this.kvSymbols = new this.eventCategory.constructor(maxKVSize); + this.kvSymbols[KV_RESERVED_IDX] = this.kvSymbols[KV_OFFSET_IDX] = 8; // number reserved cells and start index + this.kvSymbols[KV_DEFAULT_STATE_IDX] = parser.featKVPairs ? KV_NOT_FOUND : KV_DISABLED; + this.kvSymbols[KV_STATE_IDX] = this.kvSymbols[KV_DEFAULT_STATE_IDX]; // current state + this.kvSymbols[KV_MAX_OFFSET_IDX] = maxKVSize - 1; + this.kvSymbols[KV_NOT_FOUND] = CC_EQ; // 4 - searcing for `=` + this.kvSymbols[KV_NOT_FOUND + 1] = KV_FOUND; // 5 - `=` found - next state 6 + this.kvSymbols[KV_FOUND] = CC_CM; // 6 - searching for `,` + this.kvSymbols[KV_FOUND + 1] = KV_NOT_FOUND; // 7 - `=` found - next state 4 + this.pointerCls = PointerCls; this.prevChar = CC_EM; + /** Create all pointers with stub buffer obj - helps V8 to optimize */ this.pLeft = new PointerCls(PointerCls.BUFFER_STUB, 0); this.pNewLine = new PointerCls(PointerCls.BUFFER_STUB, 0); this.pQuote = new PointerCls(PointerCls.BUFFER_STUB, 0); this.pRight = new PointerCls(PointerCls.BUFFER_STUB, 0); + this.pValidNewLine = new PointerCls(PointerCls.BUFFER_STUB, 0); } /** @param {Parser} parser */ erase() { this.backSlash = false; + this.eventCategory[F5_ECAT_STATE_IDX] = this.eventCategory[F5_ECAT_DEFAULT_STATE_IDX]; + this.eventCategory[F5_ECAT_OFFSET_IDX] = 0; + this.kvSymbols[KV_OFFSET_IDX] = this.kvSymbols[KV_RESERVED_IDX]; + this.kvSymbols[KV_STATE_IDX] = this.kvSymbols[KV_DEFAULT_STATE_IDX]; this.prevChar = CC_EM; /** * - create all pointers with stub buffer obj - helps V8 to optimize @@ -246,6 +367,7 @@ class State { this.pNewLine.init(this.pointerCls.BUFFER_STUB, 0); this.pQuote.init(this.pointerCls.BUFFER_STUB, 0); this.pRight.init(this.pointerCls.BUFFER_STUB, 0); + this.pValidNewLine.init(this.pointerCls.BUFFER_STUB, 0); } refresh() { @@ -253,6 +375,7 @@ class State { this.pNewLine.refresh(); this.pQuote.refresh(); this.pRight.refresh(); + this.pValidNewLine.refresh(); } } @@ -428,19 +551,26 @@ StringPointer.BUFFER_STUB = new CircularArray({ */ function splitByLines(timeLimit) { // local vars a faster than property access/lookup - var backSlash = this._state.backSlash; - var prevChar = this._state.prevChar; - var pNewLine = this._state.pNewLine; - var pQuote = this._state.pQuote; - var pLeft = this._state.pLeft; - var pRight = this._state.pRight; + var state = this._state; + var backSlash = state.backSlash; + var eventCategory = state.eventCategory; + var kvSymbols = state.kvSymbols; + var prevChar = state.prevChar; + var pNewLine = state.pNewLine; + var pQuote = state.pQuote; + var pLeft = state.pLeft; + var pRight = state.pRight; + var pValidNewLine = state.pValidNewLine; var char = CC_EM; - var qchar = pQuote.isFree ? char : pQuote.value(); + var ecState = eventCategory[F5_ECAT_STATE_IDX]; var forceSplit = false; var iterNo = 0; + var kvState = kvSymbols[KV_STATE_IDX]; var maxItersBeforeTimeCheck = constants.PARSER_MAX_ITERS_PER_CHECK; var maxMsgSize = this.maxSize; + var qchar = pQuote.isFree ? 
char : pQuote.value(); + // pre-compute to save CPU cycles (pre-optimization) var maxTimeTs = hrtimestamp() + timeLimit; @@ -453,54 +583,68 @@ function splitByLines(timeLimit) { pLeft.inc(); } - while (pRight.inc()) { - iterNo++; - char = pRight.value(); - - if (char === CC_BS) { - backSlash = !backSlash; - if (backSlash) { - continue; - } - } else if (char === CC_DQ || char === CC_SQ) { - // igore escaped quotes - if (!backSlash) { - if (pQuote.isFree) { - // reset value, this new line is invalid now (before quote starts, from prev message probably) - pNewLine.isFree = true; - // quote opened, reuse pointer - pRight.copy(pQuote); - qchar = char; - } else if (char === qchar) { - // reset value, this new line is invalid now (between quotes) - // quote closed - pQuote.isFree = pNewLine.isFree = true; + while (!pRight.endOfData()) { + while (pRight.msgOffset < maxMsgSize && pRight.inc()) { + iterNo++; + char = pRight.value(); + if (char === CC_BS) { + prevChar = char; + if ((backSlash = !backSlash)) { + // - go to next char if leading `\` + continue; } - } - } else if (char === CC_NL) { - if (pNewLine.isFree) { - // remember position of new line (it might be closest to the open quote) + } else if (char === CC_DQ || char === CC_SQ) { + // igore escaped quotes + if (!backSlash) { + if (pQuote.isFree) { + // reset value, this new line is invalid now (before quote starts, from prev message probably) + pValidNewLine.isFree = true; + // quote opened, reuse pointer + pRight.copy(pQuote); + qchar = char; + } else if (char === qchar) { + // reset value, this new line is invalid now (between quotes) + // quote closed + pQuote.isFree = pValidNewLine.isFree = true; + } + } + } else if (char === CC_NL) { + // most recent new line char pRight.copy(pNewLine); - // point to \r if exist prevChar === CC_CR && pNewLine.dec(); + + if (pValidNewLine.isFree) { + // remember position of new line char (it might be closest to the open quote or to the start) + pNewLine.copy(pValidNewLine); + } + if (pQuote.isFree) { + forceSplit = true; + break; + } + } else if (kvState & KV_NOT_FOUND && pQuote.isFree && prevChar !== CC_CM + && (char === CC_EQ || char === CC_CM) + ) { + kvState = checkKeyValuePair(kvSymbols, char, pRight.msgOffset); + } else if (ecState & F5_SCAT_NOT_FOUND && prevChar === F5_SCAT_DS && char === F5_SCAT_LF) { + ecState = eventCategory[F5_ECAT_STATE_IDX] = F5_SCAT_FOUND; + eventCategory[F5_ECAT_OFFSET_IDX] = pRight.msgOffset - 1; } - if (pQuote.isFree) { - // regular split - forceSplit = true; - } + + backSlash = false; + prevChar = char; } - backSlash = false; - prevChar = char; + if (forceSplit || pRight.msgOffset === maxMsgSize) { + extractLine.call(this, pLeft, pRight, pQuote, kvSymbols, eventCategory); - if (!forceSplit && pRight.msgOffset >= maxMsgSize) { - forceSplit = true; - } - if (forceSplit) { - extractLine.call(this, pLeft, pRight, pQuote, pNewLine); - prevChar = CC_EM; + // reset state before next iteration + backSlash = false; + ecState = eventCategory[F5_ECAT_STATE_IDX] = eventCategory[F5_ECAT_DEFAULT_STATE_IDX]; forceSplit = false; + kvState = kvSymbols[KV_STATE_IDX] = kvSymbols[KV_DEFAULT_STATE_IDX]; + kvSymbols[KV_OFFSET_IDX] = kvSymbols[KV_RESERVED_IDX]; + prevChar = CC_EM; } // compare SMI (small integers) is faster than '%' operation if (iterNo > maxItersBeforeTimeCheck) { @@ -511,8 +655,9 @@ function splitByLines(timeLimit) { } } - this._state.backSlash = backSlash; - this._state.prevChar = prevChar; + // save all state's data + state.backSlash = backSlash; + state.prevChar = 
prevChar; } /** @@ -523,11 +668,17 @@ function splitByLines(timeLimit) { * @param {Pointer} pLeft * @param {Pointer} pRight * @param {Pointer} pQuote - * @param {Pointer} pNewLine + * @param {integer} kvOffset + * @param {integer} kvInvalidOffset + * @param {integer} evtCatOffset */ -function extractLine(pLeft, pRight, pQuote, pNewLine) { - var pRightNewLine = false; +function extractLine(pLeft, pRight, pQuote, kvSymbols, eventCategory) { + var pNewLine = this._state.pNewLine; + var pValidNewLine = this._state.pValidNewLine; + var forceSplit = true; + var maxMsgSize = this.maxSize; + var pRightNewLine = false; /** * Split reasons: @@ -535,32 +686,44 @@ function extractLine(pLeft, pRight, pQuote, pNewLine) { * - msg is too long and no new line * - opened quote is too long */ - if (!pQuote.isFree) { + if (pQuote.isFree === false) { // malformed quote - too long // split by new line char closest to quote // or split by open quote - if (pNewLine.isFree) { - pQuote.copy(pRight); - } else { + if (pValidNewLine.isFree === false) { + pRightNewLine = true; + pValidNewLine.copy(pRight); + } else if (pNewLine.isFree === false) { pRightNewLine = true; pNewLine.copy(pRight); + } else { + pQuote.copy(pRight); } - } else if (!pNewLine.isFree) { + } else if (pValidNewLine.isFree === false) { // points to \r or \n - if (pNewLine.msgOffset > 1) { + if (pValidNewLine.msgOffset > 1) { // msg has atleast 1 char pRightNewLine = true; - pNewLine.copy(pRight); + pValidNewLine.copy(pRight); } else { // \n\n or \r\n\r\n or similar forceSplit = false; } + } else if (pRight.msgOffset >= maxMsgSize && pNewLine.isFree === false) { + // - points to \r or \n + // pNewLine.msgOffset for sure > 1 + pRightNewLine = true; + pNewLine.copy(pRight); } // else message is too long - split by pRight if (forceSplit) { // ignore new line char \r\n or \n pRightNewLine && pRight.dec(); - this._cb(slicer(pLeft.cArr, pLeft, pRight)); + this._cb( + slicer(pLeft, pRight), + getKVPairsOffsets(kvSymbols, pRight.msgOffset), + getEventCategoryOffset(eventCategory, pRight.msgOffset) + ); } pRightNewLine && pRight.inc() @@ -570,7 +733,7 @@ function extractLine(pLeft, pRight, pQuote, pNewLine) { pRight.copy(pLeft); // move to start of next msg pLeft.inc(); - pQuote.isFree = pNewLine.isFree = true; + pQuote.isFree = pValidNewLine.isFree = pNewLine.isFree = true; } /** @@ -601,14 +764,17 @@ function extractLine(pLeft, pRight, pQuote, pNewLine) { * - third item is amount of time in ns. 
spent to parse data and do cleanup */ function splitLines(timeLimit, flush) { - var pLeft = this._state.pLeft; - var pRight = this._state.pRight; + var parseTime; + var state = this._state; + var pLeft = state.pLeft; + var pRight = state.pRight; + // do -1 to start time just to show the data was processed (e.g delta will be 1) var startTs = hrtimestamp() - 1; splitByLines.call(this, timeLimit); - var parseTime = hrtimestamp() - startTs; + parseTime = hrtimestamp() - startTs; if (pRight.endOfData()) { // all data read @@ -622,7 +788,11 @@ function splitLines(timeLimit, flush) { // all valid new lines were processed already, no chance there is an empty message // no need to check length - this._cb(slicer(pLeft.cArr, pLeft, pRight)); + this._cb( + slicer(pLeft, pRight), + getKVPairsOffsets(state.kvSymbols, pRight.msgOffset), + getEventCategoryOffset(state.eventCategory, pRight.msgOffset) + ); pRight.isFree = true; } } @@ -646,13 +816,12 @@ function splitLines(timeLimit, flush) { /** * Make a slice * - * @param {CircularArray} cArr * @param {Pointer} pleft * @param {Pointer} pright * * @returns {Buffer[] | string[]} */ -function slicer(cArr, pleft, pright) { +function slicer(pleft, pright) { /** * General assumption: 1 buffer (string too) may contain a lot of messages, node:buffer.slice() * shares memory with parent buffer - less fragmentation. To avoid growing @@ -660,7 +829,7 @@ function slicer(cArr, pleft, pright) { */ return pleft.cArrIdx === pright.cArrIdx ? singleBuffer(pleft, pright) - : bufferChain(cArr, pleft, pright); + : bufferChain(pleft.cArr, pleft, pright); } /** @return {Buffer[] | string[]} */ @@ -725,4 +894,111 @@ function freeNodes(nodes) { this._length -= length; } +/** + * Filter out symbols that execeeding `maxPos` + * + * @param {Uint16Array | Uint32Array} symbols + * @param {integer} start + * @param {integer} end + * @param {integer} maxPos + * + * @returns {interger} last index to include to result array + */ +function filterSymbols(symbols, start, end, maxPos) { + // simple binary search + var mid = 0; + while (start <= end) { + mid = ((start + end) / 2) >> 0; + if (symbols[mid] < maxPos) { + start = mid + 1; + } else if (symbols[mid] > maxPos) { + end = mid - 1; + } else { + return mid; + } + } + return start - 1; +} + +/** + * Process key-value data + * + * @param {Uint16Array | Uint32Array} kvSymbols + * @param {integer} char + * @param {integer} offset + * + * @returns {integer} next state + */ +function checkKeyValuePair(kvSymbols, char, offset) { + // detects pairs of key=value - scanning for unquoted `=` and `,` in correct order + // out of order sequences result in KV_DONE + var kvState = kvSymbols[KV_STATE_IDX]; + var kvIndex = kvSymbols[KV_OFFSET_IDX]; + + // compare to target char and do not allow: + // - `=` be first char in message + // - `,` be in front of `,` or `=` + kvState = kvSymbols[kvState] === char && offset !== 1 + ? 
kvSymbols[kvState + 1] // success - next state + : KV_INVALID_SEQ; // fail + + // record offset even if invalid + kvSymbols[(kvSymbols[KV_OFFSET_IDX] = ++kvIndex)] = offset - 1; // store 0-based offsets + (kvIndex === kvSymbols[KV_MAX_OFFSET_IDX]) && (kvState = KV_FULL); + + return kvSymbols[KV_STATE_IDX] = kvState; +} + +/** + * @param {Uint16Array | Uint32Array} kvSymbols + * @param {integer} maxOffset + * + * @returns {null | Uint16Array | Uint32Array} 0-based offsets if found else null + */ +function getKVPairsOffsets(kvSymbols, maxOffset) { + if (kvSymbols[KV_STATE_IDX] === KV_DISABLED) { + return null; + } + + var kvIndex = kvSymbols[KV_OFFSET_IDX]; // points to last added item + var kvStart = kvSymbols[KV_RESERVED_IDX]; + var isInvalid = kvSymbols[KV_STATE_IDX] === KV_INVALID_SEQ; + + if (kvIndex !== kvStart && kvSymbols[kvIndex] > maxOffset) { + kvIndex = filterSymbols(kvSymbols, kvStart + 1, kvIndex, maxOffset); + isInvalid = false; + } + + return (kvIndex === kvStart || isInvalid) + ? null + : kvSymbols.slice(kvStart + 1, kvIndex + 1); +} + +/** + * @param {Uint16Array | Uint32Array} eventCategory + * @param {integer} maxOffset + * + * @returns {integer} 1-base offset if found else 0 + */ +function getEventCategoryOffset(eventCategory, maxOffset) { + return (eventCategory[F5_ECAT_STATE_IDX] === F5_SCAT_FOUND + && (eventCategory[F5_ECAT_OFFSET_IDX] + F5_FCAT_LEN) <= maxOffset // $F5TelemetryEventCategory= is ok + ) + ? eventCategory[F5_ECAT_OFFSET_IDX] // 1-base offset (receiver should do -1 to get actual position) + : 0; // no match +} + module.exports = Parser; +module.exports.FEAT_ALL = FEAT_ALL; +module.exports.FEAT_F5_EVT_CAT = FEAT_F5_EVT_CAT; +module.exports.FEAT_KV_PAIRS = FEAT_KV_PAIRS; +module.exports.FEAT_NONE = FEAT_NONE; + +/** + * @callback OnLineFoundCb + * @param {Buffer[] | String[]} chunks - data chunks to build a line + * @param {null | Uint16Array | Uint32Array} mayHaveKeyVals - 0-based offsets for unquoted `=` and `,`. + * Even index for `=`, odd index for `,`. + * @param {integer} mayHaveEvtCat - 1-base offset if `$F` found else 0 + * (non-strict check done) + */ diff --git a/src/lib/eventListener/streamService.js b/src/lib/eventListener/streamService.js index 4066cb6a..c8aa5ff0 100644 --- a/src/lib/eventListener/streamService.js +++ b/src/lib/eventListener/streamService.js @@ -93,7 +93,10 @@ class StreamService extends Service { this._streamID = 0; const streamCb = () => { - const parser = new Parser(this._onMessageCb, { mode: this._parsingMode }); + const parser = new Parser(this._onMessageCb, { + features: Parser.FEAT_NONE, + mode: this._parsingMode + }); const stream = new Stream(parser, { strategy: this._bufferingStrategy }); if (!this._dataFlowEnabled) { stream.disableIngress(); diff --git a/src/lib/logger.js b/src/lib/logger.js index 477be3c7..eb018b5c 100644 --- a/src/lib/logger.js +++ b/src/lib/logger.js @@ -124,6 +124,23 @@ class Logger { this.prefix = prefix; } + /** + * @param {string | number} aLevel - desired level + * + * @returns {boolean} true if log message will be logged under current logging level + */ + isLevelAllowed(aLevel) { + if (typeof aLevel === 'string') { + aLevel = this.getLevel(aLevel.toLowerCase()); + } else if (typeof aLevel !== 'number') { + aLevel = undefined; + } + + return aLevel === undefined + ? 
false + : aLevel >= this.getLevel(); + } + /** * Get child logger * diff --git a/src/lib/resourceMonitor/index.js b/src/lib/resourceMonitor/index.js new file mode 100644 index 00000000..c6f7653d --- /dev/null +++ b/src/lib/resourceMonitor/index.js @@ -0,0 +1,457 @@ +/* + * Copyright 2022. F5 Networks, Inc. See End User License Agreement ("EULA") for + * license terms. Notwithstanding anything to the contrary in the EULA, Licensee + * may copy and modify this software product for its internal business purposes. + * Further, Licensee may upload, publish and distribute the modified version of + * the software product on devcentral.f5.com. + */ + +'use strict'; + +/* eslint-disable no-unused-expressions, no-nested-ternary, prefer-template */ +/* eslint-disable no-use-before-define */ + +const APP_THRESHOLDS = require('../constants').APP_THRESHOLDS; +const configUtil = require('../utils/config'); +const logger = require('../logger').getChild('resourceMonitor'); +const miscUtil = require('../utils/misc'); +const rmUtil = require('./utils'); + +const MemoryMonitor = require('./memoryMonitor'); +const Service = require('../utils/service'); + +/** @module resourceMonitor */ + +class ServiceError extends Error {} + +const MEM_MON_STOP_EVT = 'memoryMonitorStop'; + +/** + * Resource Monitor Class + * + * @property {logger.Logger} logger + */ +class ResourceMonitor extends Service { + constructor() { + super(); + + /** define static read-only props that should not be overriden */ + Object.defineProperties(this, { + logger: { + value: logger.getChild(this.constructor.name) + } + }); + + this._memoryMonitorState = { + config: {}, + enabled: false, + instance: null, + logging: { + freq: APP_THRESHOLDS.MEMORY.DEFAULT_LOG_FREQ, // logging requence (in ms.) + lastMessage: 0, // last logged message timestamp (in ms.) 
+ level: APP_THRESHOLDS.MEMORY.DEFAULT_LOG_LEVEL + }, + recentUsage: null + }; + this.restartsEnabled = true; + } + + /** @returns {boolean} true if Memory Monitor is running */ + get isMemoryMonitorActive() { + const memMonitor = this._memoryMonitorState.instance; + return !!memMonitor && memMonitor.isRunning(); + } + + /** @returns {MemoryMontorLiveConfig} Memory Monitor config */ + get memoryMonitorConfig() { + return { + config: this._memoryMonitorState.config, + enabled: this._memoryMonitorState.enabled, + logging: this._memoryMonitorState.logging + }; + } + + /** @returns {memoryMonitor.MemoryCheckStatus} most recent data about memory usage */ + get memoryState() { + // making a copy ensures that any other piece of code to be able to modify this data + return miscUtil.deepCopy(this._memoryMonitorState.recentUsage); + } + + /** + * Configure and start the service + * + * @param {function} onFatalError - function to call on fatal error to restart the service + */ + _onStart() { + return new Promise((resolve, reject) => { + const memState = this._memoryMonitorState; + if (memState.instance) { + reject(new ServiceError('_memoryMonitorState.instance exists already!')); + } else if (memState.enabled) { + this.logger.verbose('_onStart: Memory Monitor enabled, starting...'); + memState.instance = new MemoryMonitor( + memoryMonitorCb.bind(this), + Object.assign(miscUtil.deepCopy(memState.config), { + logger: this.logger.getChild('Memory Monitor') + }) + ); + memState.instance.start() + .then(resolve, reject); + } else { + memState.recentUsage = null; + this.logger.verbose('_onStart: Memory Monitor disabled!'); + resolve(); + } + }); + } + + /** + * Stop the service + * + * @param {boolean} [restart] - true if service going to be restarted + */ + _onStop(restart) { + return new Promise((resolve, reject) => { + const memState = this._memoryMonitorState; + if (memState.instance) { + this.logger.verbose( + '_onStop: ' + + ((restart && memState.enabled) + ? 'Restarting Memory Monitor to apply configuration.' + : 'Stopping Memory Monitor.') + ); + memState.instance.stop() + .then(() => { + memState.instance = null; + if (!memState.enabled) { + memState.recentUsage = null; + this.ee.safeEmit(MEM_MON_STOP_EVT); + } + }) + .then(resolve, reject); + } else { + resolve(); + } + }); + } + + /** @returns {Promise} resolved with true when service destroyed or if it was destroyed already */ + destroy() { + // disabled Memory Monitor to emit `MEM_MON_STOP_EVT` later + this._memoryMonitorState.enabled = false; + this._offConfigUpdates + && this._offConfigUpdates.off() + && (this._offConfigUpdates = null); + + return super.destroy() + .then(() => { + // all listeners notified already, safe to remove + this.ee.removeAllListeners(APP_THRESHOLDS.MEMORY.STATE.NOT_OK); + this.ee.removeAllListeners(APP_THRESHOLDS.MEMORY.STATE.OK); + this.ee.removeAllListeners(MEM_MON_STOP_EVT); + this.logger.info('Destroyed! 
Data processing enabled!'); + }); + } + + /** @param {restWorker.ApplicationContext} appCtx - application context */ + initialize(appCtx) { + if (appCtx.configMgr) { + this._offConfigUpdates = appCtx.configMgr.on('change', onConfigEvent.bind(this), { objectify: true }); + this.logger.debug('Subscribed to configuration updates.'); + } else { + this.logger.warning('Unable to subscribe to configuration updates!'); + } + } + + /** + * @param {function} onEnable + * @param {function} onDisable + * + * @returns {ProcessingState} instance + */ + initializePState(onEnable, onDisable) { + return (new ProcessingState(this)).initialize(onEnable, onDisable); + } + + /** @return {boolean} true if processing allowed by most recent memory status check */ + isProcessingEnabled() { + const recentUsage = this._memoryMonitorState.recentUsage; + return recentUsage + ? recentUsage.thresholdStatus === APP_THRESHOLDS.MEMORY.STATE.OK + : true; + } +} + +/** Processing State Class */ +class ProcessingState { + /** @param {ResourceMonitor} resourceMonitor */ + constructor(resourceMonitor) { + this._enabled = true; + this._listeners = []; + this._onDisable = null; + this._onEnable = null; + this._resMonitor = resourceMonitor; + } + + /** @returns {boolean} if processing allowed to continue */ + get enabled() { + return this._enabled; + } + + /** @returns {memoryMonitor.MemoryCheckStatus} most recent data about memory usage */ + get memoryState() { + return this._resMonitor.memoryState; + } + + /** Destroy instance and unsubscribe from all events */ + destroy() { + this._enabled = true; + this._listeners.forEach((listener) => listener.off()); + this._listeners.length = 0; + this._resMonitor = null; + } + + /** + * @param {function} onEnable + * @param {function} onDisable + * + * @returns {ProcessingState} instance + */ + initialize(onEnable, onDisable) { + // save prev state + const isEnabled = this._enabled; + const resMonitor = this._resMonitor; + + this.destroy(); + + // restore prev state + this._enabled = isEnabled; + this._resMonitor = resMonitor; + + // assign callbacks and subscribe to events + this._onEnable = onEnable; + this._onDisable = onDisable; + + const updateEventCb = updateProcessingState.bind(this, true); + this._listeners = [ + APP_THRESHOLDS.MEMORY.STATE.NOT_OK, + APP_THRESHOLDS.MEMORY.STATE.OK, + MEM_MON_STOP_EVT + ].map((evt) => this._resMonitor.ee.on(evt, updateEventCb, { objectify: true })); + + updateProcessingState.call(this, false); + return this; + } +} + +/** + * @this ResourceMonitor + * + * @param {memoryMonitor.MemoryCheckStatus} checkStatus + */ +function memoryMonitorCb(checkStatus) { + const memState = this._memoryMonitorState; + if (memState.instance === null) { + return; + } + + const prevMemState = memState.recentUsage; + if (prevMemState && checkStatus.hrtimestamp < prevMemState.hrtimestamp) { + this.logger.warning(`Memory Monitor event is late by ${prevMemState.hrtimestamp - checkStatus.hrtimestamp}ns.!`); + return; + } + + const eventName = ( + !prevMemState + || prevMemState.thresholdStatus !== checkStatus.thresholdStatus + ) + ? checkStatus.thresholdStatus + : ''; + + memState.recentUsage = checkStatus; + if (eventName) { + this.ee.safeEmit(eventName, checkStatus); + } + + const logConfig = memState.logging; + + let logLevel = eventName + ? (checkStatus.thresholdStatus === APP_THRESHOLDS.MEMORY.STATE.NOT_OK ? 
'warning' : 'info') + : logConfig.level; + + if (!eventName && logConfig.lastMessage && (Date.now() - logConfig.lastMessage) < logConfig.freq) { + logLevel = 'verbose'; + } else { + logConfig.lastMessage = Date.now(); + } + + if (this.logger.isLevelAllowed(logLevel)) { + const usageStr = Object.keys(checkStatus.usage) + .map((k) => `${k}=${rmUtil.formatFloat(checkStatus.usage[k], 2)}`) + .join(', '); + + this.logger[logLevel]( + `MEMORY_USAGE: (${usageStr}), TREND = ${checkStatus.trend}, STATUS = ${checkStatus.thresholdStatus}` + ); + } + + this.ee.safeEmit('memoryCheckStatus', checkStatus); +} + +/** + * Event handler for memore usage state updates + * + * @this ProcessingState + * + * @param {boolean} fireCallbacks - if true then callbacks will be fired + */ +function updateProcessingState(fireCallbacks) { + const prevEnabled = this._enabled; + this._enabled = this._resMonitor.isProcessingEnabled(); + + if (arguments.length === 1) { + // monitor stopped, re-enable all + !prevEnabled && this._onEnable && this._onEnable(); + } else if (fireCallbacks && (prevEnabled !== this._enabled)) { + // call all callbacks in same event loop + this.enabled + ? (this._onEnable && this._onEnable()) + : (this._onDisable && this._onDisable()); + } +} + +/** + * Create a Memory Monitor configuration + * + * @this ResourceMonitor + * + * @param {Configuration} config +*/ +function updateMemoryMonitorConfig(config) { + const controls = configUtil.getTelemetryControls(config); + const maxHeapSize = miscUtil.getRuntimeInfo().maxHeapSize; + const memState = this._memoryMonitorState; + + if (configUtil.hasEnabledComponents(config)) { + // there are should be `default` values inhereted from the schema anyway + // otherwise Memory Monitor defaults will be used. + const memoryMonitorConfig = controls.memoryMonitor || {}; + const freeMemoryLimit = memoryMonitorConfig.osFreeMemory; + const logLevel = memoryMonitorConfig.logLevel; + const logFreq = memoryMonitorConfig.logFrequency; + const memCheckInterval = memoryMonitorConfig.interval; + const memLimit = memoryMonitorConfig.provisionedMemory; + const memReleasePct = memoryMonitorConfig.thresholdReleasePercent; + const memThresholdPct = typeof memoryMonitorConfig.memoryThresholdPercent === 'undefined' + ? controls.memoryThresholdPercent + : memoryMonitorConfig.memoryThresholdPercent; + + memState.enabled = true; + + memState.config.freeMemoryLimit = (Number.isSafeInteger(freeMemoryLimit) && freeMemoryLimit > 0) + ? freeMemoryLimit + : APP_THRESHOLDS.MEMORY.DEFAULT_MIN_FREE_MEM; + + memState.config.provisioned = (Number.isSafeInteger(memLimit) && memLimit > 0) + ? memLimit + : maxHeapSize; + + if (memState.config.provisioned > maxHeapSize) { + this.logger.warning( + `Memory limit (${rmUtil.megabytesToStr(memState.config.provisioned)}) is equal or higher ` + + `than V8 max heap size (${rmUtil.megabytesToStr(maxHeapSize)}). Application may crash. ` + + `Please, adjust memory limit's value! Memory limit set to ${maxHeapSize} MB` + ); + memState.config.provisioned = maxHeapSize; + } + + memState.config.thresholdPercent = (Number.isSafeInteger(memThresholdPct) && memThresholdPct > 0) + ? memThresholdPct + : APP_THRESHOLDS.MEMORY.DEFAULT_LIMIT_PERCENT; + + memState.config.releasePercent = (Number.isSafeInteger(memReleasePct) && memReleasePct > 0) + ? 
memReleasePct + : APP_THRESHOLDS.MEMORY.DEFAULT_RELEASE_PERCENT; + + if (memState.config.thresholdPercent >= 100) { + this.logger.warning( + 'Disabling Memory Monitor due to high threshold percent value ' + + `(${rmUtil.percentToStr(memState.config.thresholdPercent)}).` + ); + memState.enabled = false; + } + + memState.config.intervals = miscUtil.deepCopy((memCheckInterval === 'aggressive') + ? APP_THRESHOLDS.MEMORY.ARGRESSIVE_CHECK_INTERVALS + : APP_THRESHOLDS.MEMORY.DEFAULT_CHECK_INTERVALS); + + if (memCheckInterval === 'aggressive') { + this.logger.info('More frequent Memory Monitor checks are enabled.'); + } + + if (typeof logLevel === 'string') { + memState.logging.level = logLevel; // should be validated by the declaration validator + } + if (Number.isSafeInteger(logFreq) && logFreq > 0) { + memState.logging.freq = logFreq * 1000; // convert to ms. + } + } else { + this.logger.info('No active components found in the declaration - no reason to start Memory Monitor.'); + memState.enabled = false; + } +} + +/** + * @this ResourceMonitor + * + * @param {Configuration} config + * + * @returns {Promise} resolved once config applied to the instance + */ +function onConfigEvent(config) { + return Promise.resolve() + .then(() => { + this.logger.verbose('Config "change" event'); + return setConfig.call(this, config); + }).catch((err) => { + this.logger.exception('Error caught on attempt to apply configuration to Resource Monitor:', err); + }); +} + +/** + * Update Resource Monitor configuration + * + * @this ResourceMonitor + * + * @param {Configuration} config - configuration to apply + * + * @returns {Promise} resolved once configuration applied/updated + */ +function setConfig(config) { + let needRestart = false; + + updateMemoryMonitorConfig.call(this, config); + const memState = this._memoryMonitorState; + + if (memState.enabled) { + needRestart = true; + this.logger.info(`Memory Monitor will be restarted to apply new settings: ${JSON.stringify(memState.config)}.`); + } else if (memState.instance) { + this.logger.warning('Stopping Memory Monitor!'); + needRestart = true; + } + + // restart the service, if more sub-services are added in future then this line should be + // updated to restart only updated instances + return Promise.resolve(needRestart && this.restart()); +} + +module.exports = ResourceMonitor; + +/** + * @typedef MemoryMontorLiveConfig + * @type {Object} + * @property {object} config - config + * @property {boolean} enabled - true if Memory Monitor enabled + * @property {object} logging - logging config + */ diff --git a/src/lib/resourceMonitor/memoryMonitor.js b/src/lib/resourceMonitor/memoryMonitor.js new file mode 100644 index 00000000..2683c099 --- /dev/null +++ b/src/lib/resourceMonitor/memoryMonitor.js @@ -0,0 +1,438 @@ +/* + * Copyright 2022. F5 Networks, Inc. See End User License Agreement ("EULA") for + * license terms. Notwithstanding anything to the contrary in the EULA, Licensee + * may copy and modify this software product for its internal business purposes. + * Further, Licensee may upload, publish and distribute the modified version of + * the software product on devcentral.f5.com.
+ */ + +'use strict'; + +/* eslint-disable no-bitwise, no-nested-ternary */ + +const APP_THRESHOLDS = require('../constants').APP_THRESHOLDS; +const hrtimestamp = require('../utils/datetime').hrtimestamp; +const logger = require('../logger').getChild('memoryMonitor'); +const miscUtil = require('../utils/misc'); +const rmUtil = require('./utils'); +const Service = require('../utils/service'); +const timers = require('../utils/timers'); + +/** @module resourceMonitor/memoryMonitor */ + +class ServiceError extends Error {} + +/** + * Memory Monitor Class + * + * NOTE: + * - due to the nature of the implementation there is a chance that subscribers + * may receive `MemoryCheckStatus` objects out-of-order. Subscribers + * are responsible for checking `hrtimestamp`. + * - when utilization exceeds 100% then check interval will be 1sec. + * to avoid additional allocations + * - `thresholdPercent` is not applicable to `freeMemoryLimit`. Once + * `freeUtilizationPercent` exceeds 100% then processing will be disabled. + * + * NOTE: RSS/external `bloat` issue. TS heavily relies on buffers/arraybuffers + * and as a result there might be situations when RSS is above the threshold and + * `external` memory is close to it too. Once traffic has stopped the following + * scenarios may occur: + * - RSS and `external` went back to normal. Traffic enabled. + * - RSS is still high, but `external` and `heap` are low. Should we enable traffic? + * - RSS is low, but `external` and/or `heap` are above the threshold. Should we enable traffic? + * - RSS, `external` and/or `heap` are high. Traffic disabled. + * + * It is not a memory leak, because with GC enabled the usage counters drop back to normal. + * This might be due to high memory fragmentation. Or weird node.js behavior. + * + * @property {number} freeMemoryLimit - OS free memory limit (in MB) + * @property {boolean} gcEnabled - true if GC enabled otherwise false + * @property {Logger} logger - logger + * @property {number} provisioned - max number of MB available for the process + * (can be configured via --max_old_space_size CLI option) + * @property {number} releaseThreshold - amount of memory to release threshold lock (in MB) + * @property {number} releasePercent - amount of memory to release threshold lock (in %) + * @property {number} threshold - V8's RSS usage threshold (in MB) + * @property {number} thresholdPercent - V8's RSS usage threshold (in %) + */ +class MemoryMonitor extends Service { + /** + * Constructor + * + * @param {function(MemoryCheckStatus)} cb - callback + * @param {object} [options] - options + * @param {number} [options.freeMemoryLimit] - OS free memory limit (in MB) + * @param {object} [options.fs] - FS module, by default 'fs' from './misc' + * @param {Interval[]} [options.intervals] - memory check intervals + * @param {Logger} [options.logger] - logger + * @param {number} [options.provisioned] - amount of provisioned memory in MB + * @param {number} [options.thresholdPercent] - application memory threshold percent to use for alerts + */ + constructor(cb, options) { + super(); + + options = Object.assign({ + freeMemoryLimit: APP_THRESHOLDS.MEMORY.DEFAULT_MIN_FREE_MEM, + fs: miscUtil.fs, + intervals: miscUtil.deepCopy(APP_THRESHOLDS.MEMORY.DEFAULT_CHECK_INTERVALS), + logger: logger.getChild(this.constructor.name), + provisioned: miscUtil.getRuntimeInfo().maxHeapSize, + releasePercent: APP_THRESHOLDS.MEMORY.DEFAULT_RELEASE_PERCENT, + thresholdPercent: APP_THRESHOLDS.MEMORY.DEFAULT_LIMIT_PERCENT + }, options || {}); + + this._lastKnownIntervalIdx = -1; +
this._lastKnownState = APP_THRESHOLDS.MEMORY.STATE.OK; + this._timerPromise = Promise.resolve(); + this.restartsEnabled = true; + + /** define static read-only props that should not be overriden */ + Object.defineProperties(this, { + _cb: { + value: cb + }, + _gcInterval: { + value: APP_THRESHOLDS.MEMORY.DEFAULT_GC_INTERVAL * 1000 + }, + _intervals: { + value: miscUtil.deepFreeze(enrichMemoryCheckIntervals(options.intervals)) + }, + _readOSFreeMem: { + value: () => options.fs.readFileSync('/proc/meminfo') + }, + freeMemoryLimit: { + value: options.freeMemoryLimit + }, + gcEnabled: { + value: typeof global.gc === 'function' + }, + logger: { + value: options.logger + }, + provisioned: { + value: options.provisioned + }, + releasePercent: { + value: options.releasePercent + }, + thresholdPercent: { + value: options.thresholdPercent + } + }); + + // >> 0 is like Math.floor actually + Object.defineProperties(this, { + threshold: { + value: Math.floor(this.provisioned * (this.thresholdPercent / 100.0)) >> 0 + } + }); + Object.defineProperties(this, { + releaseThreshold: { + value: Math.floor(this.threshold * (this.releasePercent / 100.0)) >> 0 + } + }); + + const usage = memoryUsage.call(this); + if (usage.free === -1) { + this.logger.warning('Unable to get information about available memory from OS!'); + } else { + this.logger.info(`OS available memory: ${rmUtil.megabytesToStr(usage.free)} (limit = ${rmUtil.megabytesToStr(this.freeMemoryLimit)})`); + } + + this._lastKnownUtilization = usage.utilization; + this._lastKnownGCCall = Date.now(); + + this.logger.info(`Total memory available to the process (${process.argv[0]}): ${rmUtil.megabytesToStr(this.provisioned)}.`); + this.logger.info(`Memory threshold: ${rmUtil.megabytesToStr(this.threshold)} (${rmUtil.percentToStr(this.thresholdPercent)}).`); + this.logger.info(`Memory release threshold: ${rmUtil.megabytesToStr(this.releaseThreshold)} (${rmUtil.percentToStr(this.releasePercent)}).`); + this.logger.info(`Memory usage: ${rmUtil.megabytesToStr(usage.utilization)} (${rmUtil.percentToStr(usage.utilizationPercent)})`); + this.logger.info(`GC exposed = ${this.gcEnabled}`); + } + + /** + * Start Memory Monitor service + * + * @async + * @param {function(Error)} onFatalError - callback to call on unexpected errors + * + * @returns {Promise} resolved with true once instance started and interval + * updated according to current memory usage, otherwise false + */ + _onStart(onFatalError) { + return new Promise((resolve, reject) => { + if (this._timer) { + reject(new ServiceError('_timer exists already!')); + } else { + this._timer = new timers.BasicTimer(null, { + abortOnFailure: false, // timer will contiue to call a func even on error + logger: this.logger.getChild('timer') + }); + this._lastKnownIntervalIdx = getIntervalIdx.call( + this, + getOverallUtilization(memoryUsage.call(this)) + ); + const nextConf = this._intervals[this._lastKnownIntervalIdx]; + + updateTimerInterval.call(this, nextConf.interval, this._timer) + .then(resolve, onFatalError); + } + }); + } + + /** + * Stop Memory Monitor service + * + * @async + * @returns {Promise} resolved once service stopped + */ + _onStop() { + return new Promise((resolve, reject) => { + if (this._timer) { + const timer = this._timer; + this._timer = null; + this._timerPromise = this._timerPromise + .then(() => timer.stop()) + .then(resolve, reject); + } else { + resolve(); + } + }); + } +} + +/** + * Transform user-supplied intervals to internal structures + * + * @param {Interval[]} intervals + * + * 
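e.g. (hypothetical values, for illustration only): `[{ usage: 50, interval: 30 }, { usage: 90, interval: 15 }]` + * becomes `[{ min: 0, max: 50, interval: 30 }, { min: 50, max: 90, interval: 15 }]` plus two trailing entries: + * one up to DEFAULT_OK_USAGE_PERCENT using DEFAULT_MIN_INTERVAL and one up to Number.MAX_SAFE_INTEGER + * using DEFAULT_MIN_INTERVAL * 10 (assuming both usage values are below DEFAULT_OK_USAGE_PERCENT) + * + *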
@returns {InternalInterval[]} + */ +function enrichMemoryCheckIntervals(intervals) { + // sort by usage and drop intervals with same usage + intervals = intervals + .sort((a, b) => a.usage - b.usage) + .filter((conf, idx) => conf.usage + && conf.usage <= APP_THRESHOLDS.MEMORY.DEFAULT_OK_USAGE_PERCENT + && ( + idx === 0 + || conf.usage > intervals[idx - 1].usage + )); + + intervals = intervals.map((conf, idx) => { + const min = idx === 0 ? 0 : intervals[idx - 1].usage; + return { + min, + max: conf.usage, + interval: conf.interval + }; + }); + + let maxUsage = intervals[intervals.length - 1]; + if (maxUsage.max < APP_THRESHOLDS.MEMORY.DEFAULT_OK_USAGE_PERCENT) { + intervals.push(maxUsage = { + interval: APP_THRESHOLDS.MEMORY.DEFAULT_MIN_INTERVAL, + min: maxUsage.max, + max: APP_THRESHOLDS.MEMORY.DEFAULT_OK_USAGE_PERCENT + }); + } + + intervals.push({ + interval: APP_THRESHOLDS.MEMORY.DEFAULT_MIN_INTERVAL * 10, + min: maxUsage.max, + max: Number.MAX_SAFE_INTEGER + }); + return intervals; +} + +/** + * Get interval index according to memory usage + * + * @this MemoryMonitor + * + * @param {number} usagePercent - memory usage percent + * + * @returns {integer} index number + */ +function getIntervalIdx(usagePercent) { + return this._intervals.findIndex( + (conf) => usagePercent >= conf.min && usagePercent < conf.max + ); +} + +/** + * Perform memory check + * + * @this MemoryMonitor + * + * @param {timers.BasicTimer} timer - origin time (used to verify that timer is still active) + * + * @returns {Promise} resolved once timer updated and notification sent + */ +function memoryMonitorCheck(timer) { + const usage = memoryUsage.call(this); + const trend = (this._lastKnownUtilization > usage.utilization) + ? APP_THRESHOLDS.MEMORY.TREND.DOWN + : ((this._lastKnownUtilization < usage.utilization) + ? APP_THRESHOLDS.MEMORY.TREND.UP + : APP_THRESHOLDS.MEMORY.TREND.NO_CHANGE); + + this._lastKnownUtilization = usage.utilization; + + this._lastKnownState = ( + this._lastKnownState === APP_THRESHOLDS.MEMORY.STATE.NOT_OK + && usage.utilization > this.releaseThreshold + ) + ? this._lastKnownState + : ((Math.max( + usage.freeUtilizationPercent, + usage.thresholdUtilzationPercent + ) < APP_THRESHOLDS.MEMORY.DEFAULT_OK_USAGE_PERCENT) + ? APP_THRESHOLDS.MEMORY.STATE.OK + : APP_THRESHOLDS.MEMORY.STATE.NOT_OK); + + if (this.gcEnabled && ( + this._lastKnownState === APP_THRESHOLDS.MEMORY.STATE.NOT_OK + || ((Date.now() - this._lastKnownGCCall) > this._gcInterval) + )) { + this._lastKnownGCCall = Date.now(); + global.gc(); + } + + const nextConfIdx = (this._lastKnownState === APP_THRESHOLDS.MEMORY.STATE.NOT_OK) + ? 
(this._intervals.length - 1) + : getIntervalIdx.call(this, getOverallUtilization(usage)); + + const nextConf = this._intervals[nextConfIdx]; + + if (nextConfIdx !== this._lastKnownIntervalIdx + && this._intervals[this._lastKnownIntervalIdx].interval !== nextConf.interval + && this._timer === timer + ) { + updateTimerInterval.call(this, nextConf.interval, timer); + } + + this._lastKnownIntervalIdx = nextConfIdx; + + setImmediate(this._cb, { + hrtimestamp: hrtimestamp(), + interval: miscUtil.deepCopy(nextConf), + thresholdStatus: this._lastKnownState, + trend, + usage + }); +} + +/** + * Set new interval for the timer + * + * @this MemoryMonitor + * + * @param {number} interval - interval in seconds + * @param {timers.BasicTimer} timer - origin time (used to verify that timer is still active) + * + * @returns {Promise} resolved once timer updated + */ +function updateTimerInterval(interval, timer) { + this._timerPromise = this._timerPromise + .then( + // is it still same timer or not + () => timer === this._timer + && this._timer + .update(memoryMonitorCheck.bind(this, this._timer), interval) + .then(() => this.logger.info(`Interval updated to ${interval}s.`)), + (err) => this.logger.exception('Uncaught error on attempt to update interval:', err) + ); + return this._timerPromise; +} + +/** + * Application's memory usage stats + * + * @this MemoryMonitor + * + * @returns {MemoryUsage} + */ +function memoryUsage() { + const usage = rmUtil.appMemoryUsage(); + Object.keys(usage).forEach((key) => { + usage[key] = rmUtil.bytesToMegabytes(usage[key]); + }); + + const free = rmUtil.osAvailableMem(this._readOSFreeMem); + const freeUtilizationPercent = (free >= 0) + ? ((this.freeMemoryLimit <= free) + ? (100 + this.freeMemoryLimit - free) // result might be negative + : ((2 - free / this.freeMemoryLimit) * 100)) + : 0; + + const maxUtilization = Math.max(usage.rss, usage.external, usage.heapUsed); + return Object.assign(usage, { + free, + freeLimit: this.freeMemoryLimit, + freeUtilizationPercent: freeUtilizationPercent >= 0 ? freeUtilizationPercent : 0, + provisioned: this.provisioned, + release: this.releaseThreshold, + releasePercent: this.releasePercent, + threshold: this.threshold, + thresholdPercent: this.thresholdPercent, + thresholdUtilzationPercent: (maxUtilization / this.threshold) * 100.0, + utilization: maxUtilization, + utilizationPercent: (maxUtilization / this.provisioned) * 100.0 + }); +} + +/** + * @param {MemoryUsage} usage + * + * @returns {number} overall max usage + */ +function getOverallUtilization(usage) { + return usage.freeUtilizationPercent > usage.thresholdUtilzationPercent + ? 
usage.freeUtilizationPercent + : usage.thresholdUtilzationPercent; +} + +module.exports = MemoryMonitor; + +/** + * @typedef InternalInterval + * @type {Object} + * @property {number} min - min memory usage in % + * @property {number} max - max memory usage in % + * @property {number} interval - check interval to use + */ +/** + * @typedef Interval + * @type {Object} + * @property {number} usage - max memory usage in % + * @property {number} interval - check interval to use when actual usage is below `usage` + */ +/** + * @typedef MemoryCheckStatus + * @type {Object} + * @property {number} hrtimestamp - timestamp (high-resolution time) + * @property {InternalInterval} interval - current interval config + * @property {string} thresholdStatus - threshold status (might be used as an event name) + * @property {'down' | 'same' | 'up'} trend - interval-related trend + * @property {MemoryUsage} usage - used memory (in MB) + */ +/** + * @typedef MemoryUsage + * @type {Object} + * @property {number} external - C++ object bound to JS layer (in MB) + * @property {number} free - OS freem memory (in MB) + * @property {number} freeLimit - OS free memory limit (in MB) + * @property {number} freeUtilizationPercent - OS free memory utilization (in %) + * @property {number} heapTotal - V8's memory usage - amount of heap size (in MB) + * @property {number} heapUsed - V8's memory usage - amount of used heap (in MB) + * @property {number} provisioned - amount of provisioned memory (in MB) + * @property {number} release - memory release threshold limit (in MB) + * @property {number} releasedPercent - memory release threshold limit (in %) + * @property {number} rss - amount of space occupied in the main memory device (in MB) + * @property {number} threshold - memory threshold limit (in MB) + * @property {number} thresholdPercent - memory threshold limit (in %) + * @property {number} thresholdUtilzationPercent - memory threshold utilization (in %) + * @property {number} utilization - memory usage (in MB) + * @property {number} utilizationPercent - memory utilization (in %) + */ diff --git a/src/lib/resourceMonitor/utils.js b/src/lib/resourceMonitor/utils.js new file mode 100644 index 00000000..f00f9473 --- /dev/null +++ b/src/lib/resourceMonitor/utils.js @@ -0,0 +1,110 @@ +/* + * Copyright 2022. F5 Networks, Inc. See End User License Agreement ("EULA") for + * license terms. Notwithstanding anything to the contrary in the EULA, Licensee + * may copy and modify this software product for its internal business purposes. + * Further, Licensee may upload, publish and distribute the modified version of + * the software product on devcentral.f5.com. 
+ */ + +'use strict'; + +/* eslint-disable no-restricted-properties, no-use-before-define */ + +/** @type {integer} */ +const BYTES_TO_MB_DIVISOR = Math.pow(1024, 2); + +/** @type {integer} number of digits to appear after the decimal point */ +const FLOAT_DIGITS = 2; + +/** @returns {object} application's memory usage */ +function appMemoryUsage() { + return process.memoryUsage(); +} + +/** + * Convert bytes to megabytes + * + * @param {number} value - Bytes to convert to MegaBytes + * + * @returns {number} value in MB + */ +const bytesToMegabytes = (value) => value / BYTES_TO_MB_DIVISOR; + +/** + * Trim number of digits after the decimal point + * + * @param {number} number - number to format + * @param {integer} [digits=FLOAT_DIGITS] - number of digits to appear after the decimal point + * + * @returns {string} formatted number as a string + */ +function formatFloat(number, digits) { + digits = arguments.length > 1 ? digits : FLOAT_DIGITS; + return number.toFixed(digits); +} + +/** + * @param {number} value - value to convert + * + * @returns {string} printable representation of value (in MB) + */ +function megabytesToStr(value) { + return wrapMB(formatFloat(value, 2)); +} + +/** + * Read MemAvailable from /proc/meminfo data + * + * NOTE: + * - never tested on BIG-IP 13.0.x and older + * + * @param {function} readMemInfoFn - function that returns raw /proc/meminfo data + * + * @returns {number} available memory in MB or -1 if unable to fetch data + */ +function osAvailableMem(readMemInfoFn) { + let ret = -1; + try { + const data = readMemInfoFn().toString(); + const idx = data.indexOf('MemAvailable'); + if (idx !== -1) { + const nl = data.indexOf('\n', idx); + // parse to int and convert to megabytes + ret = parseInt( + data.substring(idx, (nl !== -1) ? nl : data.length - 1) + .trim() + .split(':')[1], + 10 + ) / 1024; // KB -> MB + } + } catch (err) { + // do nothing + } + return ret; +} + +/** + * @param {number} value - value to convert + * + * @returns {string} printable representation of value (in %) + */ +function percentToStr(value) { + return `${formatFloat(value, 2)}%`; +} + +/** + * Wrap string or number with ` MB` suffix + * + * @param {number | string} value - value to wrap + * + * @returns {string} wrapped string + */ +const wrapMB = (value) => `${value} MB`; + +module.exports = { + appMemoryUsage, + bytesToMegabytes, + formatFloat, + megabytesToStr, + osAvailableMem, + percentToStr, + wrapMB +}; diff --git a/src/lib/runtimeConfig/index.js b/src/lib/runtimeConfig/index.js new file mode 100644 index 00000000..af115a08 --- /dev/null +++ b/src/lib/runtimeConfig/index.js @@ -0,0 +1,256 @@ +/** + * Copyright 2024 F5, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +'use strict'; + +/* eslint-disable no-unused-expressions, no-nested-ternary, prefer-template */ +/* eslint-disable no-use-before-define */ + +const fs = require('fs'); + +const configUtil = require('../utils/config'); +const constants = require('../constants'); +const logger = require('../logger').getChild('runtimeConfig'); +const miscUtil = require('../utils/misc'); +const Service = require('../utils/service'); +const Task = require('./task'); +const updater = require('./updater'); + +/** @module runtimeConfig */ + +/** + * Runtime Config Class + * + * @property {logger.Logger} logger + */ +class RuntimeConfig extends Service { + /** @param {updater.FSLikeObject} [fsUtil] */ + constructor(fsUtil) { + super(); + + /** define static read-only props that should not be overriden */ + Object.defineProperties(this, { + logger: { + value: logger.getChild(this.constructor.name) + } + }); + this.fsUtil = fsUtil || fs; + this.restartsEnabled = true; + } + + /** + * Configure and start the service + * + * @param {function} onFatalError - function to call on fatal error to restart the service + */ + _onStart() { + return new Promise((resolve) => { + this._taskLoop = Promise.resolve(); + this._currentTask = null; + this._nextTask = null; + resolve(); + }); + } + + /** + * Stop the service + * + * @param {boolean} [restart] - true if service going to be restarted + */ + _onStop() { + return new Promise((resolve, reject) => { + this._taskLoop = null; + this._nextTask = null; + + Promise.resolve() + .then(() => this._currentTask && this._currentTask.isRunning() && this._currentTask.stop()) + .then( + () => { + this._currentTask = null; + }, + () => {} // ignore everything + ) + .then(resolve, reject); + }); + } + + /** @returns {Promise} resolved with true when service destroyed or if it was destroyed already */ + destroy() { + this._offConfigUpdates + && this._offConfigUpdates.off() + && (this._offConfigUpdates = null); + + return super.destroy(); + } + + /** @param {restWorker.ApplicationContext} appCtx - application context */ + initialize(appCtx) { + if (appCtx.configMgr) { + this._offConfigUpdates = appCtx.configMgr.on('change', onConfigEvent.bind(this), { objectify: true }); + this.logger.debug('Subscribed to configuration updates.'); + } else { + this.logger.warning('Unable to subscribe to configuration updates!'); + } + } +} + +/** + * @this ResourceMonitor + * + * @param {Configuration} config + * + * @returns {Promise} resolved once config applied to the instance + */ +function onConfigEvent(config) { + return Promise.resolve() + .then(() => { + this.logger.verbose('Config "change" event'); + this.logger.info(`Current runtime state: ${JSON.stringify(runtimeState())}`); + + // even empty configuration should be processed - e.g. 
restore defaults + const runtimeConfig = configUtil.getTelemetryControls(config).runtime || {}; + const newRuntimeConfig = updater.enrichScriptConfig({}); // initialize with defaults + + if (runtimeConfig.enableGC === true) { + this.logger.info('Going to try to enable GC (request from user).'); + newRuntimeConfig.gcEnabled = true; + } // disabled by default + + if (Number.isSafeInteger(runtimeConfig.maxHeapSize)) { + if (runtimeConfig.maxHeapSize <= constants.APP_THRESHOLDS.MEMORY.DEFAULT_HEAP_SIZE) { + // - need to remove CLI option from the script if presented - use default value then + // - can't go lower than default without affecting other apps + this.logger.info('Going to try to restore the default heap size (request from user).'); + } else { + // need to add/update CLI option to the script + this.logger.info(`Going to try to set the heap size to ${runtimeConfig.maxHeapSize} MB (request from user).`); + newRuntimeConfig.heapSize = runtimeConfig.maxHeapSize; + } + } // else use default value + + this.logger.info(`New runtime configuration: ${JSON.stringify(newRuntimeConfig)}`); + this.logger.debug('Scheduling an update to apply the new runtime configuration.'); + + addTask.call(this, newRuntimeConfig); + }).catch((err) => { + this.logger.exception('Error caught on attempt to apply configuration to Runtime Config:', err); + }); +} + +/** + * @private + * + * @this RuntimeConfig + * + * @returns {updater.AppContext} + */ +function makeAppCtx() { + const log = this.logger; + return { + fsUtil: this.fsUtil, + logger: { + debug(msg) { log.debug(msg); }, + error(msg) { log.error(msg); }, + exception(msg, error) { log.debugException(msg, error); }, + info(msg) { log.debug(msg); }, + warning(msg) { log.warning(msg); } + } + }; +} + +/** + * @private + * + * @returns {object} current state of the runtime + */ +function runtimeState() { + return { + gcEnabled: typeof global.gc !== 'undefined', + maxHeapSize: miscUtil.getRuntimeInfo().maxHeapSize + }; +} + +/** + * Add config to the task loop + * + * @private + * + * @this RuntimeConfig + * + * @param {updater.ScriptConfig} config - configuration to apply + */ +function addTask(config) { + taskLoop.call(this, new Task(config, makeAppCtx.call(this), this.logger.getChild('task'))); +} + +/** + * Schedule the update task + * + * @private + * + * @this RuntimeConfig + * + * @param {Task} task - task to add to the loop + */ +function taskLoop(task) { + if (!task || this._taskLoop === null) { + if (task && this._taskLoop === null) { + this.logger.info('Unable to schedule next task: the service restart requested already.'); + } + return Promise.resolve(); + } + + // ignore previosuly scheduled task + this._nextTask = task; + + if (this._currentTask) { + this.logger.debug('Stopping current task...'); + return this._currentTask.stop() + .then(() => {}, () => {}); // ignore all errors and etc. 
+ } + + this._currentTask = task; + this._nextTask = null; + + this._taskLoop = this._taskLoop.then(() => this._currentTask.run()) + .then(() => this.logger.debug('Update task finished!')) + .catch((error) => this.logger.exception('Uncaught error on attempt to run "update" task to apply changes to runtime configuration:', error)) + .then(() => { + if (this._currentTask) { + if (this._currentTask.isFailed()) { + if (this._nextTask === null) { + if (this._currentTask.__retry !== true) { + this.logger.debug('Retrying attempt to update the startup script!'); + this._nextTask = this._currentTask.copy(); + this._nextTask.__retry = true; + } else { + this.logger.debug('No retries left for the failed task'); + } + } + } else if (this._currentTask.isRestartRequested()) { + this.logger.info('Restart scheduled to apply changes to the runtime configuration'); + // drop next task + this._nextTask = null; + this._taskLoop = null; + } + } + this._currentTask = null; + return taskLoop.call(this, this._nextTask); + }); + return Promise.resolve(); +} + +module.exports = RuntimeConfig; diff --git a/src/lib/runtimeConfig/task.js b/src/lib/runtimeConfig/task.js new file mode 100644 index 00000000..aed767c5 --- /dev/null +++ b/src/lib/runtimeConfig/task.js @@ -0,0 +1,440 @@ +/** + * Copyright 2024 F5, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +'use strict'; + +const assert = require('assert'); +const getKey = require('lodash/get'); +const machina = require('machina'); +const pathUtil = require('path'); +const uuid = require('uuid').v4; + +const constants = require('../constants'); +const deviceUtil = require('../utils/device'); +const logger = require('../logger').getChild('runtimeConfig').getChild('Task'); +const miscUtil = require('../utils/misc'); +const SafeEventEmitter = require('../utils/eventEmitter'); +const updater = require('./updater'); + +/** @module runtimeConfig/task */ + +const DACLI_SCRIPT_NAME = 'telemetry_delete_me__async_restnoded_updater'; +const UPDATER_SCRIPT = pathUtil.join(__dirname, 'updater.js'); + +/** + * FSM State Transitions + * + * Initial state: uninitialized + * + * uninitialized ----> cleanup + * cleanup ----> config-pre-check || stopped + * config-pre-check ----> shell-check || done || stopped + * shell-check ----> updater-run || done || stopped + * updater-run ----> config-post-check || stopped || failed + * config-post-check ----> restart-service-delay || stopped || failed + * restart-service-delay ----> restart-service || stopped + * restart-service ----> restart-service-force || done || stopped + * restart-service-force ----> done || stopped + */ + +/** + * @private + * + * @returns {Promise} resolved with true if `bash` enabled or false otherwise + */ +function isShellEnabled() { + return Promise.resolve() + .then(() => deviceUtil.makeDeviceRequest( + constants.LOCAL_HOST, + '/mgmt/tm/sys/db/systemauth.disablebash' + )) + .then((retval) => retval.value, () => false); +} + +/** + * @private + * + * @param {string} cmd - command to execute + * + * @returns {Promise} true if command succeed or false otherwise + */ +function runRemoteCmd(cmd) { + return Promise.resolve() + .then(() => ( + new deviceUtil.DeviceAsyncCLI({ + scriptName: DACLI_SCRIPT_NAME + })).execute(cmd)) + .then(() => true, () => false); +} + +const taskFsm = new machina.BehavioralFsm({ + namespace: 'updater-fsm', + initialState: 'uninitialized', + + states: { + cleanup: { + _onEnter(task) { + updater.cleanupLogsFile(task.appCtx); + this._doTransition(task, 'config-pre-check'); + } + }, + 'config-post-check': { + _onEnter(task) { + const config = task.runtimeConfig; + const scriptConfig = updater.fetchConfigFromScript(task.appCtx); + + // if we are here then shell command succeed already + let configApplied = true; + if (scriptConfig === null) { + task.logger.error('Unable to read configuration from the startup script.'); + } else { + try { + assert.deepStrictEqual(config, scriptConfig); + } catch (error) { + configApplied = false; + } + } + if (!configApplied) { + task.logger.error('Configuration was not applied to the script!'); + } + this._doTransition(task, configApplied ? 
'restart-service-delay' : 'failed'); + } + }, + 'config-pre-check': { + _onEnter(task) { + const config = task.runtimeConfig; + const scriptConfig = updater.fetchConfigFromScript(task.appCtx); + let hasChanges = false; + + if (scriptConfig === null) { + task.logger.error('Unable to read configuration from the startup script.'); + } else { + // remove before comparison + delete scriptConfig.id; + delete config.id; + + try { + assert.deepStrictEqual(config, scriptConfig); + } catch (error) { + hasChanges = true; + } + + if (!hasChanges) { + task.logger.debug('No changes found between running configuration and the new one.'); + } + } + + if (hasChanges) { + config.id = uuid(); + hasChanges = updater.saveScriptConfigFile(config, task.appCtx); + } + this._doTransition(task, hasChanges ? 'shell-check' : 'done'); + } + }, + done: { + _onEnter(task) { + task.logger.error('Task done!'); + task.ee.emit('done'); + } + }, + failed: { + _onEnter(task) { + task.logger.error('Task failed!'); + task.ee.emit('failed'); + } + }, + 'restart-service-force': { + _onEnter(task) { + task._restartRequested = true; + task.logger.warning('Unable to restart service gracefully! Calling process.exit(0) to restart it'); + this._doTransition(task, 'done'); + process.exit(0); + } + }, + 'restart-service': { + _onEnter(task) { + task.logger.warning('Restarting service to apply new changes for the runtime configuraiton!'); + runRemoteCmd('bigstart restart restnoded') + .then((success) => { + if (success) { + task.logger.warning('Service will be restarted in a moment to apply changes in the configuration!'); + task._restartRequested = true; + } else { + task.logger.error('Unable to restart service via bigstart. Calling process.exit(0) instead to restart it'); + } + this._doTransition(task, success ? 'done' : 'restart-service-force'); + }); + } + }, + 'restart-service-delay': { + _onEnter(task) { + task.logger.warning('New configuration was successfully applied to the startup script! Scheduling service restart in 1 min.'); + miscUtil.sleep(60 * 1000) + .then(() => this._doTransition(task, 'restart-service')); + } + }, + 'shell-check': { + _onEnter(task) { + isShellEnabled() + .then((enabled) => { + if (enabled) { + task.logger.debug('Shell available, proceeding with task execution.'); + } else { + task.logger.debug('Shell not available, unable to proceed with task execution.'); + } + this._doTransition(task, enabled ? 'updater-run' : 'done'); + }); + } + }, + stopped: { + _onEnter(task) { + task.logger.error('Task stopped!'); + task.ee.emit('stopped'); + } + }, + uninitialized: { + '*': function (task) { + this._doTransition(task, 'cleanup'); + } + }, + 'updater-run': { + _onEnter(task) { + task.logger.debug('Trying to execute "updater" script'); + runRemoteCmd(`${process.argv[0]} ${UPDATER_SCRIPT}`) + .then((success) => { + let logs = updater.readLogsFile(task.appCtx); + if (logs === null) { + logs = 'no logs available!'; + } + task.logger.debug(`Device Async CLI logs:\n${logs}`); + + if (!success) { + task.logger.error('Attempt to update the runtime configuration failed! 
See logs for more details.'); + } + this._doTransition(task, 'config-post-check'); + }); + } + } + }, + + /** + * @private + * + * @param {Task} task + * + * @returns {boolean} true if task allowed to start + */ + _allowedToStart(task) { + return this.getState(task) === 'uninitialized'; + }, + + /** + * @private + * + * @param {Task} task + * + * @returns {boolean} true if task allowed to stop + */ + _allowedToStop(task) { + const state = this.getState(task); + return state !== 'done' && state !== 'failed' && state !== 'stopped'; + }, + + /** + * Do transition if not stopped yet + * + * @private + * + * @param {Task} task + * @param {string} state + */ + _doTransition(task, state) { + this.transition(task, task._stopRequested ? 'stopped' : state); + }, + + /** + * @private + * + * @param {Task} task + * @param {string} action + * @param {string | string[]} successEvents + * @param {string | string[]} failureEvents + * + * @returns {Promise} resolved with `true` if action succeed or rejected with error + */ + _promisifyActionHandle(task, action, successEvents, failureEvents) { + return new Promise((resolve, reject) => { + successEvents = Array.isArray(successEvents) ? successEvents : [successEvents]; + failureEvents = Array.isArray(failureEvents) ? failureEvents : [failureEvents]; + + const cancel = () => { + successEvents.forEach((p) => p.cancel()); + failureEvents.forEach((p) => p.cancel()); + }; + + successEvents = successEvents.map((evtName) => { + const promise = task.ee.waitFor(evtName); + promise.then((args) => { + resolve(args.length === 0 || args); + cancel(); + }) + .catch(() => {}); + return promise; + }); + failureEvents = failureEvents.map((evtName) => { + const promise = task.ee.waitFor(evtName); + promise.then((args) => { + reject(args.length === 0 ? new Error(`Task emitted event "${evtName}"`) : args[0]); + cancel(); + }) + .catch(() => {}); + return promise; + }); + + this.handle.apply(this, [task, action].concat(Array.from(arguments).slice(4))); + }); + }, + + /** + * @public + * + * @param {Task} task + * + * @returns {string} current state + */ + getState(task) { + return getKey(task, ['__machina__', this.namespace, 'state']) || 'uninitialized'; + }, + + /** + * @public + * + * @param {Task} task + * + * @returns {boolean} true if task failed to complete execution + */ + isFailed(task) { + return this.getState(task) === 'failed'; + }, + + /** + * @public + * + * @param {Task} task + * + * @returns {boolean} true if task is running + */ + isRunning(task) { + return !this._allowedToStart(task) && this._allowedToStop(task); + }, + + /** + * @public + * + * @param {Task} task + * + * @returns {Promise} resolve with `true` if task started and successfully finished + */ + run(task) { + return this._allowedToStart(task) + ? this._promisifyActionHandle(task, 'start', 'done', ['failed', 'stopped']) + : Promise.resolve(false); + }, + + /** + * @public + * + * @param {Task} task + * + * @returns {Promise} resolve with `true` if task stopped or `false` if stop not allowed + */ + stop(task) { + return this._allowedToStop(task) + ? 
this._promisifyActionHandle(task, 'stop', ['done', 'stopped'], 'failed') + : Promise.resolve(false); + } + +}); + +// resend events to appropriate instances of Task +taskFsm.on('transition', (data) => data.client.ee.emitAsync('transition', data)); + +/** + * Task Class + * + * Note: event emitter `ee` fires folllowing events: + * - transition(data) - state transition + * + * @property {SafeEventEmitter} ee - event emitter + * @property {logger.Logger} logger - logger + * @property {boolean} restartsEnabled - true if restarts on fatal error at `running` state are enabled + */ +class Task { + /** + * @param {updater.ScriptConfig} config + * @param {updater.AppContext} appCtx + * @param {logger.Logger} [logger] - logger instance + */ + constructor(config, appCtx, _logger) { + // create read-only properties + Object.defineProperties(this, { + ee: { + value: new SafeEventEmitter() + } + }); + + this._restartRequested = false; + this._stopRequested = false; + this.appCtx = appCtx; + this.logger = _logger || logger; + this.runtimeConfig = config; + + this.ee.on('transition', (data) => this.logger.debug(`transition from "${data.fromState}" to "${data.toState}" (action=${data.action})`)); + } + + /** @returns {Task} copy (state not copied) */ + copy() { + return new Task(this.runtimeConfig, this.appCtx, this.logger); + } + + /** @returns {boolean} true if task failed to complete execution */ + isFailed() { + return taskFsm.isFailed(this); + } + + /** @returns {boolean} true if service restart was requested as result of task execution */ + isRestartRequested() { + return this._restartRequested; + } + + /** @returns {boolean} true if task is running */ + isRunning() { + return taskFsm.isRunning(this); + } + + /** @returns {Promise} resolve with `true` if task started and successfully finished */ + run() { + return taskFsm.run(this); + } + + /** @returns {Promise} resolve with `true` if task stopped or `false` if stop not allowed */ + stop() { + this._stopRequested = true; + return taskFsm.stop(this); + } +} + +module.exports = Task; diff --git a/src/lib/runtimeConfig/updater.js b/src/lib/runtimeConfig/updater.js new file mode 100644 index 00000000..238bd431 --- /dev/null +++ b/src/lib/runtimeConfig/updater.js @@ -0,0 +1,432 @@ +/** + * Copyright 2024 F5, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +'use strict'; + +const assert = require('assert'); +const Console = require('console').Console; +const fs = require('fs'); +const pathUtil = require('path'); + +/** @module runtimeConfig/updater */ + +/** + * THE SCRIPT MAY BE RUN IN TWO CONTEXTS: + * - restnoded context as regular iApp LX - all libs are available + * - as regular node.js script - none of iApp LX libs are available + */ + +// approximation for default heap size +const NODEJS_DEFAULT_HEAP_SIZE = 1400; + +// should be used to check single-line only +const RESTNODE_EXEC_LINE_REGEX = /^.*exec\s*\/usr\/bin\/f5-rest-node/; + +const SCRIPT_CONFIG_ID = /# ID:[a-zA-Z0-9-]+/gm; + +// the restnode startup script +const RESTNODE_SCRIPT_FPATH = '/etc/bigstart/scripts/restnoded'; + +// actual config to apply +const SCRIPT_CONFIG_FPATH = pathUtil.join(__dirname, 'config.json'); + +// most recent logs +const SCRIPT_LOGS_FPATH = pathUtil.join(__dirname, 'logs.txt'); + +/** + * Add notes about restoring original behavior to the script's content + * + * @private + * + * @param {string} script + * @param {string} execLine + * @param {AppContext} appCtx + * + * @returns {string} updated script + */ +function addAttentionBlockIfNeeded(script, execLine, appCtx) { + if (script.indexOf('modified by F5 BIG-IP Telemetry Streaming') !== -1) { + appCtx.logger.debug('"notice" block exists already.'); + return script; + } + + appCtx.logger.debug('Adding "notice" block to the script.'); + + const indent = getIndent(execLine); + // eslint-disable-next-line prefer-template + const comment = `${indent}# ATTENTION. The block below modified by F5 BIG-IP Telemetry Streaming!\n` + + `${indent}# To restore original behavior, uncomment the next line and remove the block below.\n` + + `${indent}#\n` + + `${indent}# ${execLine.slice(indent.length)}\n` + + `${indent}#\n` + + `${indent}# The block below should be removed to restore original behavior!\n` + + execLine; + + return script.replace(execLine, comment); +} + +/** + * Apply configuration to the script + * + * @private + * + * @param {string} script + * @param {ScriptConfig} config + * @param {AppContext} appCtx + * + * @returns {null | string} modified script or null if unable to modify + */ +function applyScriptConfig(script, config, appCtx) { + appCtx.logger.info(`Applying configuration to the script: ${JSON.stringify(config)}`); + + const currentConfig = fetchConfigFromScript(script, appCtx); + if (currentConfig === null) { + appCtx.logger.info('No configuration read from the script.'); + return null; + } + + let hasChanges = false; + try { + assert.deepStrictEqual(currentConfig, config); + } catch (error) { + hasChanges = true; + } + + if (!hasChanges) { + appCtx.logger.info('No diffs found in current and new config.'); + return null; + } + + appCtx.logger.info('Found diffs in current and new config.'); + + // do not trim it! 
will be used for replacement later + const originExecLine = getExecLine(script); + let newExecLine = originExecLine; + + // enable/disable GC + if (currentConfig.gcEnabled !== config.gcEnabled) { + appCtx.logger.info('Updating GC config.'); + newExecLine = newExecLine.replace(/ --expose-gc/g, ''); + if (config.gcEnabled) { + appCtx.logger.info('Enabling GC config.'); + const substr = newExecLine.match(RESTNODE_EXEC_LINE_REGEX)[0]; + newExecLine = newExecLine.slice(0, substr.length) + .concat(' --expose-gc') + .concat(newExecLine.slice(substr.length)); + } else { + appCtx.logger.info('Disabling GC config.'); + } + } + + // enable/disable custom heap size + if (currentConfig.heapSize !== config.heapSize) { + appCtx.logger.info('Upading heap size.'); + newExecLine = newExecLine.replace(/ --max_old_space_size=\d+/g, ''); + if (config.heapSize + && Number.isSafeInteger(config.heapSize) + && config.heapSize > NODEJS_DEFAULT_HEAP_SIZE + ) { + appCtx.logger.info(`Setting heap size to ${config.heapSize} MB.`); + const substr = newExecLine.match(RESTNODE_EXEC_LINE_REGEX)[0]; + newExecLine = newExecLine.slice(0, substr.length) + .concat(` --max_old_space_size=${config.heapSize}`) + .concat(newExecLine.slice(substr.length)); + } else { + appCtx.logger.info(`Setting heap size to default value - ${NODEJS_DEFAULT_HEAP_SIZE} MB.`); + } + } + + if (currentConfig.id) { + script = script.replace(new RegExp(`^.*ID:${currentConfig.id}.*(?:\r\n|\n)`, 'm'), ''); + } + newExecLine = `${getIndent(originExecLine)}# ID:${config.id}\n${newExecLine}`; + + script = addAttentionBlockIfNeeded(script, originExecLine, appCtx); + script = script.replace(originExecLine, newExecLine); + return script; +} + +/** + * Delete file + * + * @private + * + * @param {string} path + * @param {AppContext} appCtx + * + * @returns {boolean} true on success + */ +function deleteFile(path, appCtx) { + appCtx.logger.info(`Deleting file "${path}"`); + try { + appCtx.fsUtil.unlinkSync(path); + return true; + } catch (err) { + appCtx.logger.exception(`Unable to delete file "${path}":`, err); + } + return false; +} + +/** + * Enrich script config with defaults + * + * @private + * + * @param {ScriptConfig} config + * + * @returns {ScriptConfig} + */ +function enrichScriptConfig(config) { + if (typeof config.gcEnabled === 'undefined') { + config.gcEnabled = false; + } + if (typeof config.heapSize === 'undefined') { + config.heapSize = NODEJS_DEFAULT_HEAP_SIZE; + } + return config; +} + +/** + * Grab current config state for the script + * + * @public + * + * @param {string} script + * @param {AppContext} appCtx + * + * @returns {null | ScriptConfig} script's config data or null if unable to read/parse + */ +function fetchConfigFromScript(script, appCtx) { + const execLine = getExecLine(script); + if (!execLine) { + appCtx.logger.error('Unable to find "exec" line in the script'); + return null; + } + + appCtx.logger.info('Parsing configuration from the script'); + + const config = enrichScriptConfig({}); + // check for GC + config.gcEnabled = execLine.indexOf('--expose-gc') !== -1; + + // check for custom heap size + const heapMatch = execLine.match(/--max_old_space_size=(\d+)/); + if (heapMatch) { + config.heapSize = parseInt(heapMatch[1], 10); + } + + const scriptIDMatch = script.match(SCRIPT_CONFIG_ID); + if (scriptIDMatch) { + config.id = scriptIDMatch[0].split(':')[1]; + } + + appCtx.logger.info(`Parsed configuration from the script: ${JSON.stringify(config)}`); + return config; +} + +/** + * Search for a line 'exec /usr/bin/f5-rest-node + * 
+ * @private + * + * @param {string} data + * + * @returns {string} exec line if found otherwise empty string + */ +function getExecLine(data) { + data = data.split('\n') + .filter((line) => RESTNODE_EXEC_LINE_REGEX.test(line) + && line.indexOf('--debug') === -1 + && line.indexOf('--inspect') === -1); + + return data.length === 0 ? '' : data[data.length - 1]; +} + +/** + * Extract indentation from string + * + * @private + * + * @param {string} string + * + * @returns {string} indentation string + */ +function getIndent(string) { + const m = string.match(/^(\s+)/); + return m ? m[1] : ''; +} + +/** + * Read data from the file + * + * @private + * + * @param {string} path + * @param {AppContext} appCtx + * + * @returns {null | string} file content if file read or null + */ +function readFile(path, appCtx) { + appCtx.logger.info(`Reading data from file "${path}"`); + try { + return appCtx.fsUtil.readFileSync(path).toString(); + } catch (err) { + appCtx.logger.exception(`Unable to read file "${path}":`, err); + } + return null; +} + +/** + * Write content to the file + * + * @private + * + * @param {string} path + * @param {string} data + * @param {AppContext} appCtx + * + * @returns {boolean} true on success + */ +function writeFile(path, data, appCtx) { + appCtx.logger.info(`Writing data to file "${path}"`); + try { + appCtx.fsUtil.writeFileSync(path, data, { flag: 'w' }); + return true; + } catch (err) { + appCtx.logger.exception(`Unable to write data to file "${path}":`, err); + } + return false; +} + +/** + * Shortcuts for generic functions + */ +// cleanup logs +const cleanupLogsFile = (appCtx) => deleteFile(SCRIPT_LOGS_FPATH, appCtx); +// read logs +const readLogsFile = (appCtx) => readFile(SCRIPT_LOGS_FPATH, appCtx); +// read task config from the file +const readScriptConfigFile = (appCtx) => { + const data = readFile(SCRIPT_CONFIG_FPATH, appCtx); + return data === null ? 
data : JSON.parse(data); +}; +// read the restnode script data +const readScriptFile = (appCtx) => readFile(RESTNODE_SCRIPT_FPATH, appCtx); +// write task config to the file +const saveScriptConfigFile = (data, appCtx) => writeFile(SCRIPT_CONFIG_FPATH, JSON.stringify(data), appCtx); +// override the restnode script with new scriprt +const saveScriptFile = (data, appCtx) => writeFile(RESTNODE_SCRIPT_FPATH, data, appCtx); + +/** + * Main routine - applies user-defined configuration to restnode startup script + */ +function main(fsUtil) { + fsUtil = fsUtil || fs; + + const logsStream = fsUtil.createWriteStream(SCRIPT_LOGS_FPATH, { flags: 'w' }); + const logger = new Console(logsStream, logsStream); + + const appCtx = { + fsUtil, + logger: { + debug: logger.log.bind(logger), + error: logger.log.bind(logger), + exception: logger.log.bind(logger), + info: logger.log.bind(logger) + } + }; + + function inner() { + let newConfig = readScriptConfigFile(appCtx); + if (newConfig === null || !(typeof newConfig.id === 'string' && newConfig.id)) { + appCtx.logger.info('No config found, nothing to apply to the script!'); + return; + } + + let script = readScriptFile(appCtx); + if (script === null) { + appCtx.logger.info('Unable to read "restnode" startup script!'); + return; + } + + newConfig = enrichScriptConfig(newConfig); + script = applyScriptConfig(script, newConfig, appCtx); + if (script === null) { + appCtx.logger.info('The "restnode" startup script not modified!'); + return; + } + + if (!saveScriptFile(script, appCtx)) { + appCtx.logger.info('Unable to save "restnode" startup script!'); + } else { + appCtx.logger.info('Done!'); + } + } + + try { + inner(); + } catch (error) { + logger.exception('Uncaught error:', error); + } + + logsStream.end(() => logsStream.close()); +} + +if (require.main === module) { + main(); +} + +module.exports = { + cleanupLogsFile, + enrichScriptConfig, + fetchConfigFromScript(appCtx) { + const script = readScriptFile(appCtx); + return script ? 
fetchConfigFromScript(script, appCtx) : null; + }, + main, + readLogsFile, + readScriptConfigFile, + saveScriptConfigFile +}; + +/** + * @typedef AppContext + * @type {object} + * @property {FSLikeObject} fsUtil + * @property {LoggerLikeObject} logger + */ +/** + * @typedef FSLikeObject + * @type {object} + * @property {function} readFileSync + * @property {function} writeFileSync + */ +/** + * @typedef LoggerLikeObject + * @type {object} + * @property {function(string)} debug + * @property {function(string)} error + * @property {function(string, error)} exception + * @property {function(string)} info + * @property {function(string)} warning + */ +/** + * @typedef ScriptConfig + * @type {Object} + * @property {boolean} gcEnabled - true when GC enabled + * @property {number} heapSize - heap size (in MB) + * @property {string} id - configuration ID + */ diff --git a/src/lib/systemPoller.js b/src/lib/systemPoller.js index b51e820a..adde3989 100644 --- a/src/lib/systemPoller.js +++ b/src/lib/systemPoller.js @@ -16,14 +16,12 @@ 'use strict'; -const APP_THRESHOLDS = require('./constants').APP_THRESHOLDS; const configUtil = require('./utils/config'); const configWorker = require('./config'); const constants = require('./constants'); const dataPipeline = require('./dataPipeline'); const errors = require('./errors'); const logger = require('./logger'); -const monitor = require('./utils/monitor'); const promiseUtil = require('./utils/promise'); const SystemStats = require('./systemStats'); const timers = require('./utils/timers'); @@ -35,8 +33,6 @@ const util = require('./utils/misc'); // key - poller name, value - { timer, config } const POLLER_TIMERS = {}; -let processingEnabled = true; - class NoPollersError extends errors.ConfigLookupError {} /** @@ -48,17 +44,6 @@ function getPollerTimers() { return POLLER_TIMERS; } -/** - * Check if systemPoller(s) are running - * Toggled by monitor checks - * - * @returns {Boolean} - whether or not processing is enabled - */ - -function isEnabled() { - return processingEnabled; -} - function findSystemOrPollerConfigs(originalConfig, sysOrPollerName, pollerName, namespace) { // If namespace is undefined, assumption is we're querying for objects in the 'default namespace' const namespaceInfo = namespace ? ` in Namespace '${namespace}'` : ''; @@ -332,30 +317,63 @@ function fetchPollersData(pollerConfigs, decryptSecrets) { configWorker.on('change', (config) => Promise.resolve() .then(() => { logger.debug('configWorker change event in systemPoller'); - // reset for monitor check - processingEnabled = true; return applyConfig(util.deepCopy(config)); }) .then(() => logger.debug(`${Object.keys(getPollerTimers()).length} system poller(s) running`)) .catch((error) => logger.exception('Uncaught error during System Poller(s) configuration', error))); -monitor.on('check', (status) => new Promise((resolve) => { - const monitorChecksOk = status === APP_THRESHOLDS.MEMORY.OK; - // only enable/disable when there is change in state - if (processingEnabled !== monitorChecksOk) { - // also only log here to minimize entries - logger.warning(`${status}. ${monitorChecksOk ? 'Re-enabling system poller(s).' 
: 'Temporarily disabling system poller(s).'}`); - if (monitorChecksOk) { - enablePollers(); - } else { - disablePollers(); +/** + * TEMP BLOCK OF CODE, REMOVE AFTER REFACTORING + */ +let processingEnabled = true; +let processingState = null; +let processingStatePromise = Promise.resolve(); + +/** @param {restWorker.ApplicationContext} appCtx - application context */ +function initialize(appCtx) { + if (appCtx.resourceMonitor) { + if (processingState) { + logger.debug('Destroying existing ProcessingState instance'); + processingState.destroy(); } + processingState = appCtx.resourceMonitor.initializePState( + onResourceMonitorUpdate.bind(null, true), + onResourceMonitorUpdate.bind(null, false) + ); + processingEnabled = processingState.enabled; + onResourceMonitorUpdate(processingEnabled); + } else { + logger.error('Unable to subscribe to Resource Monitor updates!'); } - processingEnabled = monitorChecksOk; - resolve(); -}).catch((err) => { - logger.exception('Unexpected error in system poller (monitor check handler).', err); -})); +} + +/** @param {boolean} enabled - true if processing enabled otherwise false */ +function onResourceMonitorUpdate(enabled) { + processingEnabled = enabled; + processingStatePromise = processingStatePromise.then(() => { + if (enabled) { + logger.warning('Enabling system poller(s).'); + return enablePollers(); + } + logger.warning('Temporarily disabling system poller(s).'); + return disablePollers(); + }) + .catch((error) => logger.exception(`Unexpected error on attempt to ${enabled ? 'enable' : 'disable'} system pollers:`, error)); +} + +/** + * Check if systemPoller(s) are running + * Toggled by monitor checks + * + * @returns {Boolean} - whether or not processing is enabled + */ + +function isEnabled() { + return processingEnabled; +} +/** + * TEMP BLOCK OF CODE END + */ module.exports = { NoPollersError, @@ -365,5 +383,6 @@ module.exports = { getPollerTimers, process, safeProcess, - isEnabled + isEnabled, + initialize }; diff --git a/src/lib/utils/device.js b/src/lib/utils/device.js index 8b92a051..fc34006e 100644 --- a/src/lib/utils/device.js +++ b/src/lib/utils/device.js @@ -37,8 +37,7 @@ const HOST_DEVICE_CACHE = {}; const HDC_KEYS = { TYPE: 'TYPE', VERSION: 'VERSION', - RETRIEVE_SECRETS_FROM_TMSH: 'RETRIEVE_SECRETS_FROM_TMSH', - NODE_MEMORY_LIMIT: 'NODE_MEMORY_LIMIT' + RETRIEVE_SECRETS_FROM_TMSH: 'RETRIEVE_SECRETS_FROM_TMSH' }; /** @@ -561,10 +560,6 @@ module.exports = { HDC_KEYS.RETRIEVE_SECRETS_FROM_TMSH, isVersionAffectedBySecretsBug(deviceVersion) ); - return this.getDeviceNodeMemoryLimit(constants.LOCAL_HOST); - }) - .then((deviceNodeMemLimit) => { - this.setHostDeviceInfo(HDC_KEYS.NODE_MEMORY_LIMIT, deviceNodeMemLimit); }); }, @@ -839,51 +834,6 @@ module.exports = { }); }, - /** - * Returns a device's node memory limit in MB - * - * @param {String} host - HTTP host - * @param {Object} [options] - function options, see 'makeDeviceRequest' - * - * @returns {Promise} A promise which is resolved with the max memory limit for device - * If an error occurs, the node default of 1433 MB (1.4 GB) will be returned - */ - getDeviceNodeMemoryLimit(host, options) { - const defaultMem = constants.APP_THRESHOLDS.MEMORY.DEFAULT_MB; - let provisionExtraMb; - let useExtraMb; - - const uri = '/mgmt/tm/sys/db'; - options = options || {}; - options.method = 'GET'; - options.includeResponseObject = false; - return promiseUtil.allSettled([ - this.makeDeviceRequest(host, `${uri}/restjavad.useextramb`, util.copy(options)), - this.makeDeviceRequest(host, 
`${uri}/provision.extramb`, util.copy(options)) - ]) - .then((results) => { - results = promiseUtil.getValues(results); - useExtraMb = results[0]; - provisionExtraMb = results[1]; - if (!util.isObjectEmpty(useExtraMb) && (useExtraMb.value === 'true' || useExtraMb.value === true) - && !util.isObjectEmpty(provisionExtraMb) && provisionExtraMb.value > defaultMem) { - return provisionExtraMb.value; - } - return defaultMem; - }) - .catch((err) => { - logger.warning(`Unable to retrieve memory provisioning. ${err.message}`); - }) - .then((memLimit) => { - if (!memLimit) { - logger.debug(`Memory provisioning: (default) ${defaultMem}`); - return defaultMem; - } - logger.debug(`Memory provisioning: ${memLimit} | Sys db settings: restjavad.useextramb (${JSON.stringify(useExtraMb || {})}) | provision.extramb (${JSON.stringify(provisionExtraMb || {})})}`); - return Number(memLimit); - }); - }, - /** * Send request to the device * diff --git a/src/lib/utils/misc.js b/src/lib/utils/misc.js index c4b08526..da17f88d 100644 --- a/src/lib/utils/misc.js +++ b/src/lib/utils/misc.js @@ -26,6 +26,8 @@ const objectGet = require('lodash/get'); const childProcess = require('child_process'); const fs = require('fs'); const net = require('net'); +const v8 = require('v8'); + // deep require support is deprecated for versions 7+ (requires node8+) const uuidv4 = require('uuid/v4'); const jsonDuplicateKeyHandle = require('json-duplicate-key-handle'); @@ -703,6 +705,23 @@ module.exports = { return cloneDeep(obj); }, + /** + * Deeply freeze an object + * + * @param {any} obj - object to freeze + * + * @returns {any} freezed object + */ + deepFreeze(obj) { + Object.freeze(obj); + traverseJSON(obj, (parent, key) => { + if (typeof parent[key] === 'object') { + Object.freeze(parent[key]); + } + }); + return obj; + }, + /** * Merges an Array of Objects into a single Object (uses lodash.mergeWith under the hood). * Note: Nested Arrays are concatenated, not overwritten. @@ -774,10 +793,13 @@ module.exports = { }, /** - * Return Node version without 'v' + * @returns {RuntimeInfo} */ getRuntimeInfo() { - return { nodeVersion: process.version.substring(1) }; + return { + maxHeapSize: v8.getHeapStatistics().heap_size_limit / (1024 * 1024), + nodeVersion: process.version.substring(1) + }; }, /** @@ -1114,3 +1136,9 @@ module.exports = { * * @returns {boolean} false when item should be ignored */ +/** + * @typedef RuntimeInfo + * @type {Object} + * @property {integer} maxHeapSize - max V8 heap size + * @property {string} nodeVersion - node.js version without `v` prefix + */ diff --git a/src/lib/utils/monitor.js b/src/lib/utils/monitor.js deleted file mode 100644 index cff5c920..00000000 --- a/src/lib/utils/monitor.js +++ /dev/null @@ -1,198 +0,0 @@ -/** - * Copyright 2024 F5, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -'use strict'; - -const APP_THRESHOLDS = require('../constants').APP_THRESHOLDS; -const deviceUtil = require('./device'); -const logger = require('../logger'); -const timers = require('./timers'); -const configWorker = require('../config'); -const configUtil = require('./config'); -const onApplicationExit = require('./misc').onApplicationExit; -const SafeEventEmitter = require('./eventEmitter'); - -// eslint-disable-next-line no-restricted-properties -const BYTES_TO_MB_DIVISOR = Math.pow(1024, 2); - -const DEFAULT_INTERVAL_SEC = 5; -// mapping of intervals to use according to memory usage -// higher usage %, more frequent checks -const MEM_PERCENT_TO_INTERVAL_SEC = [ - { min: 0, max: 24, interval: 30 }, - { min: 25, max: 49, interval: 15 }, - { min: 50, max: 74, interval: 10 }, - { min: 75, max: 89, interval: 5 }, - { min: 90, max: Number.MAX_VALUE, interval: 3 } -]; - -/** - * Monitor that performs checks on an interval - * Used to determine if certain thresholds are met - * And if so, if certain events need to be emitted - * - * @class Monitor - * @extends {EventEmitter2} - */ -class Monitor extends SafeEventEmitter { - constructor() { - super(); - this.logger = logger.getChild(this.constructor.name); - this.memoryThreshold = null; - this.memoryThresholdPercent = null; - this.memoryLimit = null; - this.timer = new timers.BasicTimer(this.checkThresholds.bind(this), { - abortOnFailure: false, - intervalInS: DEFAULT_INTERVAL_SEC, - logger: this.logger.getChild('timer') - }); - - onApplicationExit(this.stop.bind(this)); - } - - /** - * Returns whether monitor is enabled, by default true unless toggled with env var - * - * @returns {Boolean} - whether monitor checks are enabled - */ - isEnabled() { - return process.env[APP_THRESHOLDS.MONITOR_DISABLED] !== 'true'; - } - - /** - * Configure the monitor limit settings to use for checks - * - * @param {Number} memThresholdPercent - Memory threshold percent - * (% of total memory usage that once reached, fires an event) - * - * @returns {void} - */ - setLimits(memThresholdPercent) { - // default memThreshold also set at 90% set in schema - this.memoryThresholdPercent = memThresholdPercent || APP_THRESHOLDS.MEMORY.DEFAULT_LIMIT_PERCENT; - // do this here in case Host Device Cache might not be fully loaded earlier - this.memoryLimit = deviceUtil.getHostDeviceInfo('NODE_MEMORY_LIMIT') || APP_THRESHOLDS.MEMORY.DEFAULT_MB; - this.memoryThreshold = Math.round(this.memoryLimit * (this.memoryThresholdPercent / 100)); - logger.info(`Total Memory Provisioned (node process): ${this.memoryLimit} MB. Memory Threshold: ${this.memoryThreshold} MB (${this.memoryThresholdPercent}%)`); - } - - /** - * Starts the monitor timer - * - * @param {Number} memThresholdPercent - Memory threshold percent - * (% of total memory usage that once reached, triggers an event). 
- * First run starts after 5 seconds, then interval auto-adjusts according to usage - * - * @returns {Promise} resolved once timer started - */ - start(memThresholdPercent) { - return this.update(memThresholdPercent, DEFAULT_INTERVAL_SEC); - } - - /** - * Stops and clear the monitor timer - * - * @returns {Promise} resolved once timer stopped - */ - stop() { - this.memoryThreshold = null; - return this.timer.stop() - .then(() => logger.info('Monitor checks stopped.')); - } - - /** - * Updates the monitor timer - * - * @param {Number} memThresholdPercent - Memory threshold percent - * (% of total memory usage that once reached, fires an event) - * @param {Number} interval - The frequency of monitor timer in seconds, defaults to 5 - * - * @returns {Promise} resolved once timer updated - */ - update(memThresholdPercent, interval) { - if (!this.isEnabled()) { - return Promise.resolve(); - } - interval = interval || DEFAULT_INTERVAL_SEC; - this.setLimits(memThresholdPercent); - return this.timer.update(this.checkThresholds.bind(this), interval) - .then(() => logger.info(`Monitor checks updated. Interval: ${this.timer.intervalInS}s | Memory Threshold: ${this.memoryThreshold} MB`)); - } - - /** - * Perform the actual checks and event notification - * Automatically adjusts timer interval depending on % memory usage - * - * @returns {Promise} resolved once all checks are done - */ - checkThresholds() { - let usedMem; - return Promise.resolve() - .then(() => { - usedMem = this.getProcessMemUsage(); - const memEventName = usedMem < this.memoryThreshold - ? APP_THRESHOLDS.MEMORY.OK : APP_THRESHOLDS.MEMORY.NOT_OK; - return this.safeEmitAsync('check', memEventName); - }) - .then(() => { - const usedMemPercent = Math.round((usedMem / this.memoryLimit) * 100); - logger.debug(`MEMORY_USAGE: ${usedMem} MB (${usedMemPercent}%, limit = ${this.memoryLimit} MB)`); - const newInterval = MEM_PERCENT_TO_INTERVAL_SEC.find( - (mapping) => usedMemPercent >= mapping.min && usedMemPercent <= mapping.max - ).interval; - if (this.timer.intervalInS !== newInterval) { - return this.update(this.memoryThresholdPercent, newInterval); - } - return Promise.resolve(); - }); - } - - /** - * Gets this node process' total memory usage (rss value) - * - * @returns {Number} - the memory usage in MB - */ - getProcessMemUsage() { - // use rss to add some extra buffer to our check (includes all c++ and js objects and code) - // node v15 adds a new method process.memoryUsage.rss() which is supposed to be faster - return process.memoryUsage().rss / BYTES_TO_MB_DIVISOR; - } -} - -const monitor = new Monitor(); - -monitor.on('error', (err) => { - logger.exception('An unexpected error occurred in monitor checks', err); -}); - -configWorker.on('change', (config) => Promise.resolve() - .then(() => { - logger.debug('configWorker change event in monitor'); - const controls = configUtil.getTelemetryControls(config); - const monitoringNeeded = configUtil.hasEnabledComponents(config); - const memThresholdPct = controls.memoryThresholdPercent; - - if (!monitoringNeeded || memThresholdPct >= 100) { - return monitor.stop(); - } - return monitor.timer.isActive() - ? 
monitor.update(memThresholdPct) - : monitor.start(memThresholdPct); - }).catch((err) => { - logger.exception('An error occurred in monitor checks (config change handler)', err); - })); - -module.exports = monitor; diff --git a/src/lib/utils/structures/circularArray.js b/src/lib/utils/structures/circularArray.js index 385c5547..1f6729c1 100644 --- a/src/lib/utils/structures/circularArray.js +++ b/src/lib/utils/structures/circularArray.js @@ -16,7 +16,8 @@ 'use strict'; -/* eslint-disable no-multi-assign, no-plusplus, no-var, one-var, one-var-declaration-per-line */ +/* eslint-disable no-multi-assign, no-plusplus, no-var, one-var, one-var-declaration-per-line, no-unused-expressions */ +/* eslint-disable no-nested-ternary */ /** * NOTE: `var`, `++` are intentional and helps to gain some perf */ @@ -56,9 +57,7 @@ class CircularArray { /** @returns {integer} number of items */ get length() { - return this._isEmpty - ? 0 - : ((this._backIdx >= this._frontIdx ? this._size : 0) - this._backIdx + this._frontIdx); + return this._isEmpty ? 0 : segmentLength(this._backIdx, this._frontIdx, this._size); } /** @returns {integer} buffer size (max number of items) */ @@ -76,6 +75,33 @@ class CircularArray { return this._storage[this._backIdx]; } + /** + * Array data (shallow-copy of it) + * + * NOTE: + * `callee` is responsible for cheking `start` and `end` to be + * withing the boundaries (>= startIdx and <= endIdx) + * + * @param {integer} [start] - start index + * @param {integer} [end] - index of last item to include + * + * @returns {Array} shallow-copy of data for provided range of indexes + */ + content(start, end) { + var slen = this._storage.length; + var storage = this._storage; + + if (arguments.length === 0) { + start = this.startIdx; + } + if (arguments.length < 2) { + end = this.endIdx; + } + return start <= end + ? storage.slice(start, end + 1) + : storage.slice(start, slen).concat(storage.slice(0, end + 1)); + } + /** * Erase all data * @@ -85,15 +111,13 @@ class CircularArray { options = options || {}; if (typeof options.size !== 'undefined') { const size = options.size; - if (!(Number.isSafeInteger(size) && size > 0)) { - throw RangeError(`Invalid "size" value. Should be an integer value greater than 0, got '${size}' instead (type = ${typeof size})`); - } + checkSizeValue(size); this._size = size; } else if (typeof this._size === 'undefined') { this._size = 1; } - const prealloc = options.prealloc === true + const prealloc = (this._size === 1 || options.prealloc === true) ? 
this._size : Math.min( this._size, @@ -106,6 +130,7 @@ class CircularArray { } if (!(this._storage && this._storage.length === prealloc)) { + // TODO: provide option for TypedArray this._storage = new Array(prealloc); } if (this._holeSet) { @@ -117,15 +142,19 @@ class CircularArray { this._isEmpty = true; } - /** Feel non-empty nodes with 'fill' value */ - fastErase() { + /** + * Feel non-empty nodes with 'fill' value + * + * @param {boolean} [freeRefs = true] - free object references + */ + fastErase(freeRefs) { var end = this.endIdx; var hole = this._hole; var idx = this._backIdx; var storage = this._storage; - if (!this._isEmpty) { - if (this.length === 1 || this.size === 1) { + if (!this._isEmpty && freeRefs !== false) { + if (this.length === 1) { storage[idx] = hole; } else { // read from left to right or from left to end @@ -141,8 +170,8 @@ class CircularArray { } } } - this._isEmpty = true; } + this._isEmpty = true; this._backIdx = this._frontIdx = 0; } @@ -154,10 +183,12 @@ class CircularArray { /** * @param {integer} idx - base index number, 0 <= idx < size * + * NOTE: '%' is slow, use basic comparisons + * * @returns {integer} next index number */ nextIdx(idx) { - return (idx + 1) % this._size; + return (this._size === 1 || ++idx >= this._size) ? 0 : idx; } /** @@ -173,11 +204,15 @@ class CircularArray { pop() { var value = this._storage[this._backIdx]; this._storage[this._backIdx] = this._hole; - this._backIdx = this.nextIdx(this._backIdx); - this._isEmpty = this._backIdx === this._frontIdx; - if (this._isEmpty) { - // rebase - this._backIdx = this._frontIdx = 0; + if (this._size === 1) { + this._isEmpty = true; + } else { + this._backIdx = this.nextIdx(this._backIdx); + this._isEmpty = this._backIdx === this._frontIdx; + if (this._isEmpty) { + // rebase + this._backIdx = this._frontIdx = 0; + } } return value; } @@ -185,10 +220,12 @@ class CircularArray { /** * @param {integer} idx - base index number, 0 <= idx < size * + * NOTE: '%' is slow, use basic comparisons + * * @returns {integer} previous index number */ prevIdx(idx) { - return (idx || this._size) - 1; + return this._size === 1 ? 0 : (idx === 0 || idx >= this._size) ? (this._size - 1) : --idx; } /** @@ -198,22 +235,29 @@ class CircularArray { */ push(value) { var oldValue = this._hole; - var sameCell = this._frontIdx === this._backIdx; + var sameCell; + var storage = this._storage; - if (this._frontIdx >= this._storage.length) { - this._storage.push(value); + if (this._size === 1) { + oldValue = storage[0]; + storage[0] = value; } else { - oldValue = this._storage[this._frontIdx]; - this._storage[this._frontIdx] = value; - } + sameCell = this._frontIdx === this._backIdx; + + if (this._frontIdx >= storage.length) { + storage.push(value); + } else { + oldValue = storage[this._frontIdx]; + storage[this._frontIdx] = value; + } - this._frontIdx = this.nextIdx(this._frontIdx); + this._frontIdx = this.nextIdx(this._frontIdx); - if (!this._isEmpty && sameCell) { - this._backIdx = this._frontIdx; + if (!this._isEmpty && sameCell) { + this._backIdx = this._frontIdx; + } } this._isEmpty = false; - return oldValue; } @@ -273,25 +317,396 @@ class CircularArray { } } -/** @returns {integer} Greater common divider */ -function getGCD(a, b) { - return (b ? 
getGCD(b, a % b) : a); +/** + * Reader Class + */ +class Reader { + /** + * @param {ReaderProxy} proxy - Reader proxy + * @param {integer} rid - Reader ID + */ + constructor(proxy, rid) { + this._proxy = proxy; + this._rid = rid; + } + + /** @returns {integer} end index */ + get endIdx() { + return this._proxy.endIdx(this); + } + + /** @returns {number} number of elements from current position to the end, e.g. from 5 to 10 = 6 */ + get length() { + return this._proxy.length(this); + } + + /** @returns {integer} start index */ + get startIdx() { + return this._proxy.startIdx(this); + } + + /** Destroy the reader */ + destroy() { + this._proxy.destroy(this); + } + + /** @returns {boolean} true the reader may need to make a copy of data it points to before any modifications */ + needCopy() { + return this._proxy.needCopy(this); + } + + /** @returns {any} value of the backmost node (will be deleted if no readers left behind) */ + pop() { + return this._proxy.pop(this); + } } /** - * Shift sub-array to index 0 + * Reader Proxy Class + */ +class ReaderProxy { + /** @param {CircularArrayMR} carr */ + constructor(carr) { + this._carr = carr; + // - readers should be sorted by .length property + // - reader with idx 0 should point to the backmost node + // [Reader, index, is-empty] + this._readers = []; + } + + /** @returns {Reader} a new reader that points to the beginning of the data */ + create() { + var readers = this._readers; + var rid = 0; // set it to 0 index + + readers.unshift([new Reader(this, rid), this._carr.startIdx, false]); + if (readers.length > 1) { + // re-index readers + readers.forEach((rdr, idx) => { rdr[0]._rid = idx; }); + } + return readers[rid][0]; + } + + /** @param {Reader} [rdr] - reader to destroy. If not set then all readers will be destroyed */ + destroy(rdr) { + var rid; + var readers = this._readers; + var rlen = readers.length; + var dlen = 0; + + // if there is only 1 reader then all data should be left untouched + if (!rdr || rlen < 2) { + readers.forEach((reader) => { + reader[0]._proxy = null; + }); + this._readers = []; + return; + } + // if there are more than 1 reader then all nodes between + // reader 0 and reader 1 should be freed + + rid = rdr._rid; + rdr = readers[rid]; + rdr[0]._proxy = null; + + // if reader is empty and rid === 0 - no data at all in the list + if (rid === 0 && this.length(rdr[0])) { + // free nodes between reader 0 and 1 + dlen = this._carr.length - this.length(readers[1][0]); + while (dlen--) { + this._carr._pop(); + } + // no data left - reset all readers to be in sync with the array + if (this._carr._isEmpty && rlen) { + this.resetAll(); + } + } + + // re-index readers + rid++; + while (rid < rlen) { + rdr = readers[rid]; + rdr[0]._rid = rid - 1; + readers[rdr[0]._rid] = rdr; + rid++; + } + + readers.length = rlen - 1; + } + + /** + * @param {Reader} rdr + * + * @returns {integer} end index + */ + endIdx() { + return this._carr.endIdx; + } + + /** + * @param {Reader} rdr + * + * @returns {number} calculate length for the reader + */ + length(rdr) { + rdr = this._readers[rdr._rid]; + return (this._carr._isEmpty || (rdr[2] && rdr[1] === this._carr._frontIdx)) + ? 
0 + : segmentLength(rdr[1], this._carr._frontIdx, this._carr.size); + } + + /** + * @param {Reader} rdr + * + * @returns {boolean} true the reader needs to make a copy of data it points to before any modifications + */ + needCopy(rdr) { + var readers = this._readers; + // data may need a copy if: + // - more than 1 reader registered + // - reader doesn't point to the backmost item + // if reader points to the backmost item and + // next reader too and has no data left to read + // then no copy needed + return readers.length !== 1 && ( + rdr._rid !== 0 + || (readers[0][1] === readers[1][1] && !readers[1][2])); + } + + /** @returns {integer} number of active readers */ + numberOfReaders() { + return this._readers.length; + } + + /** + * `callee` is responsible to check `.length` property for presense of + * data before call .pop() method + * + * @param {Reader} rdr + * + * @returns {any} value of the backmost node (will be deleted if no readers left behind) + */ + pop(rdr) { + var cid; + var nextlen; + var readers = this._readers; + var retval; + var rid = rdr._rid; + var rlen = readers.length - 1; + rdr = readers[rid]; + nextlen = this.length(rdr[0]) - 1; + cid = rdr[1]; + + if (rlen === 0 || (rid === 0 && (readers[1][1] !== cid || readers[1][2]))) { + // call .pop() when: + // - one reader only + // - the reader points to the tail and: + // - next reader does not + // - next reader has no data to read (is empty) + // NOTE: _readers should be sorted already + retval = this._carr._pop(); + + if (this._carr._isEmpty) { + // all data read, reset position of all readers to be in sync with the array + // also is a shortcut for size === 1 + this.resetAll(); + } else if (rlen && readers[1][1] === cid && readers[1][2] && cid !== this._carr._frontIdx) { + // next reader points to `cid` (origin position) and read its data already and new + // data was pushed since last .pop() for that reader - need to sync + this.sync(); + } + cid = this._carr._backIdx; + } else { + retval = this._carr.peak(cid); + // array sorted already, readers[i].length >= readers[i + 1].length + while (rid < rlen && this.length(readers[rid + 1][0]) > nextlen) { + // swap readers and update rid + readers[rid] = readers[rid + 1]; + readers[rid][0]._rid = rid++; + } + if (rdr[0]._rid !== rid) { + // position changed + rdr[0]._rid = rid; + readers[rid] = rdr; + } + cid = this._carr.nextIdx(cid); + rdr[2] = cid === this._carr._frontIdx; + } + + rdr[1] = cid; + return retval; + } + + /** + * Update all readers after .rebase() call + * + * @param {integer} delta - difference for the backmost node IDx before and aftet .rebase() call + */ + rebase(delta) { + if (delta) { + this._readers.forEach((reader) => { + var cid = reader[1] - delta; + if (cid < 0) { + // frontIndx <= backIndx in the past + // calculate a new position + cid += this._carr.size; + } + reader[1] = cid; + }); + } + } + + /** Reset all readers to point to the beginning of data */ + resetAll() { + this._readers.forEach((reader) => { + reader[1] = this._carr.startIdx; + reader[2] = false; + }); + } + + /** + * @param {Reader} rdr + * + * @returns {integer} start index + */ + startIdx(rdr) { + return this._readers[rdr._rid][1]; + } + + /** Synchronize readers state with the underlying array state */ + sync() { + var isEmpty = this._carr._isEmpty; + var readers = this._readers; + var rlen = readers.length; + var rdr = readers[0]; + var prev = rdr[1]; // points to prev value of _backIdx + var cur = this._carr.nextIdx(prev); + + if (rlen === 1) { + // single reader points to 
the backmost node only + rdr[1] = cur; + rdr[2] = isEmpty; + } else { + for (let i = 0; i < rlen; i++) { + rdr = readers[i]; + if (rdr[1] !== prev) { + break; + } + rdr[1] = cur; + rdr[2] = isEmpty; + } + } + } +} + +/** + * Circular Array Class extension with Multiple Readers * - * @param {any[]} array - * @param {integer} startIdx - * @param {integer} endIdx + * Multiple Readers are able to read data without removing + * it from the underlying array until no refs left. + * + * Example: + * + * reader = cl.reader(); + * // reader.length === cl.length === 5; + * reader.pop(); // some data returned + * // reader.length === cl.length === 4; + * + * reader2 = cl.reader(); + * // reader2.length === reader.length === cl.length === 4; + * reader2.pop(); // some data returned + * // reader2.length === 3, reader.length === cl.length === 4; + * reader2.pop(); // some data returned + * // reader2.length === 2, reader.length === cl.length === 4; + * reader.pop(); // some data returned + * // reader2.length === 2, reader.length === cl.length === 3; + * reader.destroy(); + * // reader2.length === cl.length === 2; + * + * `callee` is responsible to check `.length` property for presense of + * data before call .pop() method */ -function shiftSubArray(array, startIdx, endIdx) { - var i = startIdx; - for (; i < endIdx; i += 1) { - array[i - startIdx] = array[i]; +class CircularArrayMR extends CircularArray { + /** @returns {integer} number of active readers */ + get readers() { + return this._readers.numberOfReaders(); + } + + /** + * Origin pop method + * + * @see CircularArray.pop + */ + _pop() { + return super.pop.apply(this, arguments); + } + + /** + * Erase all data + * + * @see CircularArray.erase + * + * @param {boolean} [options.keepReaders = false] - keep readers + */ + erase(options) { + super.erase(options); + + if (options && options.keepReaders && this._readers) { + // reset readers state only + this._readers.resetAll(); + } else if (this._readers) { + this._readers.destroy(); + } else { + this._readers = new ReaderProxy(this); + } + } + + /** @see CircularArray.fastErase */ + fastErase() { + super.fastErase.apply(this, arguments); + this._readers.resetAll(); + } + + /** @see CircularArray.pop */ + pop() { + var ret = super.pop.apply(this, arguments); + this.readers && this._readers.sync(); + return ret; + } + + /** @see CircularArray.push */ + push() { + var before = this._backIdx; + var ret = super.push.apply(this, arguments); + (this._size === 1 || this._backIdx !== before) && this.readers && this._readers.sync(); + return ret; + } + + /** @returns {Reader} a new reader */ + reader() { + return this._readers.create(); + } + + /** @see CircularArray.rebase */ + rebase() { + const before = this._backIdx; + super.rebase.apply(this, arguments); + this._readers.rebase(before - this._backIdx); + } +} + +/** @throws {RangeError} when invalid value passed */ +function checkSizeValue(size) { + if (!Number.isSafeInteger(size) || size < 1) { + throw RangeError(`Invalid "size" value. Should be an integer value greater than 0, got '${size}' instead (type = ${typeof size})`); } } +/** @returns {integer} Greater common divider */ +function getGCD(a, b) { + return (b ? 
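For readers following the multi-reader array above, a minimal consumption sketch based on the class comment: the constructor options and push() setup are assumptions (see the InitOptions typedef at the end of the module), while reader(), reader.length, reader.pop() and reader.destroy() are the documented surface.

'use strict';

const { CircularArrayMR } = require('./circularArray');

// Assumed constructor shape; adjust to the actual InitOptions of CircularArray.
const buffer = new CircularArrayMR({ size: 8 });
['a', 'b', 'c'].forEach((item) => buffer.push(item));

const fast = buffer.reader();
const slow = buffer.reader();

fast.pop(); // 'a' - the data stays in the array while `slow` still references it
// fast.length === 2, slow.length === buffer.length === 3

slow.pop(); // 'a' - no reader references the backmost node anymore, so it is freed
// slow.length === fast.length === buffer.length === 2

slow.destroy(); // remaining readers keep working; `fast` still sees 2 items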
getGCD(b, a % b) : a); +} + /** * Rotate sub-array * end start @@ -353,7 +768,35 @@ function rotateSubArray(array, startIdx, endIdx, length) { return delta !== 0; } -module.exports = CircularArray; +/** + * @param {integer} start - start position + * @param {integer} end - end position + * @param {integer} size - size of object + * + * @returns {integer} length of segment defined by `start` and `end` + */ +function segmentLength(start, end, size) { + return (start >= end ? size : 0) - start + end; +} + +/** + * Shift sub-array to index 0 + * + * @param {any[]} array + * @param {integer} startIdx + * @param {integer} endIdx + */ +function shiftSubArray(array, startIdx, endIdx) { + var i = startIdx; + for (; i < endIdx; i += 1) { + array[i - startIdx] = array[i]; + } +} + +module.exports = { + CircularArray, + CircularArrayMR +}; /** * @typedef InitOptions diff --git a/src/lib/utils/structures/circularLinkedList.js b/src/lib/utils/structures/circularLinkedList.js index 8c126d48..02b0db28 100644 --- a/src/lib/utils/structures/circularLinkedList.js +++ b/src/lib/utils/structures/circularLinkedList.js @@ -16,10 +16,8 @@ 'use strict'; -/* eslint-disable no-multi-assign, no-plusplus, no-var, one-var, one-var-declaration-per-line */ -/** - * NOTE: `var`, `++` are intentional and helps to gain some perf - */ +/* eslint-disable no-multi-assign, no-plusplus, no-unneeded-ternary, operator-linebreak */ +/* eslint-disable no-unused-expressions, no-var, one-var, one-var-declaration-per-line */ /** * CircularLinked List Class (FIFO) @@ -38,7 +36,7 @@ class CircularLinkedList { size = Number.MAX_SAFE_INTEGER; if (arguments.length) { - this._checkSizeValue(arguments[0]); + checkSizeValue(arguments[0]); size = arguments[0]; } @@ -89,10 +87,6 @@ class CircularLinkedList { if (restore) { this._size = this._oldSize; } - - if (this._front) { - this._front.next = null; - } } } @@ -112,13 +106,9 @@ class CircularLinkedList { this._oldSize = this._size; if (arguments.length) { - this._checkSizeValue(newSize); + checkSizeValue(newSize); this._size = newSize; } - - if (this._front) { - this._front.next = this._back; - } } } @@ -140,17 +130,10 @@ class CircularLinkedList { * @returns {any} value of deleted the backmost node * * NOTE: - * - if `ring` enabled it may remove nodes if limit exceeded * - should check .length before calling .pop() */ pop() { var retval = this._back.value; - if (this._ring) { - // 0 (back) -> 1 (back.next) -> 2 -> 3 (front) -> 0 (front.next == back) - this._front.next = this._back.next; - // set to null if it was the last element in the ring - this._back.next = this._back.next === this._back ? null : this._back.next; - } this._back = this._back.next; this._front = this._back && this._front; this._length--; @@ -169,10 +152,12 @@ class CircularLinkedList { if (this._front) { if (this._ring && this._length >= this._size) { // start overriding existing items - this._front = this._front.next; - retval = this._front.value; + retval = this._back.value; + + this._front = this._front.next = this._back; + this._back = this._front.next; // points to next node or to itself + this._front.next = null; this._front.value = value; - this._back = this._front.next; } else { this._front = this._front.next = { next: this._front.next, @@ -185,19 +170,370 @@ class CircularLinkedList { next: null, value }; - if (this._ring) { - this._front.next = this._back; - } this._length++; } return retval; } +} - _checkSizeValue(size) { - if (size < 1 || !Number.isSafeInteger(size)) { - throw RangeError(`Invalid "size" value. 
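The wrap-around arithmetic in segmentLength is easiest to see with concrete numbers; a small standalone check (the function body is copied verbatim from above):

function segmentLength(start, end, size) {
    return (start >= end ? size : 0) - start + end;
}

// No wrap: reader at index 2, front at index 7 in a 10-slot ring.
console.log(segmentLength(2, 7, 10)); // 5

// Wrapped: reader at index 7, front at index 2 - the segment crosses the end of the buffer.
console.log(segmentLength(7, 2, 10)); // 5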
Should be an integer value greater than 0, got '${size}' instead (type = ${typeof size})`); +/** @throws {RangeError} when invalid value passed */ +function checkSizeValue(size) { + if (!Number.isSafeInteger(size) || size < 1) { + throw RangeError(`Invalid "size" value. Should be an integer value greater than 0, got '${size}' instead (type = ${typeof size})`); + } +} + +/** + * Reader Class + */ +class Reader { + /** + * @param {ReaderProxy} proxy - Reader proxy + * @param {integer} rid - Reader ID + */ + constructor(proxy, rid) { + this._proxy = proxy; + this._rid = rid; + } + + /** @returns {any} value of the backmost node */ + bpeak() { + return this._proxy.bpeak(this); + } + + /** Destroy the reader */ + destroy() { + this._proxy.destroy(this); + } + + /** @returns {any} value of the topmost node */ + fpeak() { + return this._proxy.fpeak(this); + } + + /** @returns {boolean} true if there is data to read */ + hasData() { + return this._proxy.hasData(this); + } + + /** @returns {boolean} true the reader may need to make a copy of data it points to before any modifications */ + needCopy() { + return this._proxy.needCopy(this); + } + + /** @returns {any} value of the backmost node (will be deleted if no readers left behind) */ + pop() { + return this._proxy.pop(this); + } +} + +/** + * Reader Proxy Class + */ +class ReaderProxy { + constructor(clist) { + this._clist = clist; + // [Reader, node-to-read, is-last-node-read] + // node-to-read usually is null when reader points to the tail + this._readers = []; + } + + /** + * @param {Reader} rdr + * + * @returns {any} value of the backmost node + */ + bpeak(rdr) { + return this._readers[rdr._rid][1].value; + } + + /** @returns {Reader} a new reader that points to the beginning of the data */ + create() { + var rid = this._readers.length; + // [Reader, node-to-read, is-last-node-read] + this._readers.push([new Reader(this, rid), null, false]); + return this._readers[rid][0]; + } + + /** @param {Reader} [rdr] - reader to destroy. If not set then all readers will be destroyed */ + destroy(rdr) { + var rid; + var readers = this._readers; + var rlen = readers.length; + + // if there is only 1 reader then all data should be left untouched + if (!rdr || rlen < 2) { + // eslint-disable-next-line no-shadow + this._readers.forEach((reader) => { + reader[0]._proxy = null; + }); + this._readers = []; + return; + } + // if there are more than 1 reader then all nodes in between should be freed + + rid = rdr._rid; + rdr = readers[rdr._rid]; + rdr[0]._proxy = null; + + // check that reader points to the backmost node and there is data at all + if ((rdr[1] === null || rdr[1] === this._clist.back) && this._clist.length) { + // if the reader is the single owner of the backmost node then + // need to remove all nodes between that node and the next closest one + rdr[2] = false; // reset flag + while (this.numberOfBacknodeRefs() === 1 && this._clist.length) { + // that's us, safe to .pop() + this._clist.pop(); + } + } + + // remove reader of the list of registered readers + if (rid !== --rlen) { + readers[rid] = readers[rlen]; + readers[rid][0]._rid = rid; } + readers.length = rlen; + } + + /** + * @param {Reader} rdr + * + * @returns {any} value of the topmost node */ + fpeak() { + return this._clist.fpeak(); + } + + /** + * @param {Reader} rdr + * + * @returns {boolean} true if there is data to read */ + hasData(rdr) { + rdr = this._readers[rdr._rid]; + return rdr[1] === null // points to the backmost node + ? 
this._clist.length !== 0 + : (!rdr[2] || rdr[1].next !== null); // last node was read already or new data added after that + } + + /** + * @param {Array} rdr + * + * @returns {integer} 1 if reader points to the backmost node else 0 + */ + isBacknodeRef(rdr) { + return ((rdr[1] === this._clist.back && !rdr[2]) || rdr[1] === null) + ? 1 + : 0; + } + + /** + * @param {Reader} rdr + * + * @returns {boolean} true the reader needs to make a copy of data it points to before any modifications + */ + needCopy(rdr) { + var rnode = this._readers[rdr._rid][1]; + return (rnode !== null && rnode !== this._clist.back) || this.numberOfBacknodeRefs() > 1; + } + + /** @returns {integer} number of active refs for the backmost node */ + numberOfBacknodeRefs() { + var i = 0; + var readers = this._readers; + var refsnum = 0; + var rnum = readers.length; + + if (rnum === 1) { + refsnum = this.isBacknodeRef(readers[0]); + } else if (rnum === 2) { + refsnum + = this.isBacknodeRef(readers[0]) + + this.isBacknodeRef(readers[1]); + } else if (rnum === 3) { + refsnum + = this.isBacknodeRef(readers[0]) + + this.isBacknodeRef(readers[1]) + + this.isBacknodeRef(readers[2]); + } else if (rnum === 4) { + refsnum + = this.isBacknodeRef(readers[0]) + + this.isBacknodeRef(readers[1]) + + this.isBacknodeRef(readers[2]) + + this.isBacknodeRef(readers[3]); + } else { + for (; i < rnum; i++) { + refsnum += this.isBacknodeRef(readers[i]); + } + } + return refsnum; + } + + /** @returns {integer} number of active readers */ + numberOfReaders() { + return this._readers.length; + } + + /** + * `callee` is responsible to check `.hasData` property for presense of + * data before call .pop() method + * + * @param {Reader} rdr + * + * @returns {any} value of the backmost node (will be deleted if no readers left behind) + */ + pop(rdr) { + var backnode = this._clist.back; + var readers = this._readers; + var retval; + var rnode; + rdr = readers[rdr._rid]; + + if (readers.length === 1) { + // shortcut for a single reader + retval = this._clist._pop(); // throws error when no data + rdr[1] = null; + rdr[2] = false; + return retval; + } + + rnode = rdr[1]; + if (rnode === null) { + // - new reader + // - .erase() called + // - no data left after prev call + rnode = backnode; // may be a null but it is ok + } else if (rdr[2]) { + // the node is the head and was read already + rnode = rnode.next; // may be a null but it is ok + } + + retval = rnode.value; // throws error when no data because rnode === null + // "retain" ref to next node + if (rnode.next === null) { + // the only line in the code + // where ref to `backnode` can be saved + rdr[1] = rnode; + rdr[2] = true; + } else { + rdr[1] = rnode.next; + rdr[2] = false; + } + + // need to make decision about current node removal + if (rnode === backnode && this.numberOfBacknodeRefs() === 0) { + this._clist._pop(); + this.rebase(this._clist.length ? 
rnode : null); + } + return retval; + } + + /** + * Update readers of the backmost node to point to a new location + * + * @param {null | OneWayNode} oldback + */ + rebase(oldback) { + var readers = this._readers; + var rnum = readers.length; + var i; + + if (rnum === 1) { + readers[0][1] = null; + readers[0][2] = false; + } else { + for (i = 0; i < rnum; i++) { + if (oldback === null || readers[i][1] === oldback) { + readers[i][1] = null; + readers[i][2] = false; + } + } + } + } + + /** Reset all readers to point to the beginning of data */ + resetAll() { + this.rebase(null); + } +} + +/** + * Circular List Class extension with Multiple Readers + * + * Multiple Readers are able to read data without removing + * it from the underlying array until no refs left. + * + * Example: + * + * reader = cl.reader(); + * // reader.hasData() === true; + * reader.pop(); // some data returned + * // reader.hasData() === true; + * + * `callee` is responsible to check `.length` property for presense of + * data before call .pop() method + */ +class CircularLinkedListMR extends CircularLinkedList { + /** @returns {integer} number of active readers */ + get readers() { + return this._readers.numberOfReaders(); + } + + /** + * Origin pop method + * + * @see CircularLinkedList.pop + */ + _pop() { + return super.pop.apply(this, arguments); + } + + /** + * Erase all data + * + * @see CircularLinkedList.erase + * + * @param {boolean} [keepReaders = false] - keep readers + */ + erase(keepReaders) { + super.erase(); + if (keepReaders && this._readers) { + this._readers.resetAll(); + } else if (this._readers) { + this._readers.destroy(); + } else { + this._readers = new ReaderProxy(this); + } + } + + /** @see CircularLinkedList.push */ + pop() { + var backnode = this.back; + var ret = super.pop.apply(this, arguments); + this.readers && this._readers.rebase(backnode); + return ret; + } + + /** @see CircularLinkedList.push */ + push() { + var backnode = this.back; + var ret = super.push.apply(this, arguments); + this.front === backnode && this.readers && this._readers.rebase(backnode); + return ret; + } + + /** @returns {Reader} a new reader */ + reader() { + return this._readers.create(); } } -module.exports = CircularLinkedList; +module.exports = { + CircularLinkedList, + CircularLinkedListMR +}; + +/** + * @typedef OneWayNode + * @type {Object} + * @property {null | OneWayNode} next + * @property {any} value + */ diff --git a/src/lib/utils/structures/index.js b/src/lib/utils/structures/index.js index 6bdff505..2da053e6 100644 --- a/src/lib/utils/structures/index.js +++ b/src/lib/utils/structures/index.js @@ -20,6 +20,8 @@ const CircularArray = require('./circularArray'); const CircularLinkedList = require('./circularLinkedList'); module.exports = { - CircularArray, - CircularLinkedList + CircularArray: CircularArray.CircularArray, + CircularArrayMR: CircularArray.CircularArrayMR, + CircularLinkedList: CircularLinkedList.CircularLinkedList, + CircularLinkedListMR: CircularLinkedList.CircularLinkedListMR }; diff --git a/src/nodejs/restWorker.js b/src/nodejs/restWorker.js index 5fd5588e..34b6fac1 100644 --- a/src/nodejs/restWorker.js +++ b/src/nodejs/restWorker.js @@ -26,21 +26,23 @@ const logger = require('../lib/logger'); const util = require('../lib/utils/misc'); const ActivityRecorder = require('../lib/activityRecorder'); +const DataPipeline = require('../lib/dataPipeline'); +const EventListener = require('../lib/eventListener'); const deviceUtil = require('../lib/utils/device'); const retryPromise = 
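With the structures index now re-exporting both the plain and multi-reader variants, call sites opt in explicitly. A brief sketch of the linked-list flavour; hasData() and pop() are the documented reader surface, and the no-argument constructor relies on the default size shown in the constructor above.

'use strict';

const { CircularLinkedListMR } = require('./structures');

const queue = new CircularLinkedListMR(); // default (unbounded) size per the constructor above
queue.push('event-1');
queue.push('event-2');

const reader = queue.reader();
while (reader.hasData()) {
    console.log(reader.pop()); // 'event-1', then 'event-2'
}
// With a single reader the underlying nodes are removed as they are read;
// a second reader() would keep them alive until it catches up.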
require('../lib/utils/promise').retry; const persistentStorage = require('../lib/persistentStorage'); const configWorker = require('../lib/config'); const requestRouter = require('../lib/requestHandlers/router'); +const ResourceMonitor = require('../lib/resourceMonitor'); +const RuntimeConfig = require('../lib/runtimeConfig'); +const SystemPoller = require('../lib/systemPoller'); const configListenerModulesToLoad = [ - '../lib/eventListener', '../lib/consumers', '../lib/pullConsumers', - '../lib/systemPoller', '../lib/ihealth', '../lib/requestHandlers/connections', - '../lib/tracerManager.js', - '../lib/utils/monitor.js' + '../lib/tracerManager.js' ]; configListenerModulesToLoad.forEach((module) => { @@ -124,15 +126,32 @@ RestWorker.prototype._initializeApplication = function (success, failure) { this.activityRecorder = new ActivityRecorder(); this.activityRecorder.recordDeclarationActivity(configWorker); + const appCtx = { + configMgr: configWorker, + resourceMonitor: new ResourceMonitor(), + runtimeConfig: new RuntimeConfig() + }; + + appCtx.resourceMonitor.initialize(appCtx); + appCtx.runtimeConfig.initialize(appCtx); + + DataPipeline.initialize(appCtx); + EventListener.initialize(appCtx); + SystemPoller.initialize(appCtx); + // configure global socket maximum http.globalAgent.maxSockets = 5; https.globalAgent.maxSockets = 5; - // try to load pre-existing configuration - const ps = persistentStorage.persistentStorage; - // only RestStorage is supported for now - ps.storage = new persistentStorage.RestStorage(this); - ps.load() + appCtx.runtimeConfig.start() + .then(() => appCtx.resourceMonitor.start()) + .then(() => { + // try to load pre-existing configuration + const ps = persistentStorage.persistentStorage; + // only RestStorage is supported for now + ps.storage = new persistentStorage.RestStorage(this); + return ps.load(); + }) .then((loadedState) => { logger.debug(`Loaded state ${util.stringify(loadedState)}`); }) diff --git a/src/schema/1.35.0/actions_schema.json b/src/schema/1.35.0/actions_schema.json new file mode 100644 index 00000000..c658439b --- /dev/null +++ b/src/schema/1.35.0/actions_schema.json @@ -0,0 +1,187 @@ +{ + "$id": "actions_schema.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Telemetry Streaming Actions schema", + "description": "", + "type": "object", + "definitions": { + "baseActionsChain": { + "title": "Chain of Actions", + "description": "Actions to be performed on the data.", + "type": "array", + "items": { + "$ref": "#/definitions/baseActionObject" + } + }, + "baseActionObject": { + "title": "Base Action object", + "description": "Base object to build actions.", + "type": "object", + "properties": { + "enable": { + "title": "Enable", + "description": "Whether to enable this action in the declaration or not.", + "type": "boolean", + "default": true + } + } + }, + "baseConditionalActionObject": { + "title": "Base Action object with support for conditional statements", + "description": "Base Action object with support for conditional statements.", + "type": "object", + "allOf": [ + { "$ref": "#/definitions/baseActionObject" }, + { + "anyOf": [ + { + "properties": { + "ifAllMatch": { + "title": "If All Match", + "description": "The conditions that will be checked against. All must be true.", + "type": "object", + "additionalProperties": true + } + }, + "not": { "required": ["ifAnyMatch"] } + }, + { + "properties": { + "ifAnyMatch": { + "title": "If Any Match", + "description": "An array of ifAllMatch objects. 
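The restWorker change above threads one shared appCtx object through every service instead of having modules require() singletons. A hypothetical service written against that pattern might look like this; the module name and its internals are illustrative, only the initialize(appCtx)/start() shape mirrors the calls made in _initializeApplication.

'use strict';

// Hypothetical service following the appCtx pattern used in restWorker.js.
class ExampleService {
    initialize(appCtx) {
        // keep references to the shared managers handed in by restWorker
        this._configMgr = appCtx.configMgr;
        this._resourceMonitor = appCtx.resourceMonitor;
    }

    start() {
        // called after runtimeConfig and resourceMonitor have been started
        return Promise.resolve();
    }
}

module.exports = ExampleService;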
Any individual ifAllMatch object may match, but each condition within an ifAllMatch object must be true", + "type": "array" + } + }, + "not": { "required": ["ifAllMatch"] } + } + ] + } + ] + }, + "subLocation": { + "title": "Location", + "description": "Used to specify a location in TS data. Use boolean type with value true to specify the location.", + "oneOf": [ + { + "type": "boolean", + "const": true + }, + { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/subLocation" + } + } + ] + }, + "locations": { + "title": "Location", + "description": "The location(s) to apply the action.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/subLocation" + } + }, + "setTagAction": { + "title": "setTag Action", + "description": "Action to assign a tag(s) to particular or default location", + "type": "object", + "allOf": [ + { "$ref": "#/definitions/baseConditionalActionObject" }, + { + "properties": { + "setTag": { + "title": "Set Tag", + "description": "The tag values to be added.", + "type": "object", + "additionalProperties": true + }, + "locations": { + "title": "Location", + "description": "The location(s) to apply the action.", + "allOf": [{ "$ref": "#/definitions/locations" }] + }, + "enable": {}, + "ifAllMatch": {}, + "ifAnyMatch": {} + }, + "additionalProperties": false, + "required": ["setTag"] + } + ] + }, + "includeDataAction": { + "title": "includeData Action", + "description": "Action to specify data fields to include in the output", + "type": "object", + "allOf": [ + { "$ref": "#/definitions/baseConditionalActionObject" }, + { + "properties": { + "includeData": { + "title": "Include Data", + "description": "The data fields to include in the output", + "type": "object", + "additionalProperties": false + }, + "locations": { + "title": "Location", + "description": "The location(s) to apply the action.", + "allOf": [{ "$ref": "#/definitions/locations" }] + }, + "enable": {}, + "ifAllMatch": {}, + "ifAnyMatch": {} + }, + "additionalProperties": false, + "required": ["includeData", "locations"] + } + ] + }, + "excludeDataAction": { + "title": "excludeData Action", + "description": "Action to specify data fields to exclude form the output", + "type": "object", + "allOf": [ + { "$ref": "#/definitions/baseConditionalActionObject" }, + { + "properties": { + "excludeData": { + "title": "Exclude Data", + "description": "The data fields to exclude from the output", + "type": "object", + "additionalProperties": false + }, + "locations": { + "title": "Location", + "description": "The location(s) to apply the action.", + "allOf": [{ "$ref": "#/definitions/locations" }] + }, + "enable": {}, + "ifAllMatch": {}, + "ifAnyMatch": {} + }, + "additionalProperties": false, + "required": ["excludeData", "locations"] + } + ] + }, + "inputDataStreamActionsChain": { + "title": "", + "description": "", + "allOf": [ + { "$ref": "#/definitions/baseActionsChain" }, + { + "items": { + "oneOf": [ + { "$ref": "#/definitions/excludeDataAction" }, + { "$ref": "#/definitions/includeDataAction" }, + { "$ref": "#/definitions/setTagAction" } + ] + } + } + ] + } + } +} \ No newline at end of file diff --git a/src/schema/1.35.0/base_schema.json b/src/schema/1.35.0/base_schema.json new file mode 100644 index 00000000..6acb1db3 --- /dev/null +++ b/src/schema/1.35.0/base_schema.json @@ -0,0 +1,310 @@ +{ + "$id": "base_schema.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Telemetry Streaming", + "description": "", + "type": "object", + "definitions": { + 
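Rendered as a declaration fragment, the action definitions above combine like this; the values are placeholders, and where the chain attaches (a poller or listener `actions` array) follows the existing TS documentation.

// Illustrative actions chain validated by actions_schema.json
const actions = [
    {
        // add tags only when the condition matches
        setTag: { tenant: 'app-tenant', environment: 'prod' },
        ifAllMatch: { system: { hostname: 'bigip1.example.com' } }
    },
    {
        // keep only the `system` subtree in the output
        includeData: {},
        locations: { system: true }
    }
];

console.log(JSON.stringify(actions, null, 4));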
"enable": { + "title": "Enable", + "description": "This property can be used to enable/disable the poller/listener" , + "type": "boolean" + }, + "trace": { + "title": "Trace", + "description": "Enables data dumping to file. Boolean uses pre-defined file location, however value could be a string which contains path to a specific file instead" , + "minLength": 1, + "type": ["boolean", "string"] + }, + "traceConfig": { + "title": "Trace (v2)", + "description": "Enables data dumping to file. Boolean uses pre-defined file location, however value could be a string which contains path to a specific file instead", + "type": "object", + "properties": { + "type": { + "title": "Trace type", + "description": "Trace type - output data or input data", + "type": "string", + "enum": ["output", "input"] + }, + "path": { + "title": "Path to trace file", + "description": "Path to trace file to write data to", + "type": "string", + "minLength": 1 + } + }, + "required": ["type"] + }, + "traceV2": { + "title": "Trace (v2)", + "description": "Enables data dumping to file. Boolean uses pre-defined file location, however value could be a string which contains path to a specific file instead", + "oneOf": [ + { "$ref": "#/definitions/traceConfig" }, + { + "type": "array", + "minItems": 1, + "maxItems": 2, + "uniqueItemProperties": ["type"], + "items": { + "allOf": [{ + "$ref": "#/definitions/traceConfig" + }] + } + } + ] + }, + "secret": { + "title": "Passphrase (secret)", + "description": "" , + "type": "object", + "properties": { + "class": { + "title": "Class", + "description": "Telemetry streaming secret class", + "type": "string", + "enum": [ "Secret" ], + "default": "Secret" + }, + "cipherText": { + "title": "Cipher Text: this contains a secret to encrypt", + "type": "string" + }, + "environmentVar": { + "title": "Environment Variable: this contains the named env var where the secret resides", + "type": "string", + "minLength": 1 + }, + "protected": { + "$comment": "Meta property primarily used to determine if 'cipherText' needs to be encrypted", + "title": "Protected", + "type": "string", + "enum": [ "plainText", "plainBase64", "SecureVault" ], + "default": "plainText" + } + }, + "oneOf": [ + { "required": [ "cipherText" ] }, + { "required": [ "environmentVar" ] } + ], + "f5secret": true + }, + "username": { + "$comment": "Common field for username to use everywhere in scheme", + "title": "Username", + "type": "string", + "minLength": 1 + }, + "stringOrSecret": { + "allOf": [ + { + "if": { "type": "string" }, + "then": {}, + "else": {} + }, + { + "if": { "type": "object" }, + "then": { "$ref": "base_schema.json#/definitions/secret" }, + "else": {} + } + ] + }, + "constants": { + "title": "Constants", + "description": "" , + "type": "object", + "properties": { + "class": { + "title": "Class", + "description": "Telemetry streaming constants class", + "type": "string", + "enum": [ "Constants" ] + } + }, + "additionalProperties": true + }, + "tag": { + "$comment": "Defaults do not get applied for $ref objects, so place defaults alongside instead.", + "title": "Tag", + "description": "" , + "type": "object", + "properties": { + "tenant": { + "title": "Tenant tag", + "type": "string", + "minLength": 1 + }, + "application": { + "title": "Application tag", + "type": "string", + "minLength": 1 + } + }, + "additionalProperties": true + }, + "match": { + "$comment": "Defaults do not get applied for $ref objects, so place defaults alongside instead.", + "title": "Pattern to filter data", + "description": "", + "type": 
"string" + }, + "enableHostConnectivityCheck": { + "$comment": "This property can be used to enable/disable the host connectivity check in configurations where this is in effect", + "title": "Host", + "description": "" , + "type": "boolean" + }, + "allowSelfSignedCert": { + "$comment": "This property can be used by consumers, system pollers to enable/disable SSL Cert check", + "title": "Allow Self-Signed Certificate", + "description": "" , + "type": "boolean" + }, + "host": { + "$comment": "This property can be used by consumers, system pollers", + "title": "Host", + "description": "" , + "type": "string", + "minLength": 1, + "anyOf": [ + { "format": "ipv4" }, + { "format": "ipv6" }, + { "format": "hostname" } + ], + "hostConnectivityCheck": true + }, + "port": { + "title": "Port", + "description": "" , + "type": "integer", + "minimum": 0, + "maximum": 65535 + }, + "protocol": { + "title": "Protocol", + "description": "" , + "type": "string", + "enum": [ "http", "https" ] + }, + "proxy": { + "title": "Proxy Configuration", + "description": "", + "type": "object", + "dependencies": { + "passphrase": [ "username" ] + }, + "required": [ "host" ], + "properties": { + "host": { + "$ref": "#/definitions/host" + }, + "port": { + "default": 80, + "allOf": [ + { + "$ref": "#/definitions/port" + } + ] + }, + "protocol": { + "default": "http", + "allOf": [ + { + "$ref": "#/definitions/protocol" + } + ] + }, + "enableHostConnectivityCheck": { + "$ref": "#/definitions/enableHostConnectivityCheck" + }, + "allowSelfSignedCert": { + "$ref": "#/definitions/allowSelfSignedCert" + }, + "username": { + "$ref": "#/definitions/username" + }, + "passphrase": { + "$ref": "#/definitions/secret" + } + }, + "additionalProperties": false + } + }, + "properties": { + "class": { + "title": "Class", + "description": "Telemetry streaming top level class", + "type": "string", + "enum": [ "Telemetry" ] + }, + "schemaVersion": { + "title": "Schema version", + "description": "Version of ADC Declaration schema this declaration uses", + "type": "string", + "$comment": "IMPORTANT: In enum array, please put current schema version first, oldest-supported version last. Keep enum array sorted most-recent-first.", + "enum": [ "1.35.0", "1.34.0", "1.33.0", "1.32.0", "1.31.0", "1.30.0", "1.29.0", "1.28.0", "1.27.1", "1.27.0", "1.26.0", "1.25.0", "1.24.0", "1.23.0", "1.22.0", "1.21.0", "1.20.1", "1.20.0", "1.19.0", "1.18.0", "1.17.0", "1.16.0", "1.15.0", "1.14.0", "1.13.0", "1.12.0", "1.11.0", "1.10.0", "1.9.0", "1.8.0", "1.7.0", "1.6.0", "1.5.0", "1.4.0", "1.3.0", "1.2.0", "1.1.0", "1.0.0", "0.9.0" ], + "default": "1.35.0" + }, + "$schema": { + "title": "Schema", + "description": "", + "type": "string" + } + }, + "additionalProperties": { + "$comment": "AJV does not resolve defaults inside oneOf/anyOf, so instead use allOf. 
Any schema refs should also use allOf with an if/then/else on class", + "properties": { + "class": { + "title": "Class", + "type": "string", + "enum": [ + "Telemetry_System", + "Telemetry_System_Poller", + "Telemetry_Listener", + "Telemetry_Consumer", + "Telemetry_Pull_Consumer", + "Telemetry_iHealth_Poller", + "Telemetry_Endpoints", + "Telemetry_Namespace", + "Controls", + "Shared" + ] + } + }, + "allOf": [ + { + "$ref": "system_schema.json#" + }, + { + "$ref": "system_poller_schema.json#" + }, + { + "$ref": "listener_schema.json#" + }, + { + "$ref": "consumer_schema.json#" + }, + { + "$ref": "pull_consumer_schema.json#" + }, + { + "$ref": "ihealth_poller_schema.json#" + }, + { + "$ref": "endpoints_schema.json#" + }, + { + "$ref": "controls_schema.json#" + }, + { + "$ref": "shared_schema.json#" + }, + { + "$ref": "namespace_schema.json#" + } + ] + }, + "required": [ + "class" + ] +} diff --git a/src/schema/1.35.0/consumer_schema.json b/src/schema/1.35.0/consumer_schema.json new file mode 100644 index 00000000..a631f322 --- /dev/null +++ b/src/schema/1.35.0/consumer_schema.json @@ -0,0 +1,1490 @@ +{ + "$id": "consumer_schema.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Telemetry Streaming Consumer schema", + "description": "", + "type": "object", + "definitions": { + "jmesPathAction": { + "title": "JMESPath Action", + "description": "Will use a JMESPath expression to modify the incoming data payload", + "type": "object", + "allOf": [ + { "$ref": "actions_schema.json#/definitions/baseActionObject" }, + { + "properties": { + "JMESPath": { + "title": "JMESPath", + "description": "Will use a JMESPath expression to modify the incoming data payload", + "type": "object", + "additionalProperties": false + }, + "expression": { + "title": "Expression", + "description": "The JMESPath expression to be applied to the incoming data payload", + "type": "string", + "minLength": 1 + }, + "enable": {} + }, + "additionalProperties": false, + "required": ["JMESPath", "expression"] + } + ] + }, + "autoTaggingStatsd": { + "title": "Statsd auto tagging", + "description": "Will parse incoming payload for values to automatically add as tags.", + "type": "object", + "properties": { + "method": { + "title": "AutoTagging method", + "description": "AutoTagging method to use to fetch tags", + "type": "string", + "enum": ["sibling"] + } + }, + "additionalProperties": false, + "required": ["method"] + }, + "genericHttpActions": { + "title": "Actions", + "description": "Actions to be performed on the Generic HTTP Consumer.", + "allOf": [ + { "$ref": "actions_schema.json#/definitions/baseActionsChain" }, + { + "items": { + "oneOf": [ + { "$ref": "#/definitions/jmesPathAction" } + ] + } + } + ] + }, + "host": { + "$comment": "Required for certain consumers: standard property", + "title": "Host", + "description": "FQDN or IP address" , + "type": "string", + "minLength": 1, + "anyOf": [ + { "format": "ipv4" }, + { "format": "ipv6" }, + { "format": "hostname" } + ], + "hostConnectivityCheck": true + }, + "protocols": { + "$comment": "Required for certain consumers: standard property", + "title": "Protocols (all)", + "description": "" , + "type": "string", + "enum": [ "https", "http", "tcp", "udp", "binaryTcpTls", "binaryTcp" ] + }, + "port": { + "$comment": "Required for certain consumers: standard property", + "title": "Port", + "description": "" , + "type": "integer", + "minimum": 0, + "maximum": 65535 + }, + "path": { + "$comment": "Required for certain consumers: standard property", + "title": 
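The JMESPath action defined above is the only action type the schema allows on a Generic HTTP consumer's actions chain; a fragment showing the required JMESPath/expression pair (the expression itself is just an illustrative multiselect):

// Illustrative Generic_HTTP consumer actions chain (consumer_schema.json)
const genericHttpActions = [
    {
        JMESPath: {}, // marker object, must stay empty
        expression: '{ virtuals: virtualServers, hostname: system.hostname }'
    }
];

console.log(JSON.stringify(genericHttpActions, null, 4));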
"Path", + "description": "Path to post data to", + "type": ["string", "object"], + "minLength": 1, + "f5expand": true, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/stringOrSecret" + } + ] + }, + "method": { + "$comment": "Required for certain consumers: standard property", + "title": "Method", + "description": "HTTP method to use (limited to sensical choices)" , + "type": "string", + "enum": [ "POST", "GET", "PUT" ] + }, + "headers": { + "$comment": "Required for certain consumers: standard property", + "title": "Headers", + "description": "HTTP headers to use" , + "type": "array", + "items": { + "properties": { + "name": { + "description": "Name of this header", + "type": "string", + "f5expand": true, + "minLength": 1 + }, + "value": { + "description": "Value of this header", + "type": ["string", "object"], + "f5expand": true, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/stringOrSecret" + } + ] + } + }, + "required": [ + "name", + "value" + ], + "additionalProperties": false + } + }, + "customOpts": { + "$comment": "Required for certain consumers: standard property", + "title": "Custom Opts (Client Library Dependent)", + "description": "Additional options for use by consumer client library. Refer to corresponding consumer lib documentation for acceptable keys and values." , + "type": "array", + "items": { + "properties": { + "name": { + "description": "Name of the option", + "type": "string", + "f5expand": true, + "minLength": 1 + }, + "value": { + "description": "Value of the option", + "minLength": 1, + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "allOf": [ + { + "f5expand": true + }, + { + "$ref": "base_schema.json#/definitions/stringOrSecret" + } + ] + } + ] + } + }, + "required": [ + "name", + "value" + ], + "additionalProperties": false + }, + "minItems": 1 + }, + "format": { + "$comment": "Required for certain consumers: Splunk and Azure_Log_Analytics", + "title": "Format (informs consumer additional formatting may be required)", + "description": "", + "type": "string" + }, + "username": { + "$comment": "Required for certain consumers: standard property", + "title": "Username", + "description": "" , + "minLength": 1, + "type": "string", + "f5expand": true + }, + "region": { + "$comment": "Required for certain consumers: AWS_CloudWatch, AWS_S3, Azure_Log_Analytics, Azure_App_Insights, DataDog", + "title": "Region", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "endpointUrl": { + "$comment": "Required for certain consumers: AWS_CloudWatch, AWS_S3", + "title": "endpoint url", + "description": "The full endpoint URL for service requests", + "type": "string", + "minLength": 1, + "f5expand": true + }, + "bucket": { + "$comment": "Required for certain consumers: AWS_S3", + "title": "Bucket", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "maxAwsLogBatchSize": { + "$comment": "Required for certain consumers: AWS_CloudWatch", + "title": "Maximum Batch Size", + "description": "The maximum number of telemetry items to include in a payload to the ingestion endpoint", + "type": "integer", + "minimum": 1, + "default": 100, + "maximum": 10000 + }, + "logGroup": { + "$comment": "Required for certain consumers: AWS_CloudWatch", + "title": "Log Group", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "logStream": { + "$comment": "Required for certain consumers: AWS_CloudWatch", + "title": "Log Stream", + "description": "" , + 
"type": "string", + "minLength": 1, + "f5expand": true + }, + "metricNamespace": { + "$comment": "Required for certain consumers: AWS_CloudWatch", + "title": "Metric Namespace", + "description": "The namespace for the metrics", + "type": "string", + "f5expand": true, + "minLength": 1 + }, + "metricPrefix": { + "$comment": "Required for certain consumers: DataDog", + "title": "Metric Prefix", + "description": "The string value(s) to use as a metric prefix", + "type": "array", + "minItems": 1, + "items": { + "allOf": [{ + "type": "string", + "f5expand": true, + "minLength": 1 + }] + } + }, + "workspaceId": { + "$comment": "Required for certain consumers: Azure_Log_Analytics", + "title": "Workspace ID", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "useManagedIdentity": { + "$comment": "Required for certain consumers: Azure_Log_Analytics and Azure_Application_Insights", + "title": "Use Managed Identity", + "description": "Determines whether to use Managed Identity to perform authorization for Azure services", + "type": "boolean", + "default": false + }, + "appInsightsResourceName": { + "$comment": "Required for certain consumers: Azure_Application_Insights", + "title": "Application Insights Resource Name (Pattern)", + "description": "Name filter used to determine which App Insights resource to send metrics to. If not provided, TS will send metrics to App Insights in the subscription in which the managed identity has permissions to", + "type": "string", + "minLength": 1 + }, + "instrumentationKey": { + "$comment": "Required for certain consumers: Azure_Application_Insights", + "title": "Instrumentation Key", + "description": "Used to determine which App Insights resource to send metrics to", + "anyOf": [ + { + "type": "string", + "f5expand": true, + "minLength": 1 + }, + { + "type":"array", + "items": { + "type": "string", + "f5expand": true, + "minLength": 1 + }, + "minItems": 1 + } + ] + }, + "maxBatchIntervalMs": { + "$comment": "Required for certain consumers: Azure_Application_Insights", + "title": "Maximum Batch Interval (ms)", + "description": "The maximum amount of time to wait in milliseconds to for payload to reach maxBatchSize", + "type": "integer", + "minimum": 1000, + "default": 5000 + }, + "maxBatchSize": { + "$comment": "Required for certain consumers: Azure_Application_Insights", + "title": "Maximum Batch Size", + "description": "The maximum number of telemetry items to include in a payload to the ingestion endpoint", + "type": "integer", + "minimum": 1, + "default": 250 + }, + "topic": { + "$comment": "Required for certain consumers: Kafka", + "title": "Topic", + "description": "" , + "type": "string", + "f5expand": true + }, + "index": { + "$comment": "Required for certain consumers: ElasticSearch", + "title": "Index Name", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "apiVersion": { + "$comment": "Required for certain consumers: ElasticSearch", + "title": "API Version", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "dataType": { + "$comment": "Required for certain consumers: AWS_CloudWatch, ElasticSearch", + "title": "Data type", + "description": "" , + "type": "string", + "f5expand": true + }, + "authenticationProtocol": { + "$comment": "Required for certain consumers: Kafka", + "title": "Authentication Protocol", + "description": "" , + "type": "string", + "f5expand": true, + "enum": [ + "SASL-PLAIN", + "TLS", + "None" + ] + }, + "clientCertificate": { + 
"$comment": "Required for certain consumers: Kafka, Generic HTTP, OpenTelemetry_Exporter", + "title": "Client Certificate", + "description": "Certificate(s) to use when connecting to a secured endpoint.", + "type": "object", + "f5expand": true, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/secret" + } + ] + }, + "rootCertificate": { + "$comment": "Required for certain consumers: Kafka, Generic HTTP, OpenTelemetry_Exporter", + "title": "Root Certificate", + "description": "Certificate Authority root certificate, used to validate certificate chains.", + "type": "object", + "f5expand": true, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/secret" + } + ] + }, + "outputMode": { + "$comment": "Required for certain consumers: Generic HTTP", + "title": "output raw data flag", + "description": "Flag to request output of the raw data.", + "type": "string", + "enum": [ "processed", "raw" ] + }, + "projectId": { + "$comment": "Required for certain consumers: Google_Cloud_Monitoring", + "title": "Project ID", + "description": "The ID of the relevant project.", + "type": "string", + "minLength": 1, + "f5expand": true + }, + "serviceEmail": { + "$comment": "Required for certain consumers: Google_Cloud_Monitoring, Google_Cloud_Logging", + "title": "Service Email", + "description": "The service email.", + "type": "string", + "minLength": 1, + "f5expand": true + }, + "privateKeyId": { + "$comment": "Required for certain consumers when Service Account Token is not used: Google_Cloud_Monitoring, Google_Cloud_Logging", + "title": "Private Key ID", + "description": "The private key ID.", + "type": "string", + "minLength": 1, + "f5expand": true + }, + "useServiceAccountToken": { + "$comment": "Used by certain consumers: Google_Cloud_Monitoring, Google_Cloud_Logging", + "title": "Use Service Account Token", + "description": "Determines whether to use Service Account Token to perform authorization for Google services", + "type": "boolean", + "default": false + }, + "logScope": { + "$comment": "Required for certain consumers: Google_Cloud_Logging", + "title": "Logging Scope Type", + "description": "" , + "enum": ["projects", "organizations", "billingAccounts", "folders"], + "f5expand": true + }, + "logScopeId": { + "$comment": "Required for certain consumers: Google_Cloud_Logging", + "title": "Logging Scope ID", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "logId": { + "$comment": "Required for certain consumers: Google_Cloud_Logging", + "title": "Logging ID", + "description": "" , + "type": "string", + "format": "regex", + "pattern": "^[a-zA-z0-9._-]+$", + "minLength": 1, + "f5expand": true + }, + "privateKey": { + "$comment": "Required for certain consumers: Kafka, Generic HTTP, OpenTelemetry_Exporter", + "title": "Private Key", + "description": "Private Key", + "type": "object", + "f5expand": true, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/secret" + } + ] + }, + "eventSchemaVersion": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "Event Schema Version", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true, + "default": "1" + }, + "f5csTenantId": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "F5CS Tenant ID", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "f5csSensorId": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "F5CS Sensor ID", + "description": "" , + "type": "string", + "minLength": 1, + 
"f5expand": true + }, + "payloadSchemaNid": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "Namespace ID for payloadSchema", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "serviceAccount": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "Service Account", + "description": "Service Account to authentication" , + "type": "object", + "properties": { + "authType": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "SA Type", + "description": "" , + "type": "string", + "enum": ["google-auth" ] + }, + "type": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "SA Type", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "projectId": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "Project Id", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "privateKeyId": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "Private Key Id", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "privateKey": { + "$ref": "base_schema.json#/definitions/secret" + }, + "clientEmail": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "Client Email", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "clientId": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "Client Id", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "authUri": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "Auth Uri", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "tokenUri": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "Token Uri", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "authProviderX509CertUrl": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "Auth Provider X509 Cert Url", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "clientX509CertUrl": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "Client X509 Cert Url", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + } + }, + "additionalProperties": false, + "allOf": [ + { + "if": { "properties": { "authType": { "const": "google-auth" } } }, + "then": { + "required": [ + "type", + "projectId", + "privateKeyId", + "privateKey", + "clientEmail", + "clientId", + "authUri", + "tokenUri", + "authProviderX509CertUrl", + "clientX509CertUrl" + ] + }, + "else": {} + } + ] + }, + "targetAudience": { + "$comment": "Required for certain consumers: F5_Cloud", + "title": "Target Audience", + "description": "" , + "type": "string", + "minLength": 1, + "f5expand": true + }, + "useSSL": { + "$comment": "Required for certain consumers: F5_Cloud, OpenTelemetry_Exporter", + "title": "useSSL", + "description": "To decide if GRPC connection should use SSL and then it is secured" , + "type": "boolean", + "f5expand": true + }, + "compressionType": { + "$comment": "Required for certain consumers: DataDog, Splunk", + "title": "Data compression", + "description": "Whether or not to compress data and what compression to use before sending it to destination", + "type": "string", + "enum": ["none", "gzip"] + }, + "reportInstanceMetadata": { + "$comment": "Required for certain consumers: 
Google_Cloud_Monitoring, Google_Cloud_Logging", + "title": "Instance metadata reporting", + "description": "Enables instance metadata collection and reporting" , + "type": "boolean", + "f5expand": true + }, + "apiKey": { + "$comment": "Required for certain consumers: DataDog", + "title": "API key to use to push data", + "type": "string", + "minLength": 1, + "f5expand": true + }, + "service": { + "$comment": "Required for certain consumers: DataDog", + "title": "The name of the service generating telemetry data", + "type": "string", + "minLength": 1, + "f5expand": true + }, + "convertBooleansToMetrics": { + "$comment": "Required for certain consumers: DataDog, Statsd, OpenTelemetry_Exporter", + "title": "Convert boolean values to metrics", + "description": "Whether or not to convert boolean values to metrics. True becomes 1, False becomes 0" , + "type": "boolean", + "f5expand": true, + "default": false + }, + "customTags": { + "$comment": "Required for certain consumers: DataDog", + "title": "Custom tags", + "description": "A collection of custom tags that are appended to the dynamically generated telemetry tags", + "type": "array", + "minItems": 1, + "items": { + "properties": { + "name": { + "description": "Name of this tag", + "type": "string", + "f5expand": true, + "minLength": 1 + }, + "value": { + "description": "Value of this tag", + "type": "string", + "f5expand": true, + "minLength": 1 + } + }, + "additionalProperties": false + } + }, + "customHttpOpts": { + "items": { + "allOf": [ + { + "if": { "properties": { "name": { "const": "keepAlive" } } }, + "then": { "properties": { "value": { "type": "boolean" } } } + }, + { + "if": { "properties": { "name": { "const": "keepAliveMsecs" } } }, + "then": { "properties": { "value": { "type": "integer", "minimum": 0 } } } + }, + { + "if": { "properties": { "name": { "const": "maxSockets" } } }, + "then": { "properties": { "value": { "type": "integer", "minimum": 0 } } } + }, + { + "if": { "properties": { "name": { "const": "maxFreeSockets" } } }, + "then": { "properties": { "value": { "type": "integer", "minimum": 0 } } } + } + ] + } + }, + "otelExporter": { + "$comment": "Required for certain consumers: OpenTelemetry_Exporter", + "title": "Open Telemetry Exporter", + "description": "" , + "type": "string", + "enum": ["grpc", "json", "protobuf" ] + } + }, + "allOf": [ + { + "if": { "properties": { "class": { "const": "Telemetry_Consumer" } } }, + "then": { + "required": [ + "class", + "type" + ], + "properties": { + "class": { + "title": "Class", + "description": "Telemetry Streaming Consumer class", + "type": "string", + "enum": [ "Telemetry_Consumer" ] + }, + "enable": { + "default": true, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/enable" + } + ] + }, + "trace": { + "default": false, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/trace" + } + ] + }, + "type": { + "title": "Type", + "description": "" , + "type": "string", + "enum": [ + "AWS_CloudWatch", + "AWS_S3", + "Azure_Log_Analytics", + "Azure_Application_Insights", + "DataDog", + "default", + "ElasticSearch", + "Generic_HTTP", + "Google_Cloud_Logging", + "Google_Cloud_Monitoring", + "Google_StackDriver", + "Graphite", + "Kafka", + "OpenTelemetry_Exporter", + "Splunk", + "Statsd", + "Sumo_Logic", + "F5_Cloud" + ] + }, + "enableHostConnectivityCheck": { + "$ref": "base_schema.json#/definitions/enableHostConnectivityCheck" + }, + "allowSelfSignedCert": { + "default": false, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/allowSelfSignedCert" + } + ] + 
} + }, + "allOf": [ + { + "$comment": "This allows enforcement of no additional properties in this nested schema - could reuse above properties but prefer a separate block", + "properties": { + "addTags": {}, + "actions": {}, + "apiKey": {}, + "class": {}, + "customTags": {}, + "enable": {}, + "trace": {}, + "type": {}, + "enableHostConnectivityCheck": {}, + "allowSelfSignedCert": {}, + "host": {}, + "protocol": {}, + "port": {}, + "path": {}, + "method": {}, + "headers": {}, + "customOpts": {}, + "username": {}, + "passphrase": {}, + "format": {}, + "workspaceId": {}, + "useManagedIdentity": {}, + "instrumentationKey": {}, + "appInsightsResourceName": {}, + "maxBatchIntervalMs": {}, + "maxBatchSize": {}, + "region": {}, + "endpointUrl": {}, + "managementEndpointUrl": {}, + "odsOpinsightsEndpointUrl": {}, + "maxAwsLogBatchSize": {}, + "logGroup": {}, + "logStream": {}, + "metricNamespace": {}, + "metricPrefix": {}, + "bucket": {}, + "topic": {}, + "apiVersion": {}, + "index": {}, + "dataType": {}, + "authenticationProtocol": {}, + "projectId": {}, + "serviceEmail": {}, + "privateKey": {}, + "privateKeyId": {}, + "useServiceAccountToken": {}, + "clientCertificate": {}, + "rootCertificate": {}, + "outputMode": {}, + "fallbackHosts": {}, + "eventSchemaVersion": {}, + "f5csTenantId": {}, + "f5csSensorId": {}, + "payloadSchemaNid": {}, + "serviceAccount": {}, + "targetAudience": {}, + "useSSL": {}, + "proxy": {}, + "compressionType": {}, + "logScope": {}, + "logScopeId": {}, + "logId": {}, + "reportInstanceMetadata": {}, + "metricsPath": {}, + "service": {}, + "convertBooleansToMetrics": {}, + "exporter": {} + }, + "additionalProperties": false, + "dependencies": { + "actions": { + "allOf": [ + { + "properties": { "type": { "const": "Generic_HTTP" } } + } + ] + } + } + }, + { + "if": { "properties": { "type": { "const": "default" } } }, + "then": { + "required": [], + "properties": {} + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "Generic_HTTP" } } }, + "then": { + "required": [ + "host" + ], + "properties": { + "host": { "$ref": "#/definitions/host" }, + "fallbackHosts": { + "type": "array", + "description": "List FQDNs or IP addresses to be used as fallback hosts" , + "minItems": 1, + "items": { + "allOf": [{ + "$ref": "#/definitions/host" + }] + } + }, + "protocol": { "$ref": "#/definitions/protocols", "default": "https" }, + "port": { "$ref": "#/definitions/port", "default": 443 }, + "path": { "$ref": "#/definitions/path", "default": "/" }, + "method": { "$ref": "#/definitions/method", "default": "POST" }, + "headers": { "$ref": "#/definitions/headers" }, + "passphrase": { "$ref": "base_schema.json#/definitions/secret" }, + "proxy": { "$ref": "base_schema.json#/definitions/proxy" }, + "privateKey": { "$ref": "#/definitions/privateKey" }, + "clientCertificate": { "$ref": "#/definitions/clientCertificate" }, + "rootCertificate": { "$ref": "#/definitions/rootCertificate" }, + "outputMode": { "$ref": "#/definitions/outputMode", "default": "processed" }, + "actions": { "$ref": "#/definitions/genericHttpActions" }, + "compressionType": { "$ref": "#/definitions/compressionType", "default": "none" }, + "customOpts": { + "allOf": [ + { "$ref": "#/definitions/customOpts" }, + { "$ref": "#/definitions/customHttpOpts" } + ] + } + }, + "allOf": [ + { + "if": { "required": [ "clientCertificate" ] }, + "then": { "required": [ "privateKey" ] } + }, + { + "if": { "required": [ "privateKey" ] }, + "then": { "required": [ "clientCertificate" ] } + } + ] + }, + "else": {} + }, + { + 
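The Generic_HTTP conditional above also enforces that clientCertificate and privateKey travel together; a fragment with mutual TLS (certificate contents elided, host and path are placeholders):

// Illustrative Generic_HTTP consumer with client certificate authentication
const genericHttpConsumer = {
    class: 'Telemetry_Consumer',
    type: 'Generic_HTTP',
    host: 'collector.example.com',
    protocol: 'https',                 // default
    port: 443,                         // default
    path: '/ingest/telemetry',
    // clientCertificate and privateKey must be supplied as a pair
    clientCertificate: { class: 'Secret', cipherText: '-----BEGIN CERTIFICATE-----...' },
    privateKey: { class: 'Secret', cipherText: '-----BEGIN PRIVATE KEY-----...' },
    rootCertificate: { class: 'Secret', cipherText: '-----BEGIN CERTIFICATE-----...' }
};

console.log(JSON.stringify(genericHttpConsumer, null, 4));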
"if": { "properties": { "type": { "const": "Splunk" } } }, + "then": { + "required": [ + "host", + "passphrase" + ], + "properties": { + "host": { "$ref": "#/definitions/host" }, + "protocol": { "$ref": "#/definitions/protocols", "default": "https" }, + "port": { "$ref": "#/definitions/port", "default": 8088 }, + "passphrase": { "$ref": "base_schema.json#/definitions/secret" }, + "format": { "$ref": "#/definitions/format", "enum": [ "default", "legacy", "multiMetric" ], "default": "default" }, + "proxy": { "$ref": "base_schema.json#/definitions/proxy" }, + "compressionType": { "$ref": "#/definitions/compressionType", "default": "gzip" } + } + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "Azure_Log_Analytics" } } }, + "then": { + "required": [ + "workspaceId" + ], + "properties": { + "workspaceId": { "$ref": "#/definitions/workspaceId" }, + "format": { "$ref": "#/definitions/format", "enum": [ "default", "propertyBased" ], "default": "default" }, + "passphrase": { "$ref": "base_schema.json#/definitions/secret" }, + "useManagedIdentity": { "$ref": "#/definitions/useManagedIdentity", "default": false }, + "region": { "$ref": "#/definitions/region" }, + "managementEndpointUrl": { "$ref": "#/definitions/endpointUrl" }, + "odsOpinsightsEndpointUrl": { "$ref": "#/definitions/endpointUrl" } + }, + "allOf": [ + { + "dependencies": { + "passphrase": { + "anyOf": [ + { "not": {"required": [ "useManagedIdentity" ] } }, + { "properties": { "useManagedIdentity": { "const": false } } } + ] + } + } + }, + { + "if": { "not": { "required" : [ "useManagedIdentity"] } }, + "then": { "required": ["passphrase"] }, + "else": { + "if": { "properties": { "useManagedIdentity": { "const": true } } }, + "then": { "not": { "required": ["passphrase"] } }, + "else": { "required": ["passphrase"]} + } + } + ] + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "Azure_Application_Insights" } } }, + "then": { + "properties": { + "instrumentationKey": { "$ref": "#/definitions/instrumentationKey" }, + "maxBatchSize": { "$ref": "#/definitions/maxBatchSize", "default": 250 }, + "maxBatchIntervalMs": { "$ref": "#/definitions/maxBatchIntervalMs", "default": 5000 }, + "customOpts": { "$ref": "#/definitions/customOpts" }, + "useManagedIdentity": { "$ref": "#/definitions/useManagedIdentity", "default": false }, + "appInsightsResourceName": { "$ref": "#/definitions/appInsightsResourceName" }, + "region": { "$ref": "#/definitions/region" }, + "managementEndpointUrl": { "$ref": "#/definitions/endpointUrl" } + }, + "allOf": [ + { + "dependencies": { + "instrumentationKey": { + "allOf": [ + { + "anyOf": [ + { "not": { "required": [ "useManagedIdentity" ] } }, + { "properties": { "useManagedIdentity": { "const": false } } } + ] + }, + { + "not": { "required": ["appInsightsResourceName"] } + } + ] + } + } + }, + { + "if": { "not": { "required" : [ "useManagedIdentity"] } }, + "then": { "required": ["instrumentationKey"] }, + "else": { + "if": { "properties": { "useManagedIdentity": { "const": true } } }, + "then": { "not": { "required": ["instrumentationKey"] } }, + "else": { + "allOf": [ + { "required": [ "instrumentationKey" ]}, + { "not": { "required": [ "appInsightsResourceName" ] } } + ] + } + } + }, + { + "if": { "required": [ "appInsightsResourceName" ] }, + "then": { "properties": { "appInsightsResourceName": { "minLength": 1 } }} + } + ] + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "AWS_CloudWatch" } } }, + "then": { + "required": [ + "region", + "dataType" + 
], + "properties": { + "region": { "$ref": "#/definitions/region" }, + "dataType": { "$ref": "#/definitions/dataType", "default": "logs" }, + "username": { "$ref": "#/definitions/username" }, + "passphrase": { "$ref": "base_schema.json#/definitions/secret" }, + "endpointUrl": { "$ref": "#/definitions/endpointUrl" } + }, + "allOf": [ + { "not": { "required": ["username"], "not": { "required": ["passphrase"] }}}, + { "not": { "required": ["passphrase"], "not": { "required": ["username"] }}}, + { + "if": { "properties": { "dataType": { "enum": ["logs", null] } } }, + "then": { + "properties": { + "maxAwsLogBatchSize": { "$ref": "#/definitions/maxAwsLogBatchSize", "default": 100 } + }, + "required": ["maxAwsLogBatchSize"] + } + }, + { "oneOf": + [ + { + "allOf": [ + { + "properties": { + "logGroup": { "$ref": "#/definitions/logGroup" }, + "logStream": { "$ref": "#/definitions/logStream" }, + "dataType": { + "allOf": + [ + { "$ref": "#/definitions/dataType"}, + { "enum": ["logs", null] } + ] + } + } + }, + { "required":[ "logGroup", "logStream" ] }, + { "not": { "required": ["metricNamespace"] }} + ] + }, + { + "allOf": [ + { + "properties": { + "metricNamespace": { "$ref": "#/definitions/metricNamespace" }, + "dataType": { + "allOf": [ + { "$ref": "#/definitions/dataType"}, + { "enum": ["metrics"] } + ] + } + } + }, + { "required":[ "metricNamespace" ] }, + { "not": { "required":[ "maxAwsLogBatchSize" ] }}, + { "not": { "required":[ "logStream" ] }}, + { "not": { "required":[ "logGroup" ] }} + ] + } + ] + } + ] + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "AWS_S3" } } }, + "then": { + "required": [ + "region", + "bucket" + ], + "properties": { + "region": { "$ref": "#/definitions/region" }, + "bucket": { "$ref": "#/definitions/bucket" }, + "username": { "$ref": "#/definitions/username" }, + "passphrase": { "$ref": "base_schema.json#/definitions/secret" }, + "endpointUrl": { "$ref": "#/definitions/endpointUrl" } + }, + "dependencies": { + "passphrase": [ "username" ], + "username":[ "passphrase" ] + } + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "Graphite" } } }, + "then": { + "required": [ + "host" + ], + "properties": { + "host": { "$ref": "#/definitions/host" }, + "protocol": { "$ref": "#/definitions/protocols", "default": "https" }, + "port": { "$ref": "#/definitions/port", "default": 443 }, + "path": { "$ref": "#/definitions/path", "default": "/events/" } + } + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "Kafka" } } }, + "then": { + "required": [ + "host", + "topic" + ], + "properties": { + "authenticationProtocol": { "$ref": "#/definitions/authenticationProtocol", "default": "None" }, + "host": { "$ref": "#/definitions/host" }, + "protocol": { "$ref": "#/definitions/protocols", "default": "binaryTcpTls" }, + "port": { "$ref": "#/definitions/port", "default": 9092 }, + "topic": { "$ref": "#/definitions/topic" } + }, + "allOf": [ + { + "if": { "properties": { "authenticationProtocol": { "const": "SASL-PLAIN" } } }, + "then": { + "required": [ + "username" + ], + "properties": { + "username": { "$ref": "#/definitions/username" }, + "passphrase": { "$ref": "base_schema.json#/definitions/secret" } + }, + "dependencies": { + "passphrase": [ "username" ] + } + }, + "else": {} + }, + { + "if": { "properties": { "authenticationProtocol": { "const": "TLS" } } }, + "then": { + "required": [ + "privateKey", + "clientCertificate" + ], + "allOf": [ + { "not": { "required": [ "username" ] } }, + { "not": { "required": [ 
"passphrase" ] } } + ], + "properties": { + "privateKey": { "$ref": "#/definitions/privateKey" }, + "clientCertificate": { "$ref": "#/definitions/clientCertificate" }, + "rootCertificate": { "$ref": "#/definitions/rootCertificate" }, + "protocol": { "const": "binaryTcpTls" } + } + }, + "else": {} + } + ] + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "ElasticSearch" } } }, + "then": { + "required": [ + "host", + "index" + ], + "properties": { + "host": { "$ref": "#/definitions/host" }, + "protocol": { "$ref": "#/definitions/protocols", "default": "https" }, + "port": { "$ref": "#/definitions/port", "default": 9200 }, + "path": { "$ref": "#/definitions/path" }, + "username": { "$ref": "#/definitions/username" }, + "passphrase": { "$ref": "base_schema.json#/definitions/secret" }, + "apiVersion": { "$ref": "#/definitions/apiVersion", "default": "6.0" }, + "index": { "$ref": "#/definitions/index" } + }, + "allOf": [ + { + "if": { "properties": { "apiVersion": { "pattern": "^[0-6][.]|^[0-6]$" } } }, + "then": { + "properties": { + "dataType": { + "$ref": "#/definitions/dataType", + "default": "f5.telemetry", + "minLength": 1 + } + } + }, + "else": { + "if": { "properties": { "apiVersion": { "pattern": "^7[.]|^7$" } } }, + "then": { + "properties": { + "dataType": { + "$ref": "#/definitions/dataType", + "default": "_doc", + "minLength": 1 + } + } + }, + "else": { + "allOf": [ + { "not": { "required": [ "dataType" ] } } + ] + } + } + } + ] + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "Sumo_Logic" } } }, + "then": { + "required": [ + "host", + "passphrase" + ], + "properties": { + "host": { "$ref": "#/definitions/host" }, + "protocol": { "$ref": "#/definitions/protocols", "default": "https" }, + "port": { "$ref": "#/definitions/port", "default": 443 }, + "path": { "$ref": "#/definitions/path", "default": "/receiver/v1/http/" }, + "passphrase": { "$ref": "base_schema.json#/definitions/secret" } + } + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "Statsd" } } }, + "then": { + "required": [ + "host" + ], + "properties": { + "host": { "$ref": "#/definitions/host" }, + "protocol": { + "title": "Protocol", + "type": "string", + "enum": [ "tcp", "udp" ], + "default": "udp" + }, + "port": { "$ref": "#/definitions/port", "default": 8125 }, + "addTags": { "$ref": "#/definitions/autoTaggingStatsd" }, + "convertBooleansToMetrics": { "$ref": "#/definitions/convertBooleansToMetrics", "default": "false" } + } + }, + "else": {} + }, + { + "if": { + "properties": { "type": { "enum": ["Google_Cloud_Monitoring", "Google_StackDriver", "Google_Cloud_Logging"] } } + }, + "then": { + "required": [ + "serviceEmail" + ], + "properties": { + "privateKeyId": { "$ref": "#/definitions/privateKeyId" }, + "serviceEmail": { "$ref": "#/definitions/serviceEmail" }, + "privateKey": { "$ref": "base_schema.json#/definitions/secret" }, + "useServiceAccountToken": { "$ref": "#/definitions/useServiceAccountToken", "default": false }, + "reportInstanceMetadata": { "$ref": "#/definitions/reportInstanceMetadata", "default": false } + }, + "allOf": [ + { + "dependencies": { + "privateKeyId": { + "anyOf": [ + { "not": {"required": [ "useServiceAccountToken" ] } }, + { "properties": { "useServiceAccountToken": { "const": false } } } + ] + } + } + }, + { + "dependencies": { + "privateKey": { + "anyOf": [ + { "not": {"required": [ "useServiceAccountToken" ] } }, + { "properties": { "useServiceAccountToken": { "const": false } } } + ] + } + } + }, + { + "if": { + 
"anyOf": [ + { "not": { "required" : [ "useServiceAccountToken"] } }, + { "properties": { "useServiceAccountToken": { "const": false } } } + ] + }, + "then": { "required": ["privateKeyId", "privateKey"] }, + "else": { "not": { "required": ["privateKeyId", "privateKey"] } } + }, + { + "if": { "properties": { "type": { "enum": ["Google_Cloud_Monitoring", "Google_StackDriver"] } } }, + "then": { + "properties": { + "projectId": { "$ref": "#/definitions/projectId"} + }, + "required": ["projectId"] + } + }, + { + "if": { "properties": { "type": { "const": "Google_Cloud_Logging" } } }, + "then": { + "properties": { + "logScope": { "$ref": "#/definitions/logScope", "default": "projects" }, + "logScopeId": { "$ref": "#/definitions/logScopeId"}, + "logId": { "$ref": "#/definitions/logId"} + }, + "required": ["logScope", "logScopeId", "logId"] + } + } + ] + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "F5_Cloud" } } }, + "then": { + "required": [ + "f5csTenantId", + "f5csSensorId", + "payloadSchemaNid", + "serviceAccount", + "targetAudience" + ], + "properties": { + "port": { "$ref": "#/definitions/port", "default": 443 }, + "eventSchemaVersion": { "$ref": "#/definitions/eventSchemaVersion" }, + "f5csTenantId": { "$ref": "#/definitions/f5csTenantId" }, + "f5csSensorId": { "$ref": "#/definitions/f5csSensorId" }, + "payloadSchemaNid": { "$ref": "#/definitions/payloadSchemaNid" }, + "serviceAccount": { "$ref": "#/definitions/serviceAccount" }, + "targetAudience": { "$ref": "#/definitions/targetAudience" }, + "useSSL": { "$ref": "#/definitions/useSSL", "default": true } + }, + "nodeSupportVersion": "8.11.1" + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "DataDog" } } }, + "then": { + "required": [ + "apiKey" + ], + "properties": { + "apiKey": { "$ref": "#/definitions/apiKey" }, + "compressionType": { "$ref": "#/definitions/compressionType", "default": "none" }, + "region": { "$ref": "#/definitions/region", "enum": ["US1", "US3", "EU1", "US1-FED"], "default": "US1" }, + "service": { "$ref": "#/definitions/service", "default": "f5-telemetry" }, + "metricPrefix": { "$ref": "#/definitions/metricPrefix" }, + "convertBooleansToMetrics": { "$ref": "#/definitions/convertBooleansToMetrics", "default": "false" }, + "customTags": { "$ref": "#/definitions/customTags" }, + "customOpts": { + "allOf": [ + { "$ref": "#/definitions/customOpts" }, + { "$ref": "#/definitions/customHttpOpts" } + ] + }, + "proxy": { "$ref": "base_schema.json#/definitions/proxy" } + } + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "OpenTelemetry_Exporter" } } }, + "then": { + "required": [ + "host", + "port" + ], + "properties": { + "host": { "$ref": "#/definitions/host" }, + "port": { "$ref": "#/definitions/port" }, + "headers": { "$ref": "#/definitions/headers" }, + "metricsPath": { "$ref": "#/definitions/path" }, + "convertBooleansToMetrics": { "$ref": "#/definitions/convertBooleansToMetrics", "default": "false" }, + "exporter": { "$ref": "#/definitions/otelExporter", "default": "protobuf" }, + "privateKey": { "$ref": "#/definitions/privateKey" }, + "clientCertificate": { "$ref": "#/definitions/clientCertificate" }, + "rootCertificate": { "$ref": "#/definitions/rootCertificate" } + }, + "dependencies": { + "clientCertificate": ["privateKey"], + "privateKey": ["clientCertificate"] + }, + "nodeSupportVersion": "8.11.1", + "allOf": [ + { + "if": { "properties": { "exporter": { "const": "grpc" } } } , + "then": { + "properties": { + "useSSL": { "$ref": 
"#/definitions/useSSL", "default": true } + }, + "allOf": [ + { + "if": { "properties": { "useSSL": { "const": false } } }, + "then": { + "allOf": [ + { "not": { "required": ["privateKey"] } }, + { "not": { "required": ["clientCertificate"] } }, + { "not": { "required": ["rootCertificate"] } } + ] + } + }, + { + "allOf": [ + { "not": { "required": ["metricsPath"] } }, + { "not": { "required": ["protocol"] } } + ] + } + ] + }, + "else": { + "properties": { + "protocol": { "$ref": "#/definitions/protocols", "default": "http", "enum": ["http", "https"] } + }, + "allOf": [ + { "not": { "required": ["useSSL"] } }, + { + "if": { "properties": { "protocol": { "const": "http" } } }, + "then": { + "allOf": [ + { "not": { "required": ["privateKey"] } }, + { "not": { "required": ["clientCertificate"] } }, + { "not": { "required": ["rootCertificate"] } } + ] + } + } + ] + } + } + ] + }, + "else": {} + } + ] + }, + "else": {} + } + ] +} diff --git a/src/schema/1.35.0/controls_schema.json b/src/schema/1.35.0/controls_schema.json new file mode 100644 index 00000000..c990a93c --- /dev/null +++ b/src/schema/1.35.0/controls_schema.json @@ -0,0 +1,166 @@ +{ + "$id": "controls_schema.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Telemetry Streaming Controls schema", + "description": "", + "type": "object", + "allOf": [ + { + "if": { "properties": { "class": { "const": "Controls" } } }, + "then": { + "required": [ + "class" + ], + "properties": { + "class": { + "title": "Class", + "description": "Telemetry Streaming Controls class", + "type": "string", + "enum": [ "Controls" ] + }, + "logLevel": { + "title": "Logging Level", + "description": "", + "type": "string", + "default": "debug", + "enum": [ + "verbose", + "debug", + "info", + "error" + ] + }, + "debug": { + "title": "Enable debug mode", + "description": "", + "type": "boolean", + "default": false + }, + "memoryThresholdPercent": { + "title": "Memory Usage Threshold (Percentage of Available Process Memory)", + "description": "Once memory usage reaches this value, processing may temporarily cease until levels return below threshold. Defaults to 90%", + "type": "integer", + "minimum": 1, + "maximum": 100, + "default": 90 + }, + "listenerMode": { + "title": "Event Listener events parsing mode", + "description": "Event Listener events parsing mode. \"buffer\" is more performant but under the high memory usage events may result in OOM. \"string\" is less performant but more chance to have lower RSS", + "type": "string", + "enum": [ + "buffer", + "string" + ] + }, + "listenerStrategy": { + "title": "Event Listener events buffering strategy due high memory usage events", + "description": "Event Listener events buffering strategy. \"drop\" drops all new chunks of data, but keeps pending data to process - less memory usage but loosing data. 
\"ring\" keeps buffering data by overriding peding data - higher memory usage but less chance to get data lost.", + "type": "string", + "enum": [ + "drop", + "ring" + ] + }, + "memoryMonitor": { + "title": "Memory Monitor configuration options", + "description": "Memory Monitor configuration options allow configuring thresholds for various parameters to help Telemetry Streaming avoid extreme conditions like Out-Of-Memory.", + "type": "object", + "properties": { + "interval": { + "title": "", + "description": "", + "enum": [ + "default", + "aggressive" + ], + "default": "default" + }, + "logFrequency": { + "title": "Logging Frequency (in sec.)", + "description": "Number of seconds to use to log information about memory usage. Defaults to 10 sec.", + "type": "integer", + "minimum": 1, + "default": 10 + }, + "logLevel": { + "title": "Logging Level", + "description": "Logging Level to use to log information about memory usage. Defaults to \"debug\"", + "default": "debug", + "allOf": [ + { "$ref": "#/allOf/0/then/properties/logLevel" } + ] + }, + "memoryThresholdPercent": { + "title": "Memory Usage Threshold (Percentage of Available Process Memory)", + "description": "Once memory usage reaches this value, processing may temporarily cease until levels return below threshold * \"thresholdReleasePercent\". Defaults to 90%. NOTE: the property is the same as the one from parent object but it take precedens over the parent's one if specified.", + "type": "integer", + "minimum": 1, + "maximum": 100 + }, + "osFreeMemory": { + "title": "OS Free memory (in MB)", + "description": "Amount of OS Free memory (in MB) below that processing may temporarily ceasae until levels return above theshold. Defaults to 30 MB.", + "type": "integer", + "minimum": 1, + "default": 30 + }, + "provisionedMemory": { + "title": "Provisioned Memory for Application (in MB.)", + "description": "Amount of Memory in MB. that application should not exceed. Once limit exceed, processing may temporarily cease until levels return below threshold. Defalts to 1400 MB.", + "type": "integer", + "minimum": 1, + "maximum": 1400 + }, + "thresholdReleasePercent": { + "title": "Memory Usage Threshold Release (Percentage of Available Threshold Memory)", + "description": "Once memory usage reaches value described in \"memoryThresholdPercent\", processing may temporarily cease until levels return below threshold * \"thresholdReleasePercent\". Defaults to 90%.", + "type": "integer", + "minimum": 1, + "maximum": 100, + "default": 90 + } + }, + "additionalProperties": false, + "anyOf": [ + { "required": ["interval"] }, + { "required": ["logFrequency"] }, + { "required": ["logLevel"] }, + { "required": ["memoryThresholdPercent"] }, + { "required": ["osFreeMemory"] }, + { "required": ["provisionedMemory"] }, + { "required": ["thresholdReleasePercent"] } + ] + }, + "runtime": { + "title": "Runtime Configuration Options. EXPERIMENTAL!", + "description": "Runtime Configuration Options (V8). Allows to tune the V8 configuration. EXPERIMENTAL!", + "type": "object", + "properties": { + "enableGC": { + "title": "Enables the V8 garbage collector. EXPERIMENTAL!", + "description": "Grants Telemetry Streaming access to the V8 garbage collector, which helps Telemetry Streaming cleanup memory when usage exceeds thresholds. EXPERIMENTAL!", + "type": "boolean", + "default": false + }, + "maxHeapSize": { + "title": "Increases the V8 maximum heap size. 
EXPERIMENTAL!", + "description": "Increases V8 maximum heap size to enable more memory usage and prevent Heap-Out-Of-Memory error. EXPERIMENTAL!", + "type": "number", + "minimum": 1400, + "default": 1400 + } + }, + "additionalProperties": false, + "anyOf": [ + { "required": ["enableGC"] }, + { "required": ["maxHeapSize"] } + ] + } + }, + "additionalProperties": false + }, + "else": {} + } + ] +} \ No newline at end of file diff --git a/src/schema/1.35.0/endpoints_schema.json b/src/schema/1.35.0/endpoints_schema.json new file mode 100644 index 00000000..1e12b4b3 --- /dev/null +++ b/src/schema/1.35.0/endpoints_schema.json @@ -0,0 +1,190 @@ +{ + "$id": "endpoints_schema.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Telemetry Streaming Endpoints schema", + "description": "", + "type": "object", + "definitions": { + "endpoint": { + "title": "Telemetry Endpoint", + "description": "", + "type": "object", + "properties": { + "enable": { + "title": "Enable endpoint", + "default": true, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/enable" + } + ] + }, + "name": { + "title": "Endpoint name", + "type": "string", + "minLength": 1 + }, + "numericalEnums": { + "title": "SNMP Options: print enums numerically", + "type": "boolean" + }, + "path": { + "title": "Path to query data from", + "type": "string", + "minLength": 1 + }, + "protocol": { + "title": "Endpoint protocol used to fetch data", + "type": "string", + "enum": ["http", "snmp"], + "default": "http" + } + }, + "allOf": [ + { + "if": { "properties": { "protocol": { "const": "snmp" } } }, + "then": { + "properties": { + "numericalEnums": { + "default": false + }, + "path": { + "pattern": "^[a-zA-Z0-9.]+$" + } + } + }, + "else": { + "not": { + "required": ["numericalEnums"] + } + } + } + ], + "additionalProperties": false + }, + "endpoints": { + "title": "Telemetry Endpoints", + "description": "", + "type": "object", + "properties": { + "enable": { + "title": "Enable endpoints", + "default": true, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/enable" + } + ] + }, + "basePath": { + "title": "Base Path", + "description": "Optional base path value to prepend to each individual endpoint paths", + "type": "string", + "default": "" + }, + "items": { + "title": "Items", + "description": "Object with each property an endpoint with their own properties", + "type": "object", + "additionalProperties": { + "allOf": [ + { + "$ref": "#/definitions/endpoint" + }, + { + "required": [ "path"] + } + ] + }, + "minProperties": 1 + } + } + }, + "endpointsObjectRef": { + "allOf": [ + { + "$ref": "#/definitions/endpoints" + }, + { + "properties": { + "enable": {}, + "basePath": {}, + "items": {} + }, + "required": [ "items" ], + "additionalProperties": false + } + ] + }, + "endpointObjectRef": { + "allOf": [ + { + "$ref": "#/definitions/endpoint" + }, + { + "properties": { + "enable": {}, + "name": {}, + "numericalEnums": {}, + "path": {}, + "protocol": {} + }, + "required": [ "name", "path" ], + "additionalProperties": false + } + ] + }, + "endpointsPointerRef": { + "title": "Telemetry_Endpoints Name", + "description": "Name of the Telemetry_Endpoints object", + "type": "string", + "declarationClass": "Telemetry_Endpoints", + "minLength": 1 + }, + "endpointsItemPointerRef": { + "title": "Telemetry_Endpoints Name and Item Key", + "description": "Name of the Telemetry_Endpoints object and the endpoint item key, e.g endpointsA/item1", + "type": "string", + "declarationClassProp": { + "path" :"Telemetry_Endpoints/items", + 
"partsNum": 2 + }, + "minLength": 1 + } + }, + "allOf": [ + { + "if": { "properties": { "class": { "const": "Telemetry_Endpoints" } } }, + "then": { + "required": [ + "class", + "items" + ], + "properties": { + "class": { + "title": "Class", + "description": "Telemetry Streaming Endpoints class", + "type": "string", + "enum": [ "Telemetry_Endpoints" ] + } + }, + "allOf": [ + { + "$comment": "This allows enforcement of no additional properties in this nested schema - could reuse above properties but prefer a separate block", + "properties": { + "class": {}, + "enable": {}, + "basePath": {}, + "items": {} + }, + "additionalProperties": false + }, + { + "$ref": "#/definitions/endpoints" + } + ] + }, + "else": {} + } + ] +} \ No newline at end of file diff --git a/src/schema/1.35.0/ihealth_poller_schema.json b/src/schema/1.35.0/ihealth_poller_schema.json new file mode 100644 index 00000000..d5bcb9cf --- /dev/null +++ b/src/schema/1.35.0/ihealth_poller_schema.json @@ -0,0 +1,238 @@ +{ + "$id": "ihealth_poller_schema.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Telemetry Streaming iHealth Poller schema", + "description": "", + "type": "object", + "definitions": { + "time24hr": { + "title": "Time in HH:MM, 24hr", + "description": "", + "type": "string", + "pattern": "^([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]?$" + }, + "iHealthPoller": { + "$comment": "system_schema.json should be updated when new property added", + "title": "iHealth Poller", + "description": "", + "type": "object", + "required": [ + "interval", + "username", + "passphrase" + ], + "properties": { + "enable": { + "default": true, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/enable" + } + ] + }, + "trace": { + "default": false, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/trace" + } + ] + }, + "proxy": { + "title": "Proxy configuration", + "properties": { + "port": { + "default": 80 + }, + "protocol": { + "default": "http" + } + }, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/proxy" + } + ] + }, + "username": { + "title": "iHealth Username", + "$ref": "base_schema.json#/definitions/username" + }, + "passphrase": { + "title": "iHealth Passphrase", + "$ref": "base_schema.json#/definitions/secret" + }, + "downloadFolder": { + "title": "Directory to download Qkview to", + "description": "", + "type": "string", + "minLength": 1, + "pathExists": true + }, + "interval": { + "title": "Operating interval", + "description": "" , + "type": "object", + "properties": { + "timeWindow": { + "title": "Two or more hours window in 24hr format that iHealth data can be sent", + "description": "", + "type": "object", + "properties": { + "start": { + "title": "Time when the window starts", + "$ref": "#/definitions/time24hr" + }, + "end": { + "title": "Time when the window ends", + "$ref": "#/definitions/time24hr" + } + }, + "timeWindowMinSize": 120, + "required": [ "start", "end" ], + "additionalProperties": false + }, + "frequency": { + "title": "Interval frequency", + "description": "", + "type": "string", + "default": "daily", + "enum": [ + "daily", + "weekly", + "monthly" + ] + } + + }, + "required": [ + "timeWindow" + ], + "allOf": [ + { + "if": { "properties": { "frequency": { "const": "daily" } } }, + "then": { + "properties": { + "timeWindow": {}, + "frequency": {} + }, + "additionalProperties": false + } + }, + { + "if": { "properties": { "frequency": { "const": "weekly" } } }, + "then": { + "properties": { + "timeWindow": {}, + "frequency": {}, + "day": { + "title": "", + 
"description": "", + "oneOf": [ + { + "type": "string", + "pattern": "^([mM]onday|[tT]uesday|[wW]ednesday|[tT]hursday|[fF]riday|[sS]aturday|[sS]unday)$" + }, + { + "$comment": "0 and 7 eq. Sunday", + "type": "integer", + "minimum": 0, + "maximum": 7 + } + ] + } + }, + "required": [ "day" ], + "additionalProperties": false + } + }, + { + "if": { "properties": { "frequency": { "const": "monthly" } } }, + "then": { + "properties": { + "timeWindow": {}, + "frequency": {}, + "day": { + "title": "", + "description": "", + "type": "integer", + "minimum": 1, + "maximum": 31 + } + }, + "required": [ "day" ], + "additionalProperties": false + } + } + ] + } + } + }, + "iHealthPollerPointerRef": { + "type": "string", + "minLength": 1, + "declarationClass": "Telemetry_iHealth_Poller" + }, + "iHealthPollerObjectRef": { + "allOf": [ + { + "$comment": "This allows enforcement of no additional properties in this nested schema - could reuse above properties but prefer a separate block", + "properties": { + "enable": {}, + "trace": {}, + "interval": {}, + "proxy": {}, + "username": {}, + "passphrase": {}, + "downloadFolder": {} + }, + "additionalProperties": false + }, + { + "$ref": "ihealth_poller_schema.json#/definitions/iHealthPoller" + } + ] + } + }, + "allOf": [ + { + "if": { "properties": { "class": { "const": "Telemetry_iHealth_Poller" } } }, + "then": { + "required": [ + "class", + "username", + "passphrase" + ], + "properties": { + "class": { + "title": "Class", + "description": "Telemetry Streaming iHealth Poller class", + "type": "string", + "enum": [ "Telemetry_iHealth_Poller" ] + } + }, + "allOf": [ + { + "$comment": "This allows enforcement of no additional properties in this nested schema - could reuse above properties but prefer a separate block", + "properties": { + "class": {}, + "enable": {}, + "trace": {}, + "interval": {}, + "proxy": {}, + "username": {}, + "passphrase": {}, + "downloadFolder": {} + }, + "additionalProperties": false + }, + { + "$ref": "#/definitions/iHealthPoller" + } + ] + }, + "else": {}, + "$comment": "Telemetry_iHealth_Poller should be either built-in within Telemetry_System or referenced by Telemetry_System(s), otherwise it will be treated as disabled" + } + ] +} \ No newline at end of file diff --git a/src/schema/1.35.0/listener_schema.json b/src/schema/1.35.0/listener_schema.json new file mode 100644 index 00000000..d3b9434e --- /dev/null +++ b/src/schema/1.35.0/listener_schema.json @@ -0,0 +1,85 @@ +{ + "$id": "listener_schema.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Telemetry Streaming event listener schema", + "description": "", + "type": "object", + "allOf": [ + { + "if": { "properties": { "class": { "const": "Telemetry_Listener" } } }, + "then": { + "required": [ + "class" + ], + "properties": { + "class": { + "title": "Class", + "description": "Telemetry Streaming Event Listener class", + "type": "string", + "enum": [ "Telemetry_Listener" ] + }, + "enable": { + "default": true, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/enable" + } + ] + }, + "trace": { + "default": false, + "oneOf": [ + { + "$ref": "base_schema.json#/definitions/trace" + }, + { + "$ref": "base_schema.json#/definitions/traceV2" + } + ] + }, + "port": { + "minimum": 1024, + "maximum": 65535, + "default": 6514, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/port" + } + ] + }, + "tag": { + "$comment": "Deprecated! 
Use actions with a setTag action.", + "allOf": [ + { + "$ref": "base_schema.json#/definitions/tag" + } + ] + }, + "match": { + "default": "", + "allOf": [ + { + "$ref": "base_schema.json#/definitions/match" + } + ] + }, + "actions": { + "title": "Actions", + "description": "Actions to be performed on the listener.", + "default": [ + { + "setTag": { + "tenant": "`T`", + "application": "`A`" + } + } + ], + "allOf": [{ "$ref": "actions_schema.json#/definitions/inputDataStreamActionsChain" }] + } + }, + "additionalProperties": false + }, + "else": {} + } + ] +} \ No newline at end of file diff --git a/src/schema/1.35.0/namespace_schema.json b/src/schema/1.35.0/namespace_schema.json new file mode 100644 index 00000000..f6cb09fc --- /dev/null +++ b/src/schema/1.35.0/namespace_schema.json @@ -0,0 +1,92 @@ +{ + "$id": "namespace_schema.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Telemetry Streaming Namespace schema", + "description": "", + "type": "object", + "definitions": { + "namespace": { + "required": [ + "class" + ], + "type": "object", + "properties": { + "class": { + "title": "Class", + "description": "Telemetry Streaming Namespace class", + "type": "string", + "enum": [ "Telemetry_Namespace" ] + } + }, + "additionalProperties": { + "$comment": "All objects supported under a Telemetry Namespace", + "properties": { + "class": { + "title": "Class", + "type": "string", + "enum": [ + "Telemetry_System", + "Telemetry_System_Poller", + "Telemetry_Listener", + "Telemetry_Consumer", + "Telemetry_Pull_Consumer", + "Telemetry_iHealth_Poller", + "Telemetry_Endpoints", + "Shared" + ] + } + }, + "allOf": [ + { + "$ref": "system_schema.json#" + }, + { + "$ref": "system_poller_schema.json#" + }, + { + "$ref": "listener_schema.json#" + }, + { + "$ref": "consumer_schema.json#" + }, + { + "$ref": "pull_consumer_schema.json#" + }, + { + "$ref": "ihealth_poller_schema.json#" + }, + { + "$ref": "endpoints_schema.json#" + }, + { + "$ref": "shared_schema.json#" + } + ] + } + } + }, + "allOf": [ + { + "if": { "properties": { "class": { "const": "Telemetry_Namespace" } } }, + "then": { + "required": [ + "class" + ], + "properties": { + "class": { + "title": "Class", + "description": "Telemetry Streaming Namespace class", + "type": "string", + "enum": [ "Telemetry_Namespace" ] + } + }, + "allOf": [ + { + "$ref": "#/definitions/namespace" + } + ] + }, + "else": {} + } + ] +} \ No newline at end of file diff --git a/src/schema/1.35.0/pull_consumer_schema.json b/src/schema/1.35.0/pull_consumer_schema.json new file mode 100644 index 00000000..0747cbfd --- /dev/null +++ b/src/schema/1.35.0/pull_consumer_schema.json @@ -0,0 +1,101 @@ +{ + "$id": "pull_consumer_schema.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Telemetry Streaming Pull Consumer schema", + "description": "", + "type": "object", + "allOf": [ + { + "if": { "properties": { "class": { "const": "Telemetry_Pull_Consumer" } } }, + "then": { + "required": [ + "class", + "type", + "systemPoller" + ], + "properties": { + "class": { + "title": "Class", + "description": "Telemetry Streaming Pull Consumer class", + "type": "string", + "enum": [ "Telemetry_Pull_Consumer" ] + }, + "enable": { + "default": true, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/enable" + } + ] + }, + "trace": { + "default": false, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/trace" + } + ] + }, + "type": { + "title": "Type", + "description": "" , + "type": "string", + "enum": [ + "default", + "Prometheus" + 
] + }, + "systemPoller": { + "title": "Pointer to System Poller(s)", + "anyOf": [ + { + "$ref": "system_poller_schema.json#/definitions/systemPollerPointerRef" + }, + { + "type": "array", + "items": { + "anyOf": [ + { + "$ref": "system_poller_schema.json#/definitions/systemPollerPointerRef" + } + ] + }, + "minItems": 1 + } + ] + } + }, + "allOf": [ + { + "$comment": "This allows enforcement of no additional properties in this nested schema - could reuse above properties but prefer a separate block", + "properties": { + "class": {}, + "enable": {}, + "trace": {}, + "type": {}, + "systemPoller": {} + }, + "additionalProperties": false + }, + { + "if": { "properties": { "type": { "const": "default" } } }, + "then": { + "required": [], + "properties": {} + }, + "else": {} + }, + { + "if": { "properties": { "type": { "const": "Prometheus" } } }, + "then": { + "required": [], + "properties": {} + }, + "else": {} + } + ] + }, + "else": {} + } + ] +} diff --git a/src/schema/1.35.0/shared_schema.json b/src/schema/1.35.0/shared_schema.json new file mode 100644 index 00000000..aa96cb2e --- /dev/null +++ b/src/schema/1.35.0/shared_schema.json @@ -0,0 +1,50 @@ +{ + "$id": "shared_schema.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Telemetry streaming Shared schema", + "description": "", + "type": "object", + "allOf": [ + { + "if": { "properties": { "class": { "const": "Shared" } } }, + "then": { + "required": [ + "class" + ], + "properties": { + "class": { + "title": "Class", + "description": "Telemetry streaming Shared class", + "type": "string", + "enum": [ "Shared" ] + } + }, + "additionalProperties": { + "properties": { + "class": { + "title": "Class", + "type": "string", + "enum": [ + "Constants", + "Secret" + ] + } + }, + "allOf": [ + { + "if": { "properties": { "class": { "const": "Constants" } } }, + "then": { "$ref": "base_schema.json#/definitions/constants" }, + "else": {} + }, + { + "if": { "properties": { "class": { "const": "Secret" } } }, + "then": { "$ref": "base_schema.json#/definitions/secret" }, + "else": {} + } + ] + } + }, + "else": {} + } + ] +} \ No newline at end of file diff --git a/src/schema/1.35.0/system_poller_schema.json b/src/schema/1.35.0/system_poller_schema.json new file mode 100644 index 00000000..dcb3a454 --- /dev/null +++ b/src/schema/1.35.0/system_poller_schema.json @@ -0,0 +1,242 @@ +{ + "$id": "system_poller_schema.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Telemetry Streaming system poller schema", + "description": "", + "type": "object", + "definitions": { + "systemPoller": { + "$comment": "system_schema.json should be updated when new property added", + "title": "System Poller", + "description": "", + "type": "object", + "properties": { + "enable": { + "default": true, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/enable" + } + ] + }, + "interval": { + "title": "Collection interval (in seconds)", + "description": "If endpointList is specified, minimum=1. Without endpointList, minimum=60 and maximum=60000. Allows setting interval=0 to not poll on an interval.", + "type": "integer", + "default": 300 + }, + "trace": { + "$ref": "base_schema.json#/definitions/trace" + }, + "tag": { + "$comment": "Deprecated! 
Use actions with a setTag action.", + "allOf": [ + { + "$ref": "base_schema.json#/definitions/tag" + } + ] + }, + "actions": { + "title": "Actions", + "description": "Actions to be performed on the systemPoller.", + "default": [ + { + "setTag": { + "tenant": "`T`", + "application": "`A`" + } + } + ], + "allOf": [{ "$ref": "actions_schema.json#/definitions/inputDataStreamActionsChain" }] + }, + "endpointList": { + "title": "Endpoint List", + "description": "List of endpoints to use in data collection", + "oneOf": [ + { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "endpoints_schema.json#/definitions/endpointsPointerRef" + }, + { + "$ref": "endpoints_schema.json#/definitions/endpointsItemPointerRef" + }, + { + "if": { "required": [ "items" ]}, + "then": { + "$ref": "endpoints_schema.json#/definitions/endpointsObjectRef" + }, + "else": { + "$ref": "endpoints_schema.json#/definitions/endpointObjectRef" + } + } + + ] + }, + "minItems": 1 + }, + { + "$ref": "endpoints_schema.json#/definitions/endpointsPointerRef" + }, + { + "$ref": "endpoints_schema.json#/definitions/endpointsObjectRef" + } + ] + } + }, + "oneOf": [ + { + "allOf": [ + { + "if": { "required": [ "endpointList" ] }, + "then": { + "properties": { + "interval": { + "minimum": 1 + } + } + }, + "else": { + "properties":{ + "interval": { + "minimum": 60, + "maximum": 6000 + } + } + } + } + ] + }, + { + "allOf": [ + { + "properties": { + "interval": { + "enum": [0] + } + } + } + ] + } + ] + }, + "systemPollerPointerRef": { + "type": "string", + "minLength": 1, + "declarationClass": "Telemetry_System_Poller" + }, + "systemPollerObjectRef": { + "allOf": [ + { + "$comment": "This allows enforcement of no additional properties in this nested schema - could reuse above properties but prefer a separate block", + "properties": { + "enable": {}, + "trace": {}, + "interval": {}, + "tag": {}, + "actions": {}, + "endpointList": {} + }, + "additionalProperties": false + }, + { + "$ref": "#/definitions/systemPoller" + } + ] + } + }, + "allOf": [ + { + "if": { "properties": { "class": { "const": "Telemetry_System_Poller" } } }, + "then": { + "required": [ + "class" + ], + "properties": { + "class": { + "title": "Class", + "description": "Telemetry Streaming System Poller class", + "type": "string", + "enum": [ "Telemetry_System_Poller" ] + }, + "host": { + "$comment": "Deprecated! Use Telemetry_System to define target device", + "default": "localhost", + "allOf": [ + { + "$ref": "base_schema.json#/definitions/host" + } + ] + }, + "port": { + "$comment": "Deprecated! Use Telemetry_System to define target device", + "default": 8100, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/port" + } + ] + }, + "protocol": { + "$comment": "Deprecated! Use Telemetry_System to define target device", + "default": "http", + "allOf": [ + { + "$ref": "base_schema.json#/definitions/protocol" + } + ] + }, + "allowSelfSignedCert": { + "$comment": "Deprecated! Use Telemetry_System to define target device", + "title": "Allow Self-Signed Certificate", + "default": false, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/allowSelfSignedCert" + } + ] + }, + "enableHostConnectivityCheck": { + "$comment": "Deprecated! Use Telemetry_System to define target device", + "$ref": "base_schema.json#/definitions/enableHostConnectivityCheck" + }, + "username": { + "$comment": "Deprecated! Use Telemetry_System to define target device", + "$ref": "base_schema.json#/definitions/username" + }, + "passphrase": { + "$comment": "Deprecated! 
Use Telemetry_System to define target device", + "$ref": "base_schema.json#/definitions/secret" + } + }, + "allOf": [ + { + "$comment": "This allows enforcement of no additional properties in this nested schema - could reuse above properties but prefer a separate block", + "properties": { + "class": {}, + "enable": {}, + "trace": {}, + "interval": {}, + "tag": {}, + "host": {}, + "port": {}, + "protocol": {}, + "allowSelfSignedCert": {}, + "enableHostConnectivityCheck": {}, + "username": {}, + "passphrase": {}, + "actions": {}, + "endpointList": {} + }, + "additionalProperties": false + }, + { + "$ref": "#/definitions/systemPoller" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/schema/1.35.0/system_schema.json b/src/schema/1.35.0/system_schema.json new file mode 100644 index 00000000..cba58faa --- /dev/null +++ b/src/schema/1.35.0/system_schema.json @@ -0,0 +1,121 @@ +{ + "$id": "system_schema.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Telemetry Streaming System schema", + "description": "", + "type": "object", + "allOf": [ + { + "if": { "properties": { "class": { "const": "Telemetry_System" } } }, + "then": { + "required": [ + "class" + ], + "properties": { + "class": { + "title": "Class", + "description": "Telemetry Streaming System class", + "type": "string", + "enum": [ "Telemetry_System" ] + }, + "enable": { + "title": "Enable all pollers attached to device", + "default": true, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/enable" + } + ] + }, + "trace": { + "$ref": "base_schema.json#/definitions/trace" + }, + "host": { + "title": "System connection address", + "default": "localhost", + "allOf": [ + { + "$ref": "base_schema.json#/definitions/host" + } + ] + }, + "port": { + "title": "System connection port", + "default": 8100, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/port" + } + ] + }, + "protocol": { + "title": "System connection protocol", + "default": "http", + "allOf": [ + { + "$ref": "base_schema.json#/definitions/protocol" + } + ] + }, + "allowSelfSignedCert": { + "title": "Allow Self-Signed Certificate", + "default": false, + "allOf": [ + { + "$ref": "base_schema.json#/definitions/allowSelfSignedCert" + } + ] + }, + "enableHostConnectivityCheck": { + "$ref": "base_schema.json#/definitions/enableHostConnectivityCheck" + }, + "username": { + "title": "System Username", + "$ref": "base_schema.json#/definitions/username" + }, + "passphrase": { + "title": "System Passphrase", + "$ref": "base_schema.json#/definitions/secret" + }, + "systemPoller": { + "title": "System Poller declaration", + "oneOf": [ + { + "$ref": "system_poller_schema.json#/definitions/systemPollerPointerRef" + }, + { + "$ref": "system_poller_schema.json#/definitions/systemPollerObjectRef" + }, + { + "type": "array", + "items": { + "anyOf": [ + { + "$ref": "system_poller_schema.json#/definitions/systemPollerObjectRef" + }, + { + "$ref": "system_poller_schema.json#/definitions/systemPollerPointerRef" + } + ] + }, + "minItems": 1 + } + ] + }, + "iHealthPoller": { + "title": "iHealth Poller declaration", + "oneOf": [ + { + "$ref": "ihealth_poller_schema.json#/definitions/iHealthPollerPointerRef" + }, + { + "$ref": "ihealth_poller_schema.json#/definitions/iHealthPollerObjectRef" + } + ] + } + }, + "additionalProperties": false + } + } + ] +} \ No newline at end of file diff --git a/src/schema/latest/base_schema.json b/src/schema/latest/base_schema.json index 9b678705..6acb1db3 100644 --- a/src/schema/latest/base_schema.json +++ 
b/src/schema/latest/base_schema.json @@ -242,8 +242,8 @@ "description": "Version of ADC Declaration schema this declaration uses", "type": "string", "$comment": "IMPORTANT: In enum array, please put current schema version first, oldest-supported version last. Keep enum array sorted most-recent-first.", - "enum": [ "1.34.0", "1.33.0", "1.32.0", "1.31.0", "1.30.0", "1.29.0", "1.28.0", "1.27.1", "1.27.0", "1.26.0", "1.25.0", "1.24.0", "1.23.0", "1.22.0", "1.21.0", "1.20.1", "1.20.0", "1.19.0", "1.18.0", "1.17.0", "1.16.0", "1.15.0", "1.14.0", "1.13.0", "1.12.0", "1.11.0", "1.10.0", "1.9.0", "1.8.0", "1.7.0", "1.6.0", "1.5.0", "1.4.0", "1.3.0", "1.2.0", "1.1.0", "1.0.0", "0.9.0" ], - "default": "1.34.0" + "enum": [ "1.35.0", "1.34.0", "1.33.0", "1.32.0", "1.31.0", "1.30.0", "1.29.0", "1.28.0", "1.27.1", "1.27.0", "1.26.0", "1.25.0", "1.24.0", "1.23.0", "1.22.0", "1.21.0", "1.20.1", "1.20.0", "1.19.0", "1.18.0", "1.17.0", "1.16.0", "1.15.0", "1.14.0", "1.13.0", "1.12.0", "1.11.0", "1.10.0", "1.9.0", "1.8.0", "1.7.0", "1.6.0", "1.5.0", "1.4.0", "1.3.0", "1.2.0", "1.1.0", "1.0.0", "0.9.0" ], + "default": "1.35.0" }, "$schema": { "title": "Schema", diff --git a/src/schema/latest/controls_schema.json b/src/schema/latest/controls_schema.json index 1cdd8879..c990a93c 100644 --- a/src/schema/latest/controls_schema.json +++ b/src/schema/latest/controls_schema.json @@ -22,7 +22,7 @@ "title": "Logging Level", "description": "", "type": "string", - "default": "info", + "default": "debug", "enum": [ "verbose", "debug", @@ -61,6 +61,101 @@ "drop", "ring" ] + }, + "memoryMonitor": { + "title": "Memory Monitor configuration options", + "description": "Memory Monitor configuration options allow configuring thresholds for various parameters to help Telemetry Streaming avoid extreme conditions like Out-Of-Memory.", + "type": "object", + "properties": { + "interval": { + "title": "", + "description": "", + "enum": [ + "default", + "aggressive" + ], + "default": "default" + }, + "logFrequency": { + "title": "Logging Frequency (in sec.)", + "description": "Number of seconds to use to log information about memory usage. Defaults to 10 sec.", + "type": "integer", + "minimum": 1, + "default": 10 + }, + "logLevel": { + "title": "Logging Level", + "description": "Logging Level to use to log information about memory usage. Defaults to \"debug\"", + "default": "debug", + "allOf": [ + { "$ref": "#/allOf/0/then/properties/logLevel" } + ] + }, + "memoryThresholdPercent": { + "title": "Memory Usage Threshold (Percentage of Available Process Memory)", + "description": "Once memory usage reaches this value, processing may temporarily cease until levels return below threshold * \"thresholdReleasePercent\". Defaults to 90%. NOTE: this property is the same as the one on the parent object, but it takes precedence over the parent's value if specified.", + "type": "integer", + "minimum": 1, + "maximum": 100 + }, + "osFreeMemory": { + "title": "OS Free memory (in MB)", + "description": "Amount of OS free memory (in MB) below which processing may temporarily cease until levels return above the threshold. Defaults to 30 MB.", + "type": "integer", + "minimum": 1, + "default": 30 + }, + "provisionedMemory": { + "title": "Provisioned Memory for Application (in MB.)", + "description": "Amount of memory (in MB) that the application should not exceed. Once the limit is exceeded, processing may temporarily cease until levels return below the threshold. Defaults to 1400 MB.", + "type": "integer", + "minimum": 1, + "maximum": 1400 + }, + "thresholdReleasePercent": { + "title": "Memory Usage Threshold Release (Percentage of Available Threshold Memory)", + "description": "Once memory usage reaches the value described in \"memoryThresholdPercent\", processing may temporarily cease until levels return below threshold * \"thresholdReleasePercent\". Defaults to 90%.", + "type": "integer", + "minimum": 1, + "maximum": 100, + "default": 90 + } + }, + "additionalProperties": false, + "anyOf": [ + { "required": ["interval"] }, + { "required": ["logFrequency"] }, + { "required": ["logLevel"] }, + { "required": ["memoryThresholdPercent"] }, + { "required": ["osFreeMemory"] }, + { "required": ["provisionedMemory"] }, + { "required": ["thresholdReleasePercent"] } + ] + }, + "runtime": { + "title": "Runtime Configuration Options. EXPERIMENTAL!", + "description": "Runtime Configuration Options (V8). Allows tuning of the V8 configuration. EXPERIMENTAL!", + "type": "object", + "properties": { + "enableGC": { + "title": "Enables the V8 garbage collector. EXPERIMENTAL!", + "description": "Grants Telemetry Streaming access to the V8 garbage collector, which helps Telemetry Streaming clean up memory when usage exceeds thresholds. EXPERIMENTAL!", + "type": "boolean", + "default": false + }, + "maxHeapSize": { + "title": "Increases the V8 maximum heap size. EXPERIMENTAL!", + "description": "Increases V8 maximum heap size to enable more memory usage and prevent Heap-Out-Of-Memory error. EXPERIMENTAL!", + "type": "number", + "minimum": 1400, + "default": 1400 + } + }, + "additionalProperties": false, + "anyOf": [ + { "required": ["enableGC"] }, + { "required": ["maxHeapSize"] } + ] } }, "additionalProperties": false diff --git a/test/unit/constantsTests.js b/test/unit/constantsTests.js index 8c059c96..6c4700c8 100644 --- a/test/unit/constantsTests.js +++ b/test/unit/constantsTests.js @@ -57,12 +57,56 @@ describe('Constants', () => { }, APP_NAME: 'Telemetry Streaming', APP_THRESHOLDS: { - MONITOR_DISABLED: 'MONITOR_DISABLED', + MONITOR_DISABLED: 'MONITOR_DISABLED', // TODO: delete MEMORY: { + /** TODO: DELETE */ DEFAULT_MB: 1433, - DEFAULT_LIMIT_PERCENT: 90, OK: 'MEMORY_USAGE_OK', - NOT_OK: 'MEMORY_USAGE_HIGH' + NOT_OK: 'MEMORY_USAGE_HIGH', + /** TODO: DELETE END */ + + ARGRESSIVE_CHECK_INTERVALS: [ + { usage: 50, interval: 0.5 }, + { usage: 60, interval: 0.4 }, + { usage: 70, interval: 0.3 }, + { usage: 80, interval: 0.2 }, + { usage: 90, interval: 0.2 }, + { usage: 100, interval: 0.1 } + ], + DEFAULT_CHECK_INTERVALS: [ + { usage: 50, interval: 1.5 }, + { usage: 60, interval: 1.0 }, + { usage: 70, interval: 0.8 }, + { usage: 80, interval: 0.5 }, + { usage: 90, interval: 0.2 }, + { usage: 100, interval: 0.1 } + ], + // default GC call interval in seconds + DEFAULT_GC_INTERVAL: 60, + // default check interval in seconds + DEFAULT_INTERVAL: 5, + DEFAULT_HEAP_SIZE: 1400, + // 90% should be enough for everyone + DEFAULT_LIMIT_PERCENT: 90, + DEFAULT_LOG_FREQ: 10 * 1000, + DEFAULT_LOG_LEVEL: 'debug', + // min amount of system's free memory + DEFAULT_MIN_FREE_MEM: 30, + // default minimal check interval in seconds when mem usage is >= 100% + DEFAULT_MIN_INTERVAL: 0.1, + // default percent, when exceed that value app will disable processing + DEFAULT_OK_USAGE_PERCENT: 100, + // 90% should be enough to avoid processing state flapping + DEFAULT_RELEASE_PERCENT: 90, + STATE: { + OK: 'MEMORY_USAGE_BELOW_THRESHOLD', + NOT_OK: 'MEMORY_USAGE_ABOVE_THRESHOLD' + }, + TREND: { + DOWN:
'MEMORY_USAGE_GOES_DOWN', + NO_CHANGE: 'MEMORY_USAGE_NO_CHANGE', + UP: 'MEMORY_USAGE_GOES_UP' + } } }, CONFIG_CLASSES: { @@ -118,6 +162,7 @@ describe('Constants', () => { EVENT_LISTENER: { PARSER_MODE: 'buffer', // default parsing mode PARSER_MAX_ITERS_PER_CHECK: 1000, // how often to check the time spent on data processing + PARSER_MAX_KV_PAIRS: 2000, // max number of key=value pairs per message PARSER_MAX_MSG_SIZE: 16 * 1024, // max message size in chars (string) or bytes (buffer) PARSER_PREALLOC: 1000, // preallocated buffer size NETWORK_SERVICE_RESTART_DELAY: 10 * 1000, // 10 sec. delay before restart (units - ms.) diff --git a/test/unit/dataPipelineTests.js b/test/unit/dataPipelineTests.js index ef7082be..001fa136 100644 --- a/test/unit/dataPipelineTests.js +++ b/test/unit/dataPipelineTests.js @@ -24,13 +24,15 @@ const sinon = require('sinon'); const assert = require('./shared/assert'); const sourceCode = require('./shared/sourceCode'); const stubs = require('./shared/stubs'); +const testUtil = require('./shared/util'); const actionProcessor = sourceCode('src/lib/actionProcessor'); +const configWorker = sourceCode('src/lib/config'); const constants = sourceCode('src/lib/constants'); const consumers = sourceCode('src/lib/consumers'); const dataPipeline = sourceCode('src/lib/dataPipeline'); const forwarder = sourceCode('src/lib/forwarder'); -const monitor = sourceCode('src/lib/utils/monitor'); +const ResourceMonitor = sourceCode('src/lib/resourceMonitor'); const EVENT_TYPES = constants.EVENT_TYPES; @@ -311,7 +313,29 @@ describe('Data Pipeline', () => { }); describe('monitor "on check" event', () => { + let clock; let coreStub; + let resourceMonitor; + + const defaultDeclaration = { + class: 'Telemetry', + My_System: { + class: 'Telemetry_System', + trace: true, + systemPoller: [ + { + interval: 180 + }, + { + interval: 200 + } + ] + }, + My_Poller: { + class: 'Telemetry_System_Poller', + interval: 0 + } + }; const dataCtx = { data: { @@ -323,17 +347,6 @@ describe('Data Pipeline', () => { destinationIds: [1234, 6789] }; - const dataCtx2 = { - data: { - EOCTimestamp: '1556592720', - AggrInterval: '30', - HitCount: '3', - telemetryEventCategory: 'AVR' - }, - type: EVENT_TYPES.AVR_EVENT, - destinationIds: [4564] - }; - const options = { actions: [ { @@ -344,6 +357,18 @@ describe('Data Pipeline', () => { }; beforeEach(() => { + clock = stubs.clock(); + coreStub = stubs.default.coreStub({}); + resourceMonitor = new ResourceMonitor(); + + const appCtx = { + configMgr: configWorker, + resourceMonitor + }; + + resourceMonitor.initialize(appCtx); + dataPipeline.initialize(appCtx); + sinon.stub(consumers, 'getConsumers').returns([ { name: 'consumer1', @@ -358,44 +383,36 @@ describe('Data Pipeline', () => { id: 6789 } ]); - coreStub = stubs.default.coreStub({ logger: true }); + return resourceMonitor.start() + .then(() => Promise.all([ + configWorker.processDeclaration(testUtil.deepCopy(defaultDeclaration)), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 30 }) + ])); }); - it('should not forward when memory thresholds reached and log info for skipped data', () => monitor.safeEmitAsync('check', constants.APP_THRESHOLDS.MEMORY.NOT_OK) - .then(() => dataPipeline.process(dataCtx, options)) - .then(() => { - assert.strictEqual(forwardFlag, false, 'should not call forwarder'); - assert.strictEqual(dataPipeline.isEnabled(), false, 'should disable data pipeline'); - assert.includeMatch(coreStub.logger.messages.warning, 'MEMORY_USAGE_HIGH. 
Incoming data will not be forwarded'); - assert.includeMatch(coreStub.logger.messages.warning, 'Skipped Data - Category: "LTM" | Consumers: ["consumer1","consumer3"] | Addtl Info: "event_timestamp": "2019-01-01:01:01.000Z"'); - })); - - it('should re-enable when memory thresholds return to normal', () => monitor.safeEmitAsync('check', constants.APP_THRESHOLDS.MEMORY.NOT_OK) - .then(() => dataPipeline.process(dataCtx, options)) - .then(() => monitor.safeEmitAsync('check', constants.APP_THRESHOLDS.MEMORY.OK)) - .then(() => dataPipeline.process(dataCtx, options)) - .then(() => { - assert.strictEqual(forwardFlag, true, 'should call forwarder'); - assert.strictEqual(dataPipeline.isEnabled(), true, 'should enable data pipeline'); - assert.includeMatch(coreStub.logger.messages.warning, 'MEMORY_USAGE_OK. Resuming data pipeline processing.'); - })); + it('should not forward when memory thresholds reached and log info for skipped data', () => { + coreStub.resourceMonitorUtils.osAvailableMem.free = 10; + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + .then(() => dataPipeline.process(dataCtx, options)) + .then(() => { + assert.strictEqual(forwardFlag, false, 'should not call forwarder'); + assert.strictEqual(dataPipeline.isEnabled(), false, 'should disable data pipeline'); + }); + }); - it('should only log when status changed', () => monitor.safeEmitAsync('check', constants.APP_THRESHOLDS.MEMORY.OK) - .then(() => dataPipeline.process(dataCtx, options)) - .then(() => { - // default threshold ok, so emitting an OK should not trigger a log - assert.isTrue(coreStub.logger.proxy_warning.notCalled); - return monitor.safeEmitAsync('check', constants.APP_THRESHOLDS.MEMORY.NOT_OK); - }) - .then(() => dataPipeline.process(dataCtx, options)) - .then(() => { - assert.includeMatch(coreStub.logger.messages.warning, 'MEMORY_USAGE_HIGH. 
Incoming data will not be forwarded.'); - assert.includeMatch(coreStub.logger.messages.warning, 'Skipped Data - Category: "LTM" | Consumers: ["consumer1","consumer3"] | Addtl Info: "event_timestamp": "2019-01-01:01:01.000Z"'); - return monitor.safeEmitAsync('check', constants.APP_THRESHOLDS.MEMORY.NOT_OK); - }) - .then(() => dataPipeline.process(dataCtx2, options)) - .then(() => { - assert.includeMatch(coreStub.logger.messages.warning, 'Skipped Data - Category: "AVR" | Consumers: ["consumer2"] | Addtl Info: "EOCTimestamp": "1556592720"'); - })); + it('should re-enable when memory thresholds return to normal', () => { + coreStub.resourceMonitorUtils.osAvailableMem.free = 10; + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + .then(() => dataPipeline.process(dataCtx, options)) + .then(() => { + coreStub.resourceMonitorUtils.osAvailableMem.free = 500; + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => dataPipeline.process(dataCtx, options)) + .then(() => { + assert.strictEqual(forwardFlag, true, 'should call forwarder'); + assert.strictEqual(dataPipeline.isEnabled(), true, 'should enable data pipeline'); + }); + }); }); }); diff --git a/test/unit/declaration/classControlsTests.js b/test/unit/declaration/classControlsTests.js index 116f2a00..c47eb983 100644 --- a/test/unit/declaration/classControlsTests.js +++ b/test/unit/declaration/classControlsTests.js @@ -21,9 +21,8 @@ const moduleCache = require('../shared/restoreCache')(); const sinon = require('sinon'); -const assert = require('../shared/assert'); const common = require('./common'); -const declValidator = require('./common').validate; +const schemaValidationUtil = require('../shared/schemaValidation'); moduleCache.remember(); @@ -40,203 +39,221 @@ describe('Declarations -> Controls', () => { sinon.restore(); }); - describe('logLevel', () => { + schemaValidationUtil.generateSchemaBasicTests( + (decl) => validateMinimal(decl), + { + class: 'Controls' + }, [ { - logLevel: 'verbose', - expectedToPass: true + property: 'logLevel', + enumTests: { + allowed: ['verbose', 'debug', 'info', 'error'], + notAllowed: ['my-log-level', 'warning'] + }, + defaultValueTests: { + defaultValue: 'debug' + } }, { - logLevel: 'debug', - expectedToPass: true + property: 'debug', + booleanTests: true, + valueTests: { + invalid: 'debug' + }, + defaultValueTests: { + defaultValue: false + } }, { - logLevel: 'info', - expectedToPass: true + property: 'listenerMode', + enumTests: { + allowed: ['buffer', 'string'], + notAllowed: ['my-mode', 10] + } }, { - logLevel: 'error', - expectedToPass: true + property: 'listenerStrategy', + enumTests: { + allowed: ['drop', 'ring'], + notAllowed: ['my-mode', 10] + } }, { - logLevel: 'invalidValue', - expectedToPass: false + property: 'memoryThresholdPercent', + defaultValueTests: { + defaultValue: 90 + }, + numberRangeTests: { + minimum: 1, + maximum: 100 + } } - ].forEach((testCase) => { - it(`should ${testCase.expectedToPass ? 
'' : 'not '}allow to set "logLevel" to "${testCase.logLevel}"`, () => { - const data = { - class: 'Telemetry', - Controls: { - class: 'Controls', - logLevel: testCase.logLevel - } - }; - if (testCase.expectedToPass) { - return declValidator(data) - .then((validConfig) => { - assert.strictEqual(validConfig.Controls.logLevel, testCase.logLevel, `'should match "${testCase.logLevel}"`); - }); - } - return assert.isRejected(declValidator(data), /logLevel.*should be equal to one of the allowed value/); - }); - }); - }); - - describe('listenerMode', () => { + ] + ); + schemaValidationUtil.generateSchemaBasicTests( + (decl) => validateMinimal(decl), + { + class: 'Controls', + memoryMonitor: { + memoryThresholdPercent: 10 + } + }, [ { - listenerMode: 'string', - expectedToPass: true - }, - { - listenerMode: 'buffer', - expectedToPass: true + property: 'memoryMonitor', + optionalPropTests: true, + additionalPropsTests: { + allowed: { + interval: 'aggressive', + logFrequency: 10, + logLevel: 'debug', + osFreeMemory: 10, + provisionedMemory: 10 + }, + notAllowed: { + something: 'else' + } + }, + defaultValueTests: { + defaultValue: undefined + } }, { - listenerMode: 'other', - expectedToPass: false + property: 'memoryMonitor.interval', + enumTests: { + allowed: ['default', 'aggressive'], + notAllowed: ['my-log-level', 'warning'] + }, + defaultValueTests: { + defaultValue: 'default' + } }, { - listenerMode: 'mode', - expectedToPass: false - } - ].forEach((testCase) => { - it(`should ${testCase.expectedToPass ? '' : 'not '}allow to set "listenerMode" to "${testCase.listenerMode}"`, () => { - const data = { - class: 'Telemetry', - Controls: { - class: 'Controls', - listenerMode: testCase.listenerMode - } - }; - if (testCase.expectedToPass) { - return declValidator(data) - .then((validConfig) => { - assert.strictEqual(validConfig.Controls.listenerMode, testCase.listenerMode, `'should match "${testCase.listenerMode}"`); - }); - } - return assert.isRejected(declValidator(data), /listenerMode.*should be equal to one of the allowed value/); - }); - }); - }); - describe('listenerStrategy', () => { - [ - { - listenerStrategy: 'drop', - expectedToPass: true + property: 'memoryMonitor.logFrequency', + defaultValueTests: { + defaultValue: 10 + }, + numberRangeTests: { + minimum: 1 + } }, { - listenerStrategy: 'ring', - expectedToPass: true + property: 'memoryMonitor.logLevel', + enumTests: { + allowed: ['verbose', 'debug', 'info', 'error'], + notAllowed: ['my-log-level', 'warning'] + }, + defaultValueTests: { + defaultValue: 'debug' + } }, { - listenerStrategy: 'other', - expectedToPass: false + property: 'memoryMonitor.memoryThresholdPercent', + defaultValueTests: { + defaultValue: undefined + }, + numberRangeTests: { + minimum: 1, + maximum: 100 + } }, { - listenerStrategy: 'mode', - expectedToPass: false - } - ].forEach((testCase) => { - it(`should ${testCase.expectedToPass ? 
'' : 'not '}allow to set "listenerStrategy" to "${testCase.listenerStrategy}"`, () => { - const data = { - class: 'Telemetry', - Controls: { - class: 'Controls', - listenerStrategy: testCase.listenerStrategy - } - }; - if (testCase.expectedToPass) { - return declValidator(data) - .then((validConfig) => { - assert.strictEqual(validConfig.Controls.listenerStrategy, testCase.listenerStrategy, `'should match "${testCase.listenerStrategy}"`); - }); - } - return assert.isRejected(declValidator(data), /listenerStrategy.*should be equal to one of the allowed value/); - }); - }); - }); - - describe('debug', () => { - [ - { - debug: true, - expectedToPass: true + property: 'memoryMonitor.osFreeMemory', + defaultValueTests: { + defaultValue: 30 + }, + numberRangeTests: { + minimum: 1 + } }, { - debug: false, - expectedToPass: true + property: 'memoryMonitor.provisionedMemory', + defaultValueTests: { + defaultValue: undefined + }, + numberRangeTests: { + minimum: 1, + maximum: 1400 + } }, { - debug: 'invalidValue', - expectedToPass: false + property: 'memoryMonitor.thresholdReleasePercent', + defaultValueTests: { + defaultValue: 90 + }, + numberRangeTests: { + minimum: 1, + maximum: 100 + } } - ].forEach((testCase) => { - it(`should ${testCase.expectedToPass ? '' : 'not '}allow to set "debug" to "${testCase.debug}"`, () => { - const data = { - class: 'Telemetry', - Controls: { - class: 'Controls', - debug: testCase.debug - } - }; - if (testCase.expectedToPass) { - return declValidator(data) - .then((validConfig) => { - assert.strictEqual(validConfig.Controls.debug, testCase.debug, `'should match "${testCase.debug}"`); - }); - } - return assert.isRejected(declValidator(data), /debug.*should be boolean/); - }); - }); - }); + ] + ); - describe('memoryThresholdPercent', () => { + schemaValidationUtil.generateSchemaBasicTests( + (decl) => validateMinimal(decl), + { + class: 'Controls', + runtime: { + enableGC: true, + maxHeapSize: 1400 + } + }, [ { - memoryThresholdPercent: 1, - expectedToPass: true - }, - { - memoryThresholdPercent: 100, - expectedToPass: true - }, - { - memoryThresholdPercent: 50, - expectedToPass: true - }, - { - memoryThresholdPercent: 101, - expectedToPass: false, - errorMsg: /memoryThresholdPercent.*should be <= 100/ + property: 'runtime', + optionalPropTests: true, + additionalPropsTests: { + allowed: { + enableGC: true, + maxHeapSize: 1400 + }, + notAllowed: { + something: 'else', + memoryAllocator: 'default' + } + }, + defaultValueTests: { + defaultValue: undefined + } }, { - memoryThresholdPercent: 0, - expectedToPass: false, - errorMsg: /memoryThresholdPercent.*should be >= 1/ + property: 'runtime.enableGC', + booleanTests: true, + valueTests: { + invalid: 'debug' + }, + defaultValueTests: { + defaultValue: false + } }, { - memoryThresholdPercent: 'invalidValue', - expectedToPass: false, - errorMsg: /memoryThresholdPercent.*should be integer/ + property: 'runtime.maxHeapSize', + defaultValueTests: { + defaultValue: 1400 + }, + numberRangeTests: { + minimum: 1400 + } } - ].forEach((testCase) => { - it(`should ${testCase.expectedToPass ? 
'' : 'not '}allow to set "memoryThresholdPercent" to "${testCase.memoryThresholdPercent}"`, () => { - const data = { - class: 'Telemetry', - Controls: { - class: 'Controls', - memoryThresholdPercent: testCase.memoryThresholdPercent - } - }; - if (testCase.expectedToPass) { - return declValidator(data) - .then((validConfig) => { - assert.strictEqual(validConfig.Controls.memoryThresholdPercent, testCase.memoryThresholdPercent, `'should match "${testCase.memoryThresholdPercent}"`); - }); - } - return assert.isRejected(declValidator(data), testCase.errorMsg); - }); - }); - }); + ] + ); }); + +function validateMinimal(controlsProps, expectedProps, addtlContext) { + return common.validatePartOfIt( + { + class: 'Telemetry', + controls: { + class: 'Controls' + } + }, + 'controls', + controlsProps, + { + class: 'Controls' + }, + expectedProps, + addtlContext + ); +} diff --git a/test/unit/eventListener/data/parserTestsData.js b/test/unit/eventListener/data/parserTestsData.js index 1e965fbe..ee12556a 100644 --- a/test/unit/eventListener/data/parserTestsData.js +++ b/test/unit/eventListener/data/parserTestsData.js @@ -16,6 +16,8 @@ 'use strict'; +/* eslint-disable prefer-template */ + module.exports = { /** * Set of data to check actual and expected results only. @@ -31,24 +33,63 @@ module.exports = { name: 'simple multi-part data', chunks: [ 'chunk1', - 'chunk2', + 'chunk2$F', 'chunk3', 'chunk4', 'chunk5', 'chunk6', + '$F5telemet$yEventCategory', 'chunk7\n' ], expectedData: [ - 'chunk1chunk2chunk3chunk4chunk5chunk6chunk7' + 'chunk1chunk2$Fchunk3chunk4chunk5chunk6$F5telemet$yEventCategorychunk7' + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 13 + ] + }, + { + name: 'work with UTF-8', + chunks: [ + 'привет', + 'chunk2$F', + 'мир', + 'chunk4', + 'chunk5', + 'chunk6', + '$F5telemet$yEventCategory', + 'chunk7\n' + ], + expectedData: [ + 'приветchunk2$Fмирchunk4chunk5chunk6$F5telemet$yEventCategorychunk7' + ], + mayHaveKeyValuePairs: [ + null ] }, { name: 'single syslog message', chunks: [ - '<0>Jul 6 22:37:15 bigip14.1.2.test BigIP:EOCtimestamp="1594100235",Microtimestamp="1594100235358418",errdefs_msgno="22327305",Hostname="bigip14.1.2.test",SlotId="0",globalBigiqConf="N/A",ObjectTagsList="N/A",pool_name="/Common/Shared/telemetry",errdefs_msg_name="pool modified",state="enabled",pool_description="",status_reason="",min_active_members="1",availability_state="offline",available_members="0",up_members="0"' + '<0>Jul 6 22:37:15 bigip14.1.2.test $F5telemetryEventCategory="test",BigIP:EOCtimestamp="1594100235",Microtimestamp="1594100235358418",errdefs_msgno="22327305",Hostname="bigip14.1.2.test",SlotId="0",globalBigiqConf="N/A",ObjectTagsList="N/A",pool_name="/Common/Shared/telemetry",errdefs_msg_name="pool modified",state="enabled",pool_description="",status_reason="",min_active_members="1",availability_state="off,lin=e",available_members="0",up_members="0",$F5telemetryEventCategory="test"' ], expectedData: [ - '<0>Jul 6 22:37:15 bigip14.1.2.test BigIP:EOCtimestamp="1594100235",Microtimestamp="1594100235358418",errdefs_msgno="22327305",Hostname="bigip14.1.2.test",SlotId="0",globalBigiqConf="N/A",ObjectTagsList="N/A",pool_name="/Common/Shared/telemetry",errdefs_msg_name="pool modified",state="enabled",pool_description="",status_reason="",min_active_members="1",availability_state="offline",available_members="0",up_members="0"' + '<0>Jul 6 22:37:15 bigip14.1.2.test 
$F5telemetryEventCategory="test",BigIP:EOCtimestamp="1594100235",Microtimestamp="1594100235358418",errdefs_msgno="22327305",Hostname="bigip14.1.2.test",SlotId="0",globalBigiqConf="N/A",ObjectTagsList="N/A",pool_name="/Common/Shared/telemetry",errdefs_msg_name="pool modified",state="enabled",pool_description="",status_reason="",min_active_members="1",availability_state="off,lin=e",available_members="0",up_members="0",$F5telemetryEventCategory="test"' + ], + mayHaveKeyValuePairs: [ + new Uint16Array([ + 61, 68, 87, 100, 115, 134, + 148, 159, 168, 187, 194, 198, + 214, 220, 235, 241, 251, 278, + 295, 311, 317, 327, 344, 347, + 361, 364, 383, 387, 406, 418, + 436, 440, 451, 455, 481 + ]) + ], + mayHaveF5EventCategory: [ + 37 ] }, { @@ -60,6 +101,14 @@ module.exports = { expectedData: [ '<134>Jul 6 22:37:49 bigip14.1.2.test info httpd(pam_audit)[13810]: 01070417:6: AUDIT - user admin - RAW: httpd(pam_audit): user=admin(admin) partition=[All] level=Administrator tty=(unknown) host=172.18.5.167 attempts=1 start="Mon Jul 6 22:37:49 2020" end="Mon Jul 6 22:37:49 2020"', '<87>Jul 6 22:37:49 bigip14.1.2.test debug httpd[13810]: pam_bigip_authz: pam_sm_acct_mgmt returning status SUCCESS' + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0 ] }, { @@ -69,6 +118,12 @@ module.exports = { ], expectedData: [ '<30>Jul 6 22:37:26 bigip14.1.2.test info dhclient[4079]: XMT: Solicit on mgmt, interval 112580ms."\n <30>Jul 6 22:37:35 bigip14.1.2.test info systemd[1]: getty@tty0\x20ttyS0.service has no holdoff time, scheduling restart. \n<30>Jul 6 22:37:35 bigip14.1.2.test info systemd[1]: getty@tty0\x20ttyS0.service has no holdoff time, scheduling restart."' + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 0 ] }, { @@ -76,11 +131,19 @@ module.exports = { chunks: [ '<30>Jul 6 22:37:26 bigip14.1.2.test info dhclient[4079]: XMT: Solicit on mgmt, interval 112580ms.\'\n <30>Jul ', '6 22:37:35 bigip14.1.2.test info systemd[1]: getty@tty0\x20ttyS0.service has no holdoff time, scheduling ', - 'restart. \n<30>Jul 6 22:37:35 bigip14.1.2.test info systemd[1]: getty@tty0\x20ttyS0.service has no holdoff time, scheduling restart.\'\n ' + 'restart. \n<30>Jul 6 22:37:35 bigip14.1.2.test info systemd[1]: getty@tty0\x20ttyS0.service has no holdoff time, $F5tel$metryEventCategory="test", scheduling restart.\'\n ' ], expectedData: [ - '<30>Jul 6 22:37:26 bigip14.1.2.test info dhclient[4079]: XMT: Solicit on mgmt, interval 112580ms.\'\n <30>Jul 6 22:37:35 bigip14.1.2.test info systemd[1]: getty@tty0\x20ttyS0.service has no holdoff time, scheduling restart. \n<30>Jul 6 22:37:35 bigip14.1.2.test info systemd[1]: getty@tty0\x20ttyS0.service has no holdoff time, scheduling restart.\'', + '<30>Jul 6 22:37:26 bigip14.1.2.test info dhclient[4079]: XMT: Solicit on mgmt, interval 112580ms.\'\n <30>Jul 6 22:37:35 bigip14.1.2.test info systemd[1]: getty@tty0\x20ttyS0.service has no holdoff time, scheduling restart. 
\n<30>Jul 6 22:37:35 bigip14.1.2.test info systemd[1]: getty@tty0\x20ttyS0.service has no holdoff time, $F5tel$metryEventCategory="test", scheduling restart.\'', ' ' + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 323, // non-strict match + 0 ] }, { @@ -94,6 +157,20 @@ module.exports = { ' line3', ' ', ' line5' + ], + mayHaveKeyValuePairs: [ + null, + null, + null, + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0, + 0, + 0, + 0 ] }, { @@ -106,6 +183,18 @@ module.exports = { 'line2', 'line3', 'line4' + ], + mayHaveKeyValuePairs: [ + null, + null, + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0, + 0, + 0 ] }, { @@ -116,24 +205,50 @@ module.exports = { expectedData: [ 'key1="value\n"', 'key2=\\"value' + ], + mayHaveKeyValuePairs: [ + new Uint16Array([4]), + new Uint16Array([4]) + ], + mayHaveF5EventCategory: [ + 0, + 0 ] }, { name: 'without trailing newline', chunks: [ - '<0>Jul 6 22:37:15 bigip14.1.2.test BigIP:EOCtimestamp="1594100235",Microtimestamp="1594100235358418",errdefs_msgno="22327305",Hostname="bigip14.1.2.test",SlotId="0",globalBigiqConf="N/A",ObjectTagsList="N/A",pool_name="/Common/Shared/telemetry",errdefs_msg_name="pool modified",state="enabled",pool_description="",status_reason="",min_active_members="1",availability_state="offline",available_members="0",up_members="0"' + '<0>Jul 6 22:37:15 bigip14.1.2.test BigIP:EOCtimestamp="1594100235",Microtimestamp="1594100235358418",errdefs_msgno="22327305",Hostname="bigip14.1.2.test",SlotId="0",globalBigiqConf="N/A",ObjectTagsList="N/A",pool_name="/Common/Shared/telemetry",errdefs_msg_name="pool modified",state="enabled",pool_description="",status_reason="",min_active_members="1",availability_state="offline",available_members="0",up_members="0=,10"' ], expectedData: [ - '<0>Jul 6 22:37:15 bigip14.1.2.test BigIP:EOCtimestamp="1594100235",Microtimestamp="1594100235358418",errdefs_msgno="22327305",Hostname="bigip14.1.2.test",SlotId="0",globalBigiqConf="N/A",ObjectTagsList="N/A",pool_name="/Common/Shared/telemetry",errdefs_msg_name="pool modified",state="enabled",pool_description="",status_reason="",min_active_members="1",availability_state="offline",available_members="0",up_members="0"' + '<0>Jul 6 22:37:15 bigip14.1.2.test BigIP:EOCtimestamp="1594100235",Microtimestamp="1594100235358418",errdefs_msgno="22327305",Hostname="bigip14.1.2.test",SlotId="0",globalBigiqConf="N/A",ObjectTagsList="N/A",pool_name="/Common/Shared/telemetry",errdefs_msg_name="pool modified",state="enabled",pool_description="",status_reason="",min_active_members="1",availability_state="offline",available_members="0",up_members="0=,10"' + ], + mayHaveKeyValuePairs: [ + new Uint16Array([ + 54, 67, 82, 101, 115, 126, 135, + 154, 161, 165, 181, 187, 202, 208, + 218, 245, 262, 278, 284, 294, 311, + 314, 328, 331, 350, 354, 373, 383, + 401, 405, 416 + ]) + ], + mayHaveF5EventCategory: [ + 0 ] }, { name: 'event with trailing newline', chunks: [ - '<30>Jul 6 22:37:26 bigip14.1.2.test info dhclient[4079]: XMT: Solicit on mgmt, interval 112580ms. \n' + '<30>Jul 6 22:37:26 bigip14.1.2.test info dhclient[4079]: XMT: Solicit on mgmt, $F5tel$emetryEventCategory="test", interval 112580ms. \n' ], expectedData: [ - '<30>Jul 6 22:37:26 bigip14.1.2.test info dhclient[4079]: XMT: Solicit on mgmt, interval 112580ms. ' + '<30>Jul 6 22:37:26 bigip14.1.2.test info dhclient[4079]: XMT: Solicit on mgmt, $F5tel$emetryEventCategory="test", interval 112580ms. 
' + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 82 ] }, { @@ -145,6 +260,14 @@ module.exports = { expectedData: [ '<30>Jul 6 22:37:26 bigip14.1.2.test info dhclient[4079]: XMT: Solicit on mgmt, interval 112580ms. ', '<30>Jul 6 22:37:26 bigip14.1.2.test info dhclient[4079]: XMT: Solicit on mgmt, interval 112580ms. ' + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0 ] }, { @@ -159,6 +282,16 @@ module.exports = { '<30>Jul 6 22:37:26 bigip14.1.2.test info dhclient[4079]: XMT: Solicit on mgmt, interval 112580ms. ', '<30>Jul 6 22:37:35 bigip14.1.2.test info systemd[1]: getty@tty0\x20ttyS0.service has no holdoff time, scheduling restart... and continued here ', '<134>Jul 6 22:37:49 bigip14.1.2.test info httpd(pam_audit)[13810]: 01070417:6: AUDIT - user admin - RAW: httpd(pam_audit): user=admin(admin) partition=[All] level=Administrator tty=(unknown) host=172.18.5.167 attempts=1 start="Mon Jul 6 22:37:49 2020" end="Mon Jul 6 22:37:49 2020"' + ], + mayHaveKeyValuePairs: [ + null, + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0, + 0 ] }, { @@ -173,6 +306,16 @@ module.exports = { '<30>Jul 6 22:37:26 bigip14.1.2.test info dhclient[4079]: XMT: Solicit on mgmt, interval 112580ms. ', '<30>Jul 6 22:37:35 bigip14.1.2.test info systemd[1]: getty@tty0\x20ttyS0.service has no holdoff time, scheduling restart... and continued here ', '<134>Jul 6 22:37:49 bigip14.1.2.test info httpd(pam_audit)[13810]: 01070417:6: AUDIT - user admin - RAW: httpd(pam_audit): user=admin(admin) partition=[All] level=Administrator tty=(unknown) host=172.18.5.167 attempts=1 start="Mon Jul 6 22:37:49 2020" end="Mon Jul 6 22:37:49 2020"' + ], + mayHaveKeyValuePairs: [ + null, + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0, + 0 ] }, { @@ -182,6 +325,12 @@ module.exports = { ], expectedData: [ '1'.repeat(520) + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 0 ] }, { @@ -195,6 +344,20 @@ module.exports = { '1'.repeat(16 * 1024), '1'.repeat(16 * 1024), '1'.repeat(70000 - 4 * 16 * 1024) + ], + mayHaveKeyValuePairs: [ + null, + null, + null, + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0, + 0, + 0, + 0 ] }, { @@ -207,6 +370,16 @@ module.exports = { '<0>Jul 6 22:37:15 bigip14.1.2.test BigIP:EOCtimestamp="', `${'1'.repeat(16 * 1024)}`, `${'1'.repeat(4 * 1024)}",nextfield="1"` + ], + mayHaveKeyValuePairs: [ + new Uint16Array([54]), + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0, + 0 ] }, { @@ -220,6 +393,16 @@ module.exports = { '<0>Jul 6 22:37:15 bigip14.1.2.test BigIP:EOCtimestamp="', `${'1'.repeat(16 * 1024)}`, '",nextfield="1"' + ], + mayHaveKeyValuePairs: [ + new Uint16Array([54]), + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0, + 0 ] }, { @@ -233,6 +416,16 @@ module.exports = { '<0>Jul 6 22:37:15 bigip14.1.2.test BigIP:EOCtimestamp="a', `${'1'.repeat(16 * 1024)}`, '",nextfield="1"' + ], + mayHaveKeyValuePairs: [ + new Uint16Array([54]), + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0, + 0 ] }, { @@ -246,6 +439,16 @@ module.exports = { '<0>Jul 6 22:37:15 bigip14.1.2.test BigIP:EOCtimestamp="a', `${'1'.repeat(16 * 1024)}`, '",nextfield="1"' + ], + mayHaveKeyValuePairs: [ + new Uint16Array([54]), + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0, + 0 ] }, { @@ -257,6 +460,14 @@ module.exports = { expectedData: [ 'line1', 'line2="value\nanotherPart=test\nanotherLine=anotherValue' + ], + mayHaveKeyValuePairs: [ + null, + new Uint16Array([5]) + ], + mayHaveF5EventCategory: [ + 0, + 0 ] }, { @@ 
-264,14 +475,18 @@ module.exports = { chunks: [ '{sep}' ], - expectedData: [] + expectedData: [], + mayHaveKeyValuePairs: [], + mayHaveF5EventCategory: [] }, { name: 'empty lines with line separator', chunks: [ '{sep}{sep}{sep}{sep}' ], - expectedData: [] + expectedData: [], + mayHaveKeyValuePairs: [], + mayHaveF5EventCategory: [] }, { name: 'line with trailing spaces', @@ -280,6 +495,12 @@ module.exports = { ], expectedData: [ ' ' + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 0 ] }, { @@ -289,6 +510,12 @@ module.exports = { ], expectedData: [ '\\n \\r\\n' + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 0 ] }, { @@ -299,6 +526,14 @@ module.exports = { expectedData: [ 'line1\\\\\\nstill line 1\\\\', 'line2\\\\' + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0 ] }, { @@ -308,6 +543,12 @@ module.exports = { ], expectedData: [ 'line1"\\\\\\nstill line 1\\\\\n"line2\\\\' + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 0 ] }, { @@ -317,6 +558,12 @@ module.exports = { ], expectedData: [ 'line1"\\\\\\nstill line 1\\\\\r\n"line2\\\\' + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 0 ] }, { @@ -326,6 +573,12 @@ module.exports = { ], expectedData: [ 'line1\'\\\\\\nstill line 1\\\\\n\'line2\\\\' + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 0 ] }, { @@ -335,6 +588,12 @@ module.exports = { ], expectedData: [ 'line1\'\\\\\\nstill line 1\\\\\r\n\'line2\\\\' + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 0 ] }, { @@ -345,6 +604,14 @@ module.exports = { expectedData: [ 'line1\\\'\\\\\\nstill line 1\\\\', '\'line2\\\\' + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0 ] }, { @@ -355,6 +622,14 @@ module.exports = { expectedData: [ 'line1\\"\\\\\\nstill line 1\\\\', '"line2\\\\' + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0 ] }, { @@ -365,6 +640,14 @@ module.exports = { expectedData: [ 'line1', '"{sep}line3' + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0 ] }, { @@ -375,6 +658,14 @@ module.exports = { expectedData: [ 'line1', 'line2"' + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0 ] }, { @@ -384,6 +675,12 @@ module.exports = { ], expectedData: [ '"line1{sep}line2' + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 0 ] }, { @@ -393,6 +690,12 @@ module.exports = { ], expectedData: [ 'line1"{sep}line2' + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 0 ] }, { @@ -403,6 +706,14 @@ module.exports = { expectedData: [ '\'foo"bar""\none\'', '\'two""thr\nee"' + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0 ] }, { @@ -413,6 +724,14 @@ module.exports = { expectedData: [ '\'line_1"still_line_1"\n\'', '"line_2"' + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0 ] }, { @@ -424,6 +743,16 @@ module.exports = { 'key1=""\'\'', 'key2=\'\'', 'key3=""' + ], + mayHaveKeyValuePairs: [ + new Uint16Array([4]), + new Uint16Array([4]), + new Uint16Array([4]) + ], + mayHaveF5EventCategory: [ + 0, + 0, + 0 ] }, { @@ -433,6 +762,25 @@ module.exports = { ], expectedData: [ '<134>May 4 11:01:56 localhost.localdomain 
ASM:unit_hostname="bigip1",management_ip_address="192.168.2.1",management_ip_address_2="",http_class_name="/Common/ASMTestPolicy",web_application_name="/Common/ASMTestPolicy",policy_name="/Common/ASMTestPolicy",policy_apply_date="2021-04-29 14:20:42",violations="",support_id="2508780119460416236",request_status="passed",response_code="0",ip_client="192.168.2.2",route_domain="0",method="OPTIONS",protocol="HTTP",query_string="param=tomatoes",x_forwarded_for_header_value="N/A",sig_ids="",sig_names="",date_time="2021-05-04 11:01:55",severity="Informational",attack_type="",geo_location="N/A",ip_address_intelligence="N/A",username="N/A",session_id="0",src_port="56022",dest_port="7878",dest_ip="192.168.2.3",sub_violations="",virus_name="N/A",violation_rating="0",websocket_direction="N/A",websocket_message_type="N/A",device_id="N/A",staged_sig_ids="",staged_sig_names="",threat_campaign_names="",staged_threat_campaign_names="",blocking_exception_reason="N/A",captcha_result="not_received",microservice="",vs_name="/Common/testvs",uri="/hello",fragment="",request="OPTIONS /hello?param=tomatoes HTTP/1.1\\r\\nHost: 192.168.2.3:7878\\r\\nUser-Agent: curl/7.64.1\\r\\nAccept: */*\\r\\nContent-Type: application/json\\r\\ntoken: 12341234\\r\\n\\r\\n",response="Response logging disabled"' + ], + mayHaveKeyValuePairs: [ + new Uint16Array([ + 60, 69, 91, 105, 129, 132, 148, 172, + 193, 217, 229, 253, 271, 293, 304, 307, + 318, 340, 355, 364, 378, 382, 392, 406, + 419, 423, 430, 440, 449, 456, 469, 486, + 515, 521, 529, 532, 542, 545, 555, 577, + 586, 602, 614, 617, 630, 636, 660, 666, + 675, 681, 692, 696, 705, 713, 723, 730, + 738, 752, 767, 770, 781, 787, 804, 808, + 828, 834, 857, 863, 873, 879, 894, 897, + 914, 917, 939, 942, 971, 974, 1000, 1006, + 1021, 1036, 1049, 1052, 1060, 1077, 1081, + 1090, 1099, 1102, 1110, 1280, 1289 + ]) + ], + mayHaveF5EventCategory: [ + 0 ] }, { @@ -446,6 +794,25 @@ module.exports = { '<134>May 4 11:01:56 localhost.localdomain ASM:unit_hostname="bigip1",management_ip_address="192.168.2.1",management_ip_address_2="",http_class_name="/Common/ASMTestPolicy",web_application_name="/Common/ASMTestPolicy",policy_name="/Common/ASMTestPolicy",policy_apply_date="2021-04-29 14:20:42",violations="",support_id="2508780119460416236",request_status="passed",response_code="0",ip_client="192.168.2.2",route_domain="0",method="OPTIONS",protocol="HTTP",query_string="param=tomatoes",x_forwarded_for_header_value="N/A",sig_ids="",sig_names="",date_time="2021-05-04 11:01:55",severity="Informational",attack_type="",geo_location="N/A",ip_address_intelligence="N/A",username="N/A",session_id="0",src_port="56022",dest_port="7878",dest_ip="192.168.2.3",sub_violations="",virus_name="N/A",violation_rating="0",websocket_direction="N/A",websocket_message_type="N/A",device_id="N/A",staged_sig_ids="",staged_sig_names="",threat_campaign_names="",staged_threat_campaign_names="",blocking_exception_reason="N/A",captcha_result="not_received",microservice="",vs_name="/Common/testvs",uri="/hello",fragment="",request="OPTIONS /hello?param=tomatoes' + `${'/apples'.repeat(100)}` + 'HTTP/1.1\\r\\nHost: 192.168.2.3:7878\\r\\nUser-Agent: curl/7.64.1\\r\\nAccept: */*\\r\\nContent-Type: application/json\\r\\ntoken: 12341234\\r\\n\\r\\n",response="Response logging disabled"' + ], + mayHaveKeyValuePairs: [ + new Uint16Array([ + 60, 69, 91, 105, 129, 132, 148, 172, + 193, 217, 229, 253, 271, 293, 304, 307, + 318, 340, 355, 364, 378, 382, 392, 406, + 419, 423, 430, 440, 449, 456, 469, 486, + 515, 521, 529, 532, 542, 545, 555, 577, 
+ 586, 602, 614, 617, 630, 636, 660, 666, + 675, 681, 692, 696, 705, 713, 723, 730, + 738, 752, 767, 770, 781, 787, 804, 808, + 828, 834, 857, 863, 873, 879, 894, 897, + 914, 917, 939, 942, 971, 974, 1000, 1006, + 1021, 1036, 1049, 1052, 1060, 1077, 1081, + 1090, 1099, 1102, 1110, 1979, 1988 + ]) + ], + mayHaveF5EventCategory: [ + 0 ] }, { @@ -456,6 +823,14 @@ module.exports = { expectedData: [ '\\'.repeat(16 * 1024), '\\'.repeat(4 * 1024) + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0 ] }, { @@ -463,7 +838,338 @@ module.exports = { chunks: [ '{sep}'.repeat(20 * 1024) ], - expectedData: [] + expectedData: [], + mayHaveKeyValuePairs: [], + mayHaveF5EventCategory: [] + }, + { + name: '=, chaos', + chunks: [ + '=,'.repeat(20 * 1024) + ], + expectedData: [ + '=,'.repeat(8 * 1024), + '=,'.repeat(8 * 1024), + '=,'.repeat(4 * 1024) + ], + mayHaveKeyValuePairs: [ + null, + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0, + 0 + ] + }, + { + name: '=, chaos', + chunks: [ + 'a' + '=,'.repeat(20 * 1024) + ], + expectedData: [ + 'a' + '=,'.repeat(8 * 1023) + '=,=,=,=,=,=,=,=', + ',='.repeat(8 * 1024), + ',='.repeat(4 * 1024) + ',' + ], + mayHaveKeyValuePairs: [ + null, + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0, + 0 + ] + }, + { + name: '"" chaos', + chunks: [ + '"'.repeat(20 * 1024) + ], + expectedData: [ + '"'.repeat(16 * 1024), + '"'.repeat(4 * 1024) + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0 + ] + }, + { + name: '\'\' chaos', + chunks: [ + '\''.repeat(20 * 1024) + ], + expectedData: [ + '\''.repeat(16 * 1024), + '\''.repeat(4 * 1024) + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0 + ] + }, + { + name: '"\'"\' chaos', + chunks: [ + '"\''.repeat(20 * 1024) + ], + expectedData: [ + '"\''.repeat(8 * 1023) + '"\'"\'"\'"\'"\'"\'"\'"\'', + '"\''.repeat(8 * 1023) + '"\'"\'"\'"\'"\'"\'"\'"\'', + '"\''.repeat(4 * 1024) + ], + mayHaveKeyValuePairs: [ + null, + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0, + 0 + ] + }, + { + name: 'filter out mayHaveKeyValuePairs according to position (example 1)', + chunks: [ + 'something=test' + '\\'.repeat(10 * 1024) + '"\n"something2=test2,something3=test3' + '\\'.repeat(10 * 1024) + ], + expectedData: [ + 'something=test' + '\\'.repeat(10 * 1024) + '"', + '"something2=test2,something3=test3' + '\\'.repeat(10 * 1024) + ], + mayHaveKeyValuePairs: [ + new Uint16Array([9]), + null + ], + mayHaveF5EventCategory: [ + 0, + 0 + ] + }, + { + name: 'filter out mayHaveKeyValuePairs according to position (example 2)', + chunks: [ + 'something=test' + '\\'.repeat(10 * 1024) + '"\n""something2=test2,something3=test3' + '\\'.repeat(10 * 1024) + ], + expectedData: [ + 'something=test' + '\\'.repeat(10 * 1024) + '"', + '""something2=test2,something3=test3' + '\\'.repeat(10 * 1024) + ], + mayHaveKeyValuePairs: [ + new Uint16Array([9]), + new Uint16Array([12, 18, 29]) + ], + mayHaveF5EventCategory: [ + 0, + 0 + ] + }, + { + name: 'filter out mayHaveKeyValuePairs according to position (example 3)', + chunks: [ + '\\'.repeat(16 * 1023) + 'something=test===' + ], + expectedData: [ + '\\'.repeat(16 * 1023) + 'something=test==', + '=' + ], + mayHaveKeyValuePairs: [ + null, + null + ], + mayHaveF5EventCategory: [ + 0, + 0 + ] + }, + { + name: 'ignore $F5TelemetryEventCategory at the end of line (short)', + chunks: [ + 'myline $F5Telemetry=' + ], + expectedData: [ + 'myline $F5Telemetry=' + ], + mayHaveKeyValuePairs: [ + new Uint16Array([19]) 
+ ], + mayHaveF5EventCategory: [ + 0 + ] + }, + { + name: 'ignore $F5TelemetryEventCategory at the end of line (full)', + chunks: [ + 'myline $F5TelemetryEventCategory=' + ], + expectedData: [ + 'myline $F5TelemetryEventCategory=' + ], + mayHaveKeyValuePairs: [ + new Uint16Array([32]) + ], + mayHaveF5EventCategory: [ + 8 + ] + }, + { + name: 'not ignore $F5TelemetryEventCategory at the end of line', + chunks: [ + 'myline $F5TelemetryEventCategory=v' + ], + expectedData: [ + 'myline $F5TelemetryEventCategory=v' + ], + mayHaveKeyValuePairs: [ + new Uint16Array([32]) + ], + mayHaveF5EventCategory: [ + 8 + ] + }, + { + name: 'not ignore $F5TelemetryEventCategory when split into chunks', + chunks: [ + 'myline $F5Telemetry', + 'EventCategory=v' + ], + expectedData: [ + 'myline $F5TelemetryEventCategory=v' + ], + mayHaveKeyValuePairs: [ + new Uint16Array([32]) + ], + mayHaveF5EventCategory: [ + 8 + ] + }, + { + name: 'not ignore $F5Telemetry when enclosing with quotes', + chunks: [ + 'myline "something" "$F5TelemetryEventCategory=v' + ], + expectedData: [ + 'myline "something" "$F5TelemetryEventCategory=v' + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 21 + ] + }, + { + name: 'ignore $F5Telemetry when out of bounds (example 1)', + chunks: [ + 'something=test"' + '\\'.repeat(10 * 1024) + '\n"$F5TelemetryEventCategory=v""something2=test2,something3=test3' + '\\'.repeat(10 * 1024) + ], + expectedData: [ + 'something=test"' + '\\'.repeat(10 * 1024), + '"$F5TelemetryEventCategory=v""something2=test2,something3=test3' + '\\'.repeat(10 * 1024) + ], + mayHaveKeyValuePairs: [ + new Uint16Array([9]), + null + ], + mayHaveF5EventCategory: [ + 0, // ignored first time + 2 + ] + }, + { + name: 'ignore $F5Telemetry when out of bounds (example 2)', + chunks: [ + 'something=test"' + '\\'.repeat(10 * 1024) + '$F5Tel\n"etryEventCategory=v""something2=test2,something3=test3' + '\\'.repeat(10 * 1024) + ], + expectedData: [ + 'something=test"' + '\\'.repeat(10 * 1024) + '$F5Tel', + '"etryEventCategory=v""something2=test2,something3=test3' + '\\'.repeat(10 * 1024) + ], + mayHaveKeyValuePairs: [ + new Uint16Array([9]), + null + ], + mayHaveF5EventCategory: [ + 0, // ignored first time + 0 + ] + }, + { + name: 'ignore $F5Telemetry when enclosing with quotes (extra backlash)', + chunks: [ + 'myline "something" "$\\F5TelemetryEventCategory=v' + ], + expectedData: [ + 'myline "something" "$\\F5TelemetryEventCategory=v' + ], + mayHaveKeyValuePairs: [ + null + ], + mayHaveF5EventCategory: [ + 0 + ] + }, + { + name: 'process extra backlash in new line', + chunks: [ + 'myline "something" "$\\F5TelemetryEv"entC\r\\\nategory=v' + ], + expectedData: [ + 'myline "something" "$\\F5TelemetryEv"entC\r\\', + 'ategory=v' + ], + mayHaveKeyValuePairs: [ + null, + new Uint16Array([7]) + ], + mayHaveF5EventCategory: [ + 0, + 0 + ] + }, + { + name: 'detect only 2000 key-value pairs per message', + chunks: [ + 'key=val,'.repeat(2030) + 'key=val\n', + 'key=va,'.repeat(2030) + 'key=va\n' + ], + expectedData: [ + 'key=val,'.repeat(2030) + 'key=val', + 'key=va,'.repeat(2030) + 'key=va' + ], + mayHaveKeyValuePairs: [ + new Uint16Array(generateKVOffsets('key=va1,', 2000).slice(0, 8000)), + new Uint16Array(generateKVOffsets('key=va,', 2000).slice(0, 8000)) + ], + mayHaveF5EventCategory: [ + 0, + 0 + ] } ] }; + +function generateKVOffsets(string, repeat) { + const eqOffset = string.indexOf('='); + const cmOffset = string.indexOf(','); + const slen = string.length; + const result = []; + + for (let i = 0; i < repeat; i += 1) 
{ + result.push(eqOffset + slen * i, cmOffset + slen * i); + } + return result; +} diff --git a/test/unit/eventListener/messageStreamTests.js b/test/unit/eventListener/messageStreamTests.js deleted file mode 100644 index de83d637..00000000 --- a/test/unit/eventListener/messageStreamTests.js +++ /dev/null @@ -1,538 +0,0 @@ -/** - * Copyright 2024 F5, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -'use strict'; - -/* eslint-disable import/order */ -const moduleCache = require('../shared/restoreCache')(); - -const EventEmitter = require('events').EventEmitter; -const sinon = require('sinon'); -const net = require('net'); -const udp = require('dgram'); - -const assert = require('../shared/assert'); -const messageStreamTestData = require('../data/messageStreamTestsData'); -// const sourceCode = require('../shared/sourceCode'); -const stubs = require('../shared/stubs'); -const testUtil = require('../shared/util'); - -// const messageStream = sourceCode('src/lib/eventListener/messageStream'); - -moduleCache.remember(); - -describe.skip('Message Stream Receiver', () => { - let clock; - let dataCallbackSpy; - let messageStream; - let onMockCreatedCallback; - let rawDataCallbackSpy; - let receiverInst; - let serverMocks; - let socketId; - - const testPort = 6514; - const testAddr = 'localhost10'; - const testAddr6 = '::localhost10'; - const testBufferTimeout = 10 * 1000; - - class MockUdpServer extends EventEmitter { - setInitArgs(opts) { - this.opts = opts; - } - - bind() { - this.emit('listenMock', this, Array.from(arguments)); - } - - close() { - this.emit('closeMock', this, Array.from(arguments)); - } - } - - class MockTcpServer extends EventEmitter { - setInitArgs(opts) { - this.opts = opts; - } - - listen() { - this.emit('listenMock', this, Array.from(arguments)); - } - - close() { - this.emit('closeMock', this, Array.from(arguments)); - } - } - - class MockTcpSocket extends EventEmitter { - destroy() {} - } - - const getServerMock = (cls, ipv6) => serverMocks.find((mock) => mock instanceof cls && (ipv6 === undefined || (ipv6 && mock.opts.type === 'udp6') || (!ipv6 && mock.opts.type === 'udp4'))); - const createServerMock = (Cls, opts) => { - const mock = new Cls(); - mock.setInitArgs(opts); - serverMocks.push(mock); - if (onMockCreatedCallback) { - onMockCreatedCallback(mock); - } - return mock; - }; - - const createSocketInfo = (cls, ipv6) => { - socketId += 1; - if (cls === MockUdpServer) { - return { - address: ipv6 ? testAddr6 : testAddr, - port: testPort + socketId - }; - } - const socketMock = new MockTcpSocket(); - socketMock.remoteAddress = ipv6 ? 
testAddr6 : testAddr; - socketMock.remotePort = testPort + socketId; - return socketMock; - }; - - before(() => { - moduleCache.restore(); - }); - - beforeEach(() => { - clock = stubs.clock(); - dataCallbackSpy = sinon.spy(); - rawDataCallbackSpy = sinon.spy(); - serverMocks = []; - socketId = 0; - - receiverInst = new messageStream.MessageStream(testPort, { address: testAddr }); - receiverInst.on('messages', dataCallbackSpy); - receiverInst.on('rawData', rawDataCallbackSpy); - - sinon.stub(messageStream.MessageStream, 'MAX_BUFFER_TIMEOUT').value(testBufferTimeout); - - sinon.stub(udp, 'createSocket').callsFake((opts) => createServerMock(MockUdpServer, opts)); - sinon.stub(net, 'createServer').callsFake((opts) => createServerMock(MockTcpServer, opts)); - onMockCreatedCallback = (serverMock) => { - serverMock.on('listenMock', () => serverMock.emit('listening')); - serverMock.on('closeMock', (inst, args) => { - serverMock.emit('close'); - args[0](); // call callback - }); - }; - }); - - afterEach(() => { - sinon.restore(); - }); - - describe('"rawData" event', () => { - describe('raw data handling for each protocol', () => { - const testMessage = Buffer.from('<1>testData\n'); - - beforeEach(() => { - receiverInst.enableRawDataForwarding(); - }); - - it('should retrieve raw data via udp4 socket', () => receiverInst.start() - .then(() => { - const socketInfo = createSocketInfo(MockUdpServer, false); - getServerMock(MockUdpServer, false).emit('message', testMessage, socketInfo); - return receiverInst.stop(); - }) - .then(() => { - assert.deepStrictEqual( - rawDataCallbackSpy.args, - [[{ - data: Buffer.from('<1>testData\n'), - protocol: 'udp', - senderKey: 'udp4-localhost10-6515', - timestamp: 0, - hrtime: [0, 0] - }]] - ); - })); - - it('should retrieve data via udp6 socket', () => receiverInst.start() - .then(() => { - const socketInfo = createSocketInfo(MockUdpServer, true); - getServerMock(MockUdpServer, true).emit('message', testMessage, socketInfo); - return receiverInst.stop(); - }) - .then(() => { - assert.deepStrictEqual( - rawDataCallbackSpy.args, - [[{ - data: Buffer.from('<1>testData\n'), - protocol: 'udp', - senderKey: 'udp6-::localhost10-6515', - timestamp: 0, - hrtime: [0, 0] - }]] - ); - })); - - it('should retrieve data via tcp socket', () => receiverInst.start() - .then(() => { - const socketInfo = createSocketInfo(MockTcpServer); - getServerMock(MockTcpServer).emit('connection', socketInfo); - socketInfo.emit('data', testMessage); - return receiverInst.stop(); - }) - .then(() => { - assert.deepStrictEqual( - rawDataCallbackSpy.args, - [[{ - data: Buffer.from('<1>testData\n'), - protocol: 'tcp', - senderKey: 'tcp-localhost10-6515', - timestamp: 0, - hrtime: [0, 0] - }]] - ); - })); - - it('should retrieve raw data via all protocol at same time', () => receiverInst.start() - .then(() => { - const socketInfoTcp = createSocketInfo(MockTcpServer); // port 6515 - getServerMock(MockTcpServer).emit('connection', socketInfoTcp); - const socketInfoUdp4 = createSocketInfo(MockUdpServer, false); // port 6516 - const socketInfoUdp6 = createSocketInfo(MockUdpServer, true); // port 6517 - - socketInfoTcp.emit('data', Buffer.from('start')); - getServerMock(MockUdpServer, false).emit('message', Buffer.from('start'), socketInfoUdp4); - getServerMock(MockUdpServer, true).emit('message', Buffer.from('start'), socketInfoUdp6); - socketInfoTcp.emit('data', Buffer.from('end\n')); - getServerMock(MockUdpServer, false).emit('message', Buffer.from('end\n'), socketInfoUdp4); - 
getServerMock(MockUdpServer, true).emit('message', Buffer.from('end\n'), socketInfoUdp6); - - return receiverInst.stop(); - }) - .then(() => { - assert.sameDeepMembers( - rawDataCallbackSpy.args, - [ - [{ - data: Buffer.from('start'), - protocol: 'tcp', - senderKey: 'tcp-localhost10-6515', - timestamp: 0, - hrtime: [0, 0] - }], - [{ - data: Buffer.from('start'), - protocol: 'udp', - senderKey: 'udp4-localhost10-6516', - timestamp: 0, - hrtime: [0, 0] - }], - [{ - data: Buffer.from('start'), - protocol: 'udp', - senderKey: 'udp6-::localhost10-6517', - timestamp: 0, - hrtime: [0, 0] - }], - [{ - data: Buffer.from('end\n'), - protocol: 'tcp', - senderKey: 'tcp-localhost10-6515', - timestamp: 0, - hrtime: [0, 0] - }], - [{ - data: Buffer.from('end\n'), - protocol: 'udp', - senderKey: 'udp4-localhost10-6516', - timestamp: 0, - hrtime: [0, 0] - }], - [{ - data: Buffer.from('end\n'), - protocol: 'udp', - senderKey: 'udp6-::localhost10-6517', - timestamp: 0, - hrtime: [0, 0] - }] - ] - ); - })); - - it('should not enable raw data forwarding', () => { - receiverInst.disableRawDataForwarding(); - return receiverInst.start() - .then(() => { - const socketInfo = createSocketInfo(MockUdpServer, false); - getServerMock(MockUdpServer, false).emit('message', testMessage, socketInfo); - clock.clockForward(100, { promisify: true }); - return testUtil.sleep(1000); // process pending promises - }) - .then(() => receiverInst.stop()) - .then(() => { - assert.lengthOf(rawDataCallbackSpy.args, 0, 'should not forward raw data once disabled'); - }); - }); - - it('should enable/disable raw data forwarding', () => receiverInst.start() - .then(() => { - const socketInfo = createSocketInfo(MockUdpServer, false); // port 6515 - getServerMock(MockUdpServer, false).emit('message', testMessage, socketInfo); - clock.clockForward(100, { promisify: true }); - return testUtil.sleep(1000); // process pending promises - }) - .then(() => { - assert.deepStrictEqual( - rawDataCallbackSpy.args, - [[{ - data: Buffer.from('<1>testData\n'), - protocol: 'udp', - senderKey: 'udp4-localhost10-6515', - timestamp: 0, - hrtime: [0, 0] - }]] - ); - - receiverInst.disableRawDataForwarding(); - const socketInfo = createSocketInfo(MockUdpServer, false); // port 6516 - getServerMock(MockUdpServer, false).emit('message', testMessage, socketInfo); - return testUtil.sleep(1000); // process pending promises - }) - .then(() => { - assert.lengthOf(rawDataCallbackSpy.args, 1, 'should not forward raw data once disabled'); - }) - .then(() => { - receiverInst.enableRawDataForwarding(); - const socketInfo = createSocketInfo(MockUdpServer, false); // port 6517 - getServerMock(MockUdpServer, false).emit('message', testMessage, socketInfo); - clock.clockForward(100, { promisify: true }); - return testUtil.sleep(1000); // process pending promises - }) - .then(() => receiverInst.stop()) - .then(() => { - assert.deepStrictEqual( - rawDataCallbackSpy.args, - [ - [{ - data: Buffer.from('<1>testData\n'), - protocol: 'udp', - senderKey: 'udp4-localhost10-6515', - timestamp: 0, - hrtime: [0, 0] - }], - [{ - data: Buffer.from('<1>testData\n'), - protocol: 'udp', - senderKey: 'udp4-localhost10-6517', - timestamp: 2000, - hrtime: [2, 0] - }] - ] - ); - })); - }); - }); - - describe('"messages" event', () => { - describe('data handling for each protocol', () => { - const testMessage = '<1>testData\n'; - - it('should retrieve data via udp4 socket', () => receiverInst.start() - .then(() => { - const socketInfo = createSocketInfo(MockUdpServer, false); - 
getServerMock(MockUdpServer, false).emit('message', testMessage, socketInfo); - return receiverInst.stop(); - }) - .then(() => { - assert.deepStrictEqual(dataCallbackSpy.args[0], [[testMessage.slice(0, -1)]]); - })); - - it('should retrieve data via udp6 socket', () => receiverInst.start() - .then(() => { - const socketInfo = createSocketInfo(MockUdpServer, true); - getServerMock(MockUdpServer, true).emit('message', testMessage, socketInfo); - return receiverInst.stop(); - }) - .then(() => { - assert.deepStrictEqual(dataCallbackSpy.args[0], [[testMessage.slice(0, -1)]]); - })); - - it('should retrieve data via tcp socket', () => receiverInst.start() - .then(() => { - const socketInfo = createSocketInfo(MockTcpServer); - getServerMock(MockTcpServer).emit('connection', socketInfo); - socketInfo.emit('data', testMessage); - return receiverInst.stop(); - }) - .then(() => { - assert.deepStrictEqual(dataCallbackSpy.args[0], [[testMessage.slice(0, -1)]]); - })); - - it('should retrieve data via all protocol at same time', () => receiverInst.start() - .then(() => { - const socketInfoTcp = createSocketInfo(MockTcpServer); - getServerMock(MockTcpServer).emit('connection', socketInfoTcp); - const socketInfoUdp4 = createSocketInfo(MockUdpServer, false); - const socketInfoUdp6 = createSocketInfo(MockUdpServer, true); - - socketInfoTcp.emit('data', 'start'); - getServerMock(MockUdpServer, false).emit('message', 'start', socketInfoUdp4); - getServerMock(MockUdpServer, true).emit('message', 'start', socketInfoUdp6); - socketInfoTcp.emit('data', 'end\n'); - getServerMock(MockUdpServer, false).emit('message', 'end\n', socketInfoUdp4); - getServerMock(MockUdpServer, true).emit('message', 'end\n', socketInfoUdp6); - - return receiverInst.stop(); - }) - .then(() => { - assert.includeDeepMembers(dataCallbackSpy.args, [ - [['startend']], - [['startend']], - [['startend']] - ]); - })); - }); - - describe('chunked data', () => { - const fetchEvents = () => { - const events = []; - dataCallbackSpy.args.forEach((args) => { - args[0].forEach((arg) => events.push(arg)); - }); - return events; - }; - - messageStreamTestData.dataHandler.forEach((testConf) => { - const separators = JSON.stringify(testConf.chunks).indexOf('{sep}') !== -1 ? 
['\n', '\r\n'] : ['']; - separators.forEach((sep) => { - let sepMsg = 'built-in the test new line separator'; - if (sep) { - sepMsg = sep.replace(/\n/g, '\\n').replace(/\r/g, '\\r'); - } - testUtil.getCallableIt(testConf)(`should process data - ${testConf.name} (${sepMsg})`, () => receiverInst.start() - .then(() => { - const socketInfo = createSocketInfo(MockUdpServer, false); - const server = getServerMock(MockUdpServer, false); - testConf.chunks.forEach((chunk) => server.emit('message', chunk.replace(/\{sep\}/g, sep), socketInfo)); - clock.clockForward(100, { promisify: true }); - return testUtil.sleep(testBufferTimeout * 4); // sleep to process pending tasks - }) - .then(() => receiverInst.stop()) - .then(() => { - assert.deepStrictEqual(fetchEvents(), testConf.expectedData); - })); - }); - }); - }); - }); - - describe('.restart()', () => { - it('should recreate all receivers on restart', () => receiverInst.start() - .then(() => { - assert.lengthOf(serverMocks, 3, 'should create 3 sockets'); - assert.strictEqual(getServerMock(MockUdpServer, false).opts.type, 'udp4', 'should create udp4 listener'); - assert.strictEqual(getServerMock(MockUdpServer, true).opts.type, 'udp6', 'should create udp6 listener'); - assert.strictEqual(getServerMock(MockTcpServer).opts.allowHalfOpen, false, 'should create tcp listener'); - return receiverInst.restart(); - }) - .then(() => { - assert.lengthOf(serverMocks, 6, 'should create 3 more sockets'); - assert.lengthOf(serverMocks.filter((mock) => mock.opts.type === 'udp4'), 2, 'should have 2 udp4 sockets'); - assert.lengthOf(serverMocks.filter((mock) => mock.opts.type === 'udp6'), 2, 'should have 2 udp6 sockets'); - assert.lengthOf(serverMocks.filter((mock) => mock.opts.allowHalfOpen === false), 2, 'should have 2 tcp sockets'); - })); - }); - - describe('.start()', () => { - it('should start receivers', () => receiverInst.start() - .then(() => { - assert.lengthOf(serverMocks, 3, 'should create 3 sockets'); - assert.strictEqual(getServerMock(MockUdpServer, false).opts.type, 'udp4', 'should create udp4 listener'); - assert.strictEqual(getServerMock(MockUdpServer, true).opts.type, 'udp6', 'should create udp6 listener'); - assert.strictEqual(getServerMock(MockTcpServer).opts.allowHalfOpen, false, 'should create tcp listener'); - assert.isTrue(receiverInst.isRunning(), 'should be in running state'); - })); - - it('should fail to start', () => { - let firstOnly = false; - onMockCreatedCallback = (serverMock) => { - if (!firstOnly) { - firstOnly = true; - serverMock.on('listenMock', () => { - serverMock.emit('close'); - }); - } else { - serverMock.on('listenMock', () => serverMock.emit('listening')); - } - }; - return assert.isRejected(receiverInst.start(), /socket closed before/) - .then(() => { - assert.isFalse(receiverInst.isRunning(), 'should not be in running state'); - }); - }); - - it('should throw error on unknown protocol', () => { - receiverInst.protocols = ['test']; - return assert.isRejected(receiverInst.start(), /Unknown protocol/); - }); - }); - - describe('.stop()', () => { - it('should be able to stop receiver without active receivers', () => receiverInst.stop() - .then(() => { - assert.isFalse(receiverInst.isRunning(), 'should not be in running state'); - assert.isTrue(receiverInst.hasState(messageStream.MessageStream.STATE.STOPPED), 'should have STOPPED state'); - })); - - it('should be able to stop receiver', () => { - const closeSpy = sinon.spy(); - return receiverInst.start() - .then(() => { - assert.isTrue(receiverInst.hasReceivers(), 'should 
have receivers started'); - getServerMock(MockTcpServer).on('close', closeSpy); - getServerMock(MockUdpServer, false).on('close', closeSpy); - getServerMock(MockUdpServer, true).on('close', closeSpy); - return receiverInst.stop(); - }) - .then(() => { - assert.strictEqual(closeSpy.callCount, 3, 'should close 3 sockets'); - assert.isFalse(receiverInst.isRunning(), 'should not be in running state'); - assert.isTrue(receiverInst.hasState(messageStream.MessageStream.STATE.STOPPED), 'should have STOPPED state'); - }); - }); - - it('should cleanup all pending tasks', () => { - let socketInfo; - let server; - return receiverInst.start() - .then(() => { - socketInfo = createSocketInfo(MockUdpServer, false); - server = getServerMock(MockUdpServer, false); - server.emit('message', 'test_message="', socketInfo); - - clock.clockForward(100, { promisify: true }); - return testUtil.sleep(testBufferTimeout * 4); // sleep to process pending tasks - }) - .then(() => { - assert.deepStrictEqual(dataCallbackSpy.args, [[['test_message="']]], 'should process incomplete message'); - server.emit('message', 'test_message_2="', socketInfo); - return testUtil.sleep(testBufferTimeout / 2); // sleep to process pending tasks - }) - .then(() => receiverInst.stop()) - .then(() => { - assert.lengthOf(dataCallbackSpy.args, 1, 'should not process second message when stopped'); - }); - }); - }); -}); diff --git a/test/unit/eventListener/networkServiceTests.js b/test/unit/eventListener/networkServiceTests.js index 1d92ff6f..3c587f87 100644 --- a/test/unit/eventListener/networkServiceTests.js +++ b/test/unit/eventListener/networkServiceTests.js @@ -36,6 +36,10 @@ const networkService = sourceCode('src/lib/eventListener/networkService'); moduleCache.remember(); describe('Event Listener / TCP and UDP Services', () => { + before(() => { + moduleCache.restore(); + }); + let coreStub; let dataReceivers; let receiverInst; @@ -98,10 +102,6 @@ describe('Event Listener / TCP and UDP Services', () => { assert.notIncludeMatch(coreStub.logger.messages[lvl], msg); } - before(() => { - moduleCache.restore(); - }); - beforeEach(() => { sinon.stub(constants.EVENT_LISTENER, 'NETWORK_SERVICE_RESTART_DELAY').value(1); coreStub = stubs.default.coreStub({ @@ -134,6 +134,7 @@ describe('Event Listener / TCP and UDP Services', () => { assert.deepStrictEqual(receiverInst.getRestartOptions(), { delay: 1 }); + assert.isTrue(receiverInst.restartsEnabled); }); }); diff --git a/test/unit/eventListener/parserTests.js b/test/unit/eventListener/parserTests.js index a403a13a..a38dfdb0 100644 --- a/test/unit/eventListener/parserTests.js +++ b/test/unit/eventListener/parserTests.js @@ -16,10 +16,11 @@ 'use strict'; -/* eslint-disable import/order */ +/* eslint-disable import/order, no-restricted-properties, prefer-template, no-bitwise */ const moduleCache = require('../shared/restoreCache')(); const sinon = require('sinon'); +const StringDecoder = require('string_decoder').StringDecoder; const assert = require('../shared/assert'); const parserTestData = require('./data/parserTestsData'); @@ -52,12 +53,17 @@ describe('Event Listener / Parser', () => { assert.deepStrictEqual(p._buffers.allocated, 1000); assert.deepStrictEqual(p.maxSize, 16 * 1024); assert.deepStrictEqual(p.freeBuffers, 16 * 1024 + 1); + assert.isTrue(p.featKVPairs); + assert.isTrue(p.featF5EvtCategory); + assert.deepStrictEqual(p.features & Parser.FEAT_ALL, Parser.FEAT_ALL); + assert.deepStrictEqual(p.maxKVPairs, 2000); }); it('should use non-default values', () => { const p = new Parser(callback, 
{ bufferPrealloc: 10, bufferSize: 11, + maxKVPairs: 0, maxSize: 100, mode: 'string' }); @@ -65,25 +71,118 @@ describe('Event Listener / Parser', () => { assert.deepStrictEqual(p._buffers.allocated, 10); assert.deepStrictEqual(p.maxSize, 100); assert.deepStrictEqual(p.freeBuffers, 11); + assert.isFalse(p.featKVPairs); + assert.isTrue(p.featF5EvtCategory); + assert.deepStrictEqual(p.features & Parser.FEAT_ALL, Parser.FEAT_ALL); + assert.deepStrictEqual(p.maxKVPairs, 0); + }); + + it('should use non-default values (example 2)', () => { + const p = new Parser(callback, { + bufferPrealloc: 10, + bufferSize: 11, + features: Parser.FEAT_NONE, + maxSize: 100, + mode: 'string' + }); + assert.deepStrictEqual(p.mode, 'string'); + assert.deepStrictEqual(p._buffers.allocated, 10); + assert.deepStrictEqual(p.maxSize, 100); + assert.deepStrictEqual(p.freeBuffers, 11); + assert.isFalse(p.featKVPairs); + assert.isFalse(p.featF5EvtCategory); + assert.deepStrictEqual(p.features & Parser.FEAT_ALL, Parser.FEAT_NONE); + assert.deepStrictEqual(p.maxKVPairs, 2000); + }); + + it('should use non-default values (example 3)', () => { + const p = new Parser(callback, { + bufferPrealloc: 10, + bufferSize: 11, + features: Parser.FEAT_KV_PAIRS, + maxSize: 100, + mode: 'string' + }); + assert.deepStrictEqual(p.mode, 'string'); + assert.deepStrictEqual(p._buffers.allocated, 10); + assert.deepStrictEqual(p.maxSize, 100); + assert.deepStrictEqual(p.freeBuffers, 11); + assert.isTrue(p.featKVPairs); + assert.isFalse(p.featF5EvtCategory); + assert.deepStrictEqual(p.features & Parser.FEAT_ALL, Parser.FEAT_KV_PAIRS); + assert.deepStrictEqual(p.maxKVPairs, 2000); }); }); - describe('.process()', () => { - ['buffer', 'string'].forEach((mode) => { - describe(`mode = ${mode}`, () => { + describe('data processing', () => { + const inputModes = [ + 'regular', + 'byHalf' + ]; + const featMap = { + FEAT_KV_PAIRS: Parser.FEAT_KV_PAIRS, + FEAT_F5_EVT_CAT: Parser.FEAT_F5_EVT_CAT, + FEAT_ALL: Parser.FEAT_ALL, + FEAT_NONE: Parser.FEAT_NONE + }; + const modes = [ + 'buffer', + 'string' + ]; + + function checkCharCodesKVPairs(results, mayHaveKeyValuePairs) { + mayHaveKeyValuePairs.forEach((symb, idx) => { + if (symb) { + for (let i = 1; i < symb.length; i += 2) { + assert.deepStrictEqual( + results[idx][symb[i]], + i & 0b1 ? ',' : '=', + 'should match char codes at particular position' + ); + } + } + }); + } + + function checkCharCodesF5Telemetry(results, mayHaveF5EventCategory) { + mayHaveF5EventCategory.forEach((offset, idx) => { + if (offset) { + assert.deepStrictEqual(results[idx].slice(offset - 1, offset + 1), '$F', 'should match char codes at particular position'); + } + }); + } + + testUtil.product(inputModes, Object.keys(featMap), modes).forEach((product) => { + const feature = product[1]; + const inputMode = product[0]; + const mode = product[2]; + + describe(`mode = ${mode}, features = ${feature}, input = ${inputMode}`, () => { let callback; let makeInput; let parser; let results; + let mayHaveKeyValuePairs; + let mayHaveF5EventCategory; + let stringDecoder; + + const defaultStringDecoder = new StringDecoder('utf8'); if (mode === 'string') { - callback = (chunks) => { + callback = (chunks, hasKVPair, hasEvtCat) => { + mayHaveF5EventCategory.push(hasEvtCat); + mayHaveKeyValuePairs.push(hasKVPair); results.push(chunks.length === 1 ? chunks[0] : chunks.reduce((a, v) => a + v, '')); }; makeInput = (chunk) => [chunk, Buffer.from(chunk).length, chunk.length]; } else { - callback = (chunks) => { - results.push(chunks.length === 1 ? 
chunks[0].toString() : chunks.reduce((a, v) => a + v.toString(), '')); + callback = (chunks, hasKVPair, hasEvtCat) => { + mayHaveF5EventCategory.push(hasEvtCat); + mayHaveKeyValuePairs.push(hasKVPair); + + chunks = chunks.map((c) => stringDecoder.write(c)); + chunks.push(stringDecoder.end()); + results.push(chunks.join('')); }; makeInput = (chunk) => { chunk = Buffer.from(chunk); @@ -92,15 +191,21 @@ describe('Event Listener / Parser', () => { } beforeEach(() => { - parser = new Parser(callback, { mode }); + mayHaveF5EventCategory = []; + mayHaveKeyValuePairs = []; + parser = new Parser(callback, { mode, features: featMap[feature] }); results = []; + + if (stringDecoder !== defaultStringDecoder) { + stringDecoder = defaultStringDecoder; + } }); describe('Data sets', () => { parserTestData.process.forEach((testConf) => { const separators = JSON.stringify(testConf.chunks).indexOf('{sep}') !== -1 ? ['\n', '\r\n'] : ['']; separators.forEach((sep) => { - let sepMsg = 'built-in the test new line separator'; + let sepMsg = 'built in the test new line separator'; if (sep) { sepMsg = sep.replace(/\n/g, '\\n').replace(/\r/g, '\\r'); } @@ -109,13 +214,31 @@ describe('Event Listener / Parser', () => { let totalBytes = 0; let totalLength = 0; - testConf.chunks.forEach((chunk) => { - const payload = makeInput(chunk.replace(/\{sep\}/g, sep)); - totalBuffers += 1; - totalBytes += payload[1]; - totalLength += payload[2]; - parser.push(payload); - }); + if (inputMode === 'regular') { + testConf.chunks.forEach((chunk) => { + const payload = makeInput(chunk.replace(/\{sep\}/g, sep)); + totalBuffers += 1; + totalBytes += payload[1]; + totalLength += payload[2]; + parser.push(payload); + }); + } else { + testConf.chunks.forEach((chunk) => { + chunk = chunk.replace(/\{sep\}/g, sep); + const mid = (chunk.length / 2) >> 0; + const payloads = []; + if (mid) { + payloads.push(makeInput(chunk.slice(0, mid))); + } + payloads.push(makeInput(chunk.slice(mid))); + payloads.forEach((payload) => { + totalBuffers += 1; + totalBytes += payload[1]; + totalLength += payload[2]; + parser.push(payload); + }); + }); + } assert.deepStrictEqual(parser.buffers, totalBuffers, 'should match expected number of pending buffers'); assert.deepStrictEqual(parser.bytes, totalBytes, 'should match expected number of pending bytes'); @@ -127,6 +250,24 @@ describe('Event Listener / Parser', () => { testConf.expectedData.map((d) => d.replace(/\{sep\}/g, sep)) ); + if (parser.featKVPairs && testConf.mayHaveKeyValuePairs) { + assert.deepStrictEqual(mayHaveKeyValuePairs.length, results.length, 'should match length of results'); + assert.deepStrictEqual(mayHaveKeyValuePairs, testConf.mayHaveKeyValuePairs, 'should match expected key-value pairs'); + checkCharCodesKVPairs(results, mayHaveKeyValuePairs); + } else if (!parser.featKVPairs) { + assert.deepStrictEqual(mayHaveKeyValuePairs.length, results.length, 'should match length of results'); + assert.deepStrictEqual(mayHaveKeyValuePairs, (new Array(results.length)).fill(null), 'should match expected key-value pairs'); + } + + if (parser.featF5EvtCategory && testConf.mayHaveF5EventCategory) { + assert.deepStrictEqual(mayHaveF5EventCategory.length, results.length, 'should match length of results'); + assert.deepStrictEqual(mayHaveF5EventCategory, testConf.mayHaveF5EventCategory, 'should match expected event categories'); + checkCharCodesF5Telemetry(results, mayHaveF5EventCategory); + } else if (!parser.featF5EvtCategory) { + assert.deepStrictEqual(mayHaveF5EventCategory.length, results.length, 'should 
match length of results'); + assert.deepStrictEqual(mayHaveF5EventCategory, (new Array(results.length)).fill(0), 'should match expected event categories'); + } + assert.deepStrictEqual(parser.buffers, 0, 'should have no buffers left'); assert.deepStrictEqual(parser.bytes, 0, 'should have no bytes left'); assert.deepStrictEqual(parser.length, 0, 'should have no bytes/chars left'); @@ -135,6 +276,65 @@ describe('Event Listener / Parser', () => { }); }); + if (mode === 'string') { + it('should process UTF-8 broken into parts', () => { + parser.push(['ключ=значение,$F5TelemetryEventCategory=категор\nия', 51]); + parser.process(true); + + assert.deepStrictEqual(results, [ + 'ключ=значение,$F5TelemetryEventCategory=категор', + 'ия' + ]); + assert.deepStrictEqual( + mayHaveKeyValuePairs, + parser.featKVPairs + ? [new Uint16Array([4, 13, 39]), null] + : [null, null] + ); + + checkCharCodesKVPairs(results, mayHaveKeyValuePairs); + + assert.deepStrictEqual( + mayHaveF5EventCategory, + parser.featF5EvtCategory ? [15, 0] : [0, 0] + ); + + checkCharCodesF5Telemetry(results, mayHaveF5EventCategory); + }); + } + + if (mode === 'buffer') { + it('should process UTF-8 broken into parts', () => { + Buffer.from('ключ=значение,$F5TelemetryEventCategory=категор\nия', 'utf-8') + .forEach((byte) => parser.push([Buffer.from([byte]), 1])); + + parser.process(true); + + assert.deepStrictEqual(results, [ + 'ключ=значение,$F5TelemetryEventCategory=категор', + 'ия' + ]); + + if (parser.featKVPairs) { + assert.notDeepEqual(mayHaveKeyValuePairs, [ + new Uint16Array([4, 13, 39]), + null + ], 'UTF-8 parsing fixed???????!!! HURAY'); + } else { + assert.deepStrictEqual(mayHaveKeyValuePairs, [null, null]); + } + + if (parser.featF5EvtCategory) { + assert.notDeepEqual(mayHaveF5EventCategory, [ + 15, + 0 + ], 'UTF-8 parsing fixed???????!!! HURAY'); + } else { + assert.deepStrictEqual(mayHaveF5EventCategory, [0, 0]); + } + }); + } + it('should not emit data on incomplete message (single buffer)', () => { parser.push(makeInput('firstLine\nsecondLineIncomple="value')); @@ -162,6 +362,16 @@ describe('Event Listener / Parser', () => { assert.deepStrictEqual(parser.buffers, 0, 'should have no buffers left'); assert.deepStrictEqual(parser.bytes, 0, 'should have no bytes left'); assert.deepStrictEqual(parser.length, 0, 'should have no bytes/chars left'); + + assert.deepStrictEqual( + mayHaveKeyValuePairs, + parser.featKVPairs + ? [null, new Uint16Array([18])] + : [null, null] + ); + checkCharCodesKVPairs(results, mayHaveKeyValuePairs); + + assert.deepStrictEqual(mayHaveF5EventCategory, [0, 0]); }); it('should not emit data on incomplete message (multiple buffers)', () => { @@ -192,6 +402,16 @@ describe('Event Listener / Parser', () => { assert.deepStrictEqual(parser.buffers, 0, 'should have no buffers left'); assert.deepStrictEqual(parser.bytes, 0, 'should have no bytes left'); assert.deepStrictEqual(parser.length, 0, 'should have no bytes/chars left'); + + assert.deepStrictEqual( + mayHaveKeyValuePairs, + parser.featKVPairs + ? 
[null, new Uint16Array([18])] + : [null, null] + ); + checkCharCodesKVPairs(results, mayHaveKeyValuePairs); + + assert.deepStrictEqual(mayHaveF5EventCategory, [0, 0]); }); it('should not emit data on incomplete message (multiple buffers)', () => { @@ -223,6 +443,16 @@ describe('Event Listener / Parser', () => { assert.deepStrictEqual(parser.buffers, 0, 'should have no buffers left'); assert.deepStrictEqual(parser.bytes, 0, 'should have no bytes left'); assert.deepStrictEqual(parser.length, 0, 'should have no bytes/chars left'); + + assert.deepStrictEqual( + mayHaveKeyValuePairs, + parser.featKVPairs + ? [null, new Uint16Array([18])] + : [null, null] + ); + checkCharCodesKVPairs(results, mayHaveKeyValuePairs); + + assert.deepStrictEqual(mayHaveF5EventCategory, [0, 0]); }); it('should process empty line', () => { @@ -370,6 +600,212 @@ describe('Event Listener / Parser', () => { assert.deepStrictEqual(parser.freeBuffers, 5); }); }); + + describe('.erase()', () => { + it('should erase state', () => { + parser = new Parser(callback, { + bufferSize: 5, + mode, + maxSize: 100 + }); + + for (let i = 0; i < 2; i += 1) { + parser.push(makeInput('li')); + parser.push(makeInput(`ne #${i}\n`)); + } + + assert.deepStrictEqual(parser.buffers, 4); + assert.deepStrictEqual(parser.bytes, 16); + + parser.erase(); + + assert.deepStrictEqual(parser.buffers, 0); + assert.deepStrictEqual(parser.bytes, 0); + }); + }); + + it('should use non-default values (Uint32Array)', () => { + const p = new Parser(callback, { + maxSize: Math.pow(2, 16) + 100, + mode + }); + assert.deepStrictEqual(p.maxSize, Math.pow(2, 16) + 100); + + p.push(makeInput( + 'something=test' + + '\\'.repeat(64 * 1024) + + ',something2=test2,something3=test3\\\\\\\n' + )); + + assert.deepStrictEqual(p.bytes, 65588); + assert.deepStrictEqual(p.buffers, 1); + + p.process(); + + assert.deepStrictEqual(mayHaveKeyValuePairs, [ + new Uint32Array([9, 65550, 65561, 65567, 65578]) + ]); + checkCharCodesKVPairs(results, mayHaveKeyValuePairs); + + assert.deepStrictEqual(mayHaveF5EventCategory, [0]); + }); + + it('should use custom maxSize', () => { + const p = new Parser(callback, { + maxSize: 100, + mode + }); + assert.deepStrictEqual(p.maxSize, 100); + + const str = 'something=testtest1,'; + + p.push(makeInput(str.repeat(5))); + p.push(makeInput(str.repeat(5))); + p.push(makeInput(str.repeat(5))); + p.push(makeInput(str.repeat(5))); + p.push(makeInput(str.repeat(5))); + p.push(makeInput(str.repeat(5))); + + assert.deepStrictEqual(p.bytes, str.length * 5 * 6); + assert.deepStrictEqual(p.buffers, 6); + + p.process(true); + assert.isFalse(parser.isReady(), 'should return false when no data'); + + assert.deepStrictEqual(results, [ + str.repeat(5), + str.repeat(5), + str.repeat(5), + str.repeat(5), + str.repeat(5), + str.repeat(5) + ]); + + assert.deepStrictEqual(mayHaveKeyValuePairs, [ + new Uint16Array([ + 9, 19, 29, 39, 49, 59, 69, 79, 89, 99 + ]), + new Uint16Array([ + 9, 19, 29, 39, 49, 59, 69, 79, 89, 99 + ]), + new Uint16Array([ + 9, 19, 29, 39, 49, 59, 69, 79, 89, 99 + ]), + new Uint16Array([ + 9, 19, 29, 39, 49, 59, 69, 79, 89, 99 + ]), + new Uint16Array([ + 9, 19, 29, 39, 49, 59, 69, 79, 89, 99 + ]), + new Uint16Array([ + 9, 19, 29, 39, 49, 59, 69, 79, 89, 99 + ]) + ]); + checkCharCodesKVPairs(results, mayHaveKeyValuePairs); + + assert.deepStrictEqual(mayHaveF5EventCategory, [0, 0, 0, 0, 0, 0]); + }); + + it('should use custom maxKVPairs', () => { + const p = new Parser(callback, { + maxKVPairs: 2, + maxSize: 100, + mode + }); + 
assert.deepStrictEqual(p.maxKVPairs, 2); + assert.deepStrictEqual(p.maxSize, 100); + + const str = 'something=testtest1,'; + + p.push(makeInput(str.repeat(5))); + p.push(makeInput(str.repeat(5))); + p.push(makeInput(str.repeat(5))); + p.push(makeInput(str.repeat(5))); + p.push(makeInput(str.repeat(5))); + p.push(makeInput(str.repeat(5))); + + assert.deepStrictEqual(p.bytes, str.length * 5 * 6); + assert.deepStrictEqual(p.buffers, 6); + + p.process(true); + assert.isFalse(parser.isReady(), 'should return false when no data'); + + assert.deepStrictEqual(results, [ + str.repeat(5), + str.repeat(5), + str.repeat(5), + str.repeat(5), + str.repeat(5), + str.repeat(5) + ]); + + assert.deepStrictEqual(mayHaveKeyValuePairs, [ + new Uint16Array([ + 9, 19, 29, 39 + ]), + new Uint16Array([ + 9, 19, 29, 39 + ]), + new Uint16Array([ + 9, 19, 29, 39 + ]), + new Uint16Array([ + 9, 19, 29, 39 + ]), + new Uint16Array([ + 9, 19, 29, 39 + ]), + new Uint16Array([ + 9, 19, 29, 39 + ]) + ]); + checkCharCodesKVPairs(results, mayHaveKeyValuePairs); + + assert.deepStrictEqual(mayHaveF5EventCategory, [0, 0, 0, 0, 0, 0]); + }); + + it('should correctly process extended ASCII', () => { + stringDecoder = { + write(chunk) { + return chunk.toString('binary'); + }, + end() { + return ''; + } + }; + + const str = '"key=value,key=value'; + + const asciiTable = 255; + const buffer = Buffer.alloc(asciiTable + str.length); + for (let i = 0; i < asciiTable; i += 1) { + buffer[i] = i; + } + for (let i = asciiTable; i < asciiTable + str.length; i += 1) { + buffer[i] = str.charCodeAt(i - asciiTable); + } + const expected = buffer.toString('binary'); + + parser.push([ + mode === 'string' ? buffer.toString('binary') : buffer, + buffer.length + ]); + parser.process(true); + assert.isFalse(parser.isReady(), 'should return false when no data'); + + assert.deepStrictEqual(results, [ + expected.slice(0, 10), + expected.slice(11) + ]); + + assert.deepStrictEqual(mayHaveKeyValuePairs, [ + null, + parser.featKVPairs ? new Uint16Array([248, 254, 258]) : null + ]); + checkCharCodesKVPairs(results, mayHaveKeyValuePairs); + + assert.deepStrictEqual(mayHaveF5EventCategory, [0, 0]); + }); }); }); }); diff --git a/test/unit/eventListener/streamTests.js b/test/unit/eventListener/streamTests.js index 7b0419f5..fcea24b8 100644 --- a/test/unit/eventListener/streamTests.js +++ b/test/unit/eventListener/streamTests.js @@ -16,10 +16,11 @@ 'use strict'; -/* eslint-disable import/order */ +/* eslint-disable import/order, no-bitwise */ const moduleCache = require('../shared/restoreCache')(); const sinon = require('sinon'); +const StringDecoder = require('string_decoder').StringDecoder; const assert = require('../shared/assert'); const parserTestData = require('./data/parserTestsData'); @@ -78,36 +79,89 @@ describe('Event Listener / Stream', () => { }); }); - describe('.proccess()', () => { - ['buffer', 'string'].forEach((mode) => { - describe(`mode = ${mode}`, () => { + describe('data processing', () => { + const inputModes = [ + 'regular', + 'byHalf' + ]; + const modes = [ + 'buffer', + 'string' + ]; + + function checkCharCodesKVPairs(results, mayHaveKeyValuePairs) { + mayHaveKeyValuePairs.forEach((symb, idx) => { + if (symb) { + for (let i = 1; i < symb.length; i += 2) { + assert.deepStrictEqual( + results[idx][symb[i]], + i & 0b1 ? 
',' : '=', + 'should match char codes at particular position' + ); + } + } + }); + } + + function checkCharCodesF5Telemetry(results, mayHaveF5EventCategory) { + mayHaveF5EventCategory.forEach((offset, idx) => { + if (offset) { + assert.deepStrictEqual(results[idx].slice(offset - 1, offset + 1), '$F', 'should match char codes at particular position'); + } + }); + } + testUtil.product(inputModes, modes).forEach((product) => { + const inputMode = product[0]; + const mode = product[1]; + + describe(`mode = ${mode}, input = ${inputMode}`, () => { let callback; - let makeInput; let parser; let stream; let results; + let mayHaveKeyValuePairs; + let mayHaveF5EventCategory; + let stringDecoder; + + const defaultStringDecoder = new StringDecoder('utf8'); if (mode === 'string') { - callback = (chunks) => { + callback = (chunks, hasKVPair, hasEvtCat) => { + mayHaveF5EventCategory.push(hasEvtCat); + mayHaveKeyValuePairs.push(hasKVPair); results.push(chunks.length === 1 ? chunks[0] : chunks.reduce((a, v) => a + v, '')); }; - makeInput = (chunk) => [chunk, Buffer.from(chunk).length, chunk.length]; } else { - callback = (chunks) => { - results.push(chunks.length === 1 ? chunks[0].toString() : chunks.reduce((a, v) => a + v.toString(), '')); - }; - makeInput = (chunk) => { - chunk = Buffer.from(chunk); - return [chunk, chunk.length, chunk.length]; + callback = (chunks, hasKVPair, hasEvtCat) => { + mayHaveF5EventCategory.push(hasEvtCat); + mayHaveKeyValuePairs.push(hasKVPair); + if (chunks.length === 1) { + results.push(chunks[0].toString()); + } else { + chunks = chunks.map((c) => defaultStringDecoder.write(c)); + chunks.push(defaultStringDecoder.end()); + results.push(chunks.join('')); + } }; } + const makeInput = (chunk) => { + const buf = Buffer.from(chunk); + return [buf, buf.length, (mode === 'string') ? 
chunk.length : buf.length]; + }; + const makeInput2 = (chunk) => makeInput(chunk)[0]; beforeEach(() => { + mayHaveF5EventCategory = []; + mayHaveKeyValuePairs = []; parser = new Parser(callback, { mode }); - stream = new Stream(parser); results = []; + stream = new Stream(parser); + + if (stringDecoder !== defaultStringDecoder) { + stringDecoder = defaultStringDecoder; + } }); describe('Data sets', () => { @@ -124,16 +178,37 @@ describe('Event Listener / Stream', () => { let totalLength = 0; let lastTimePush; - testConf.chunks.forEach((chunk) => { - const payload = makeInput(chunk.replace(/\{sep\}/g, sep)); - totalBuffers += 1; - totalBytes += payload[1]; - totalLength += payload[2]; - - lastTimePush = stream.lastPushTimeDelta(); - stream.push(payload[0]); - assert.notDeepEqual(stream.lastPushTimeDelta(), lastTimePush); - }); + if (inputMode === 'regular') { + testConf.chunks.forEach((chunk) => { + const payload = makeInput(chunk.replace(/\{sep\}/g, sep)); + totalBuffers += 1; + totalBytes += payload[1]; + totalLength += payload[2]; + + lastTimePush = stream.lastPushTimeDelta(); + stream.push(payload[0]); + assert.notDeepEqual(stream.lastPushTimeDelta(), lastTimePush); + }); + } else { + testConf.chunks.forEach((chunk) => { + chunk = chunk.replace(/\{sep\}/g, sep); + const mid = (chunk.length / 2) >> 0; + const payloads = []; + if (mid) { + payloads.push(makeInput(chunk.slice(0, mid))); + } + payloads.push(makeInput(chunk.slice(mid))); + payloads.forEach((payload) => { + totalBuffers += 1; + totalBytes += payload[1]; + totalLength += payload[2]; + + lastTimePush = stream.lastPushTimeDelta(); + stream.push(payload[0]); + assert.notDeepEqual(stream.lastPushTimeDelta(), lastTimePush); + }); + }); + } assert.deepStrictEqual(stream.buffers, totalBuffers, 'should match expected number of pending buffers'); assert.deepStrictEqual(stream.bytes, totalBytes, 'should match expected number of pending bytes'); @@ -148,6 +223,18 @@ describe('Event Listener / Stream', () => { ); assert.notDeepEqual(stream.lastProcessTimeDelta(), lastTimeProcess); + if (testConf.mayHaveKeyValuePairs) { + assert.deepStrictEqual(mayHaveKeyValuePairs.length, results.length, 'should match length of results'); + assert.deepStrictEqual(mayHaveKeyValuePairs, testConf.mayHaveKeyValuePairs, 'should match expected key-value pairs'); + checkCharCodesKVPairs(results, mayHaveKeyValuePairs); + } + + if (testConf.mayHaveF5EventCategory) { + assert.deepStrictEqual(mayHaveF5EventCategory.length, results.length, 'should match length of results'); + assert.deepStrictEqual(mayHaveF5EventCategory, testConf.mayHaveF5EventCategory, 'should match expected event categories'); + checkCharCodesF5Telemetry(results, mayHaveF5EventCategory); + } + assert.deepStrictEqual(stream.buffers, 0, 'should have no buffers left'); assert.deepStrictEqual(stream.bytes, 0, 'should have no bytes left'); assert.deepStrictEqual(stream.length, 0, 'should have no bytes/chars left'); @@ -206,7 +293,7 @@ describe('Event Listener / Stream', () => { assert.deepStrictEqual(stream.length, 0, 'should have no bytes/chars left'); }); - it('should flush parser if there is no free spots in parser\'s buffer ', () => { + it('should flush parser if there is no free spots in parser\'s buffer', () => { parser = new Parser(callback, { mode, bufferSize: 2 }); stream = new Stream(parser); @@ -287,6 +374,43 @@ describe('Event Listener / Stream', () => { assert.deepStrictEqual(results, ['testline']); }); + it('should erase all data', () => { + parser = new Parser(callback, { mode, 
bufferSize: 2 }); + stream = new Stream(parser); + + stream.push(makeInput2('inc')); + stream.push(makeInput2('omp')); + stream.push(makeInput2('let')); + stream.push(makeInput2('e m')); + stream.push(makeInput2('ess')); + stream.push(makeInput2('age')); + stream.push(makeInput2('\n')); + + assert.deepStrictEqual(stream.buffers, 7, 'should match expected number of pending buffers'); + assert.deepStrictEqual(stream.bytes, 19, 'should match expected number of pending bytes'); + assert.deepStrictEqual(stream.length, 19, 'should match expected number of pending bytes/chars'); + + assert.isTrue(stream.process(1e9)[0], 'should have data to process'); + assert.deepStrictEqual(results, []); + + stream.push(makeInput2('inc')); + stream.push(makeInput2('omp')); + stream.push(makeInput2('let')); + stream.push(makeInput2('e m')); + stream.push(makeInput2('ess')); + stream.push(makeInput2('age')); + stream.push(makeInput2('\n')); + + assert.deepStrictEqual(stream.buffers, 14, 'should match expected number of pending buffers'); + assert.deepStrictEqual(stream.bytes, 38, 'should match expected number of pending bytes'); + assert.deepStrictEqual(stream.length, 38, 'should match expected number of pending bytes/chars'); + + stream.erase(); + assert.deepStrictEqual(stream.buffers, 0, 'should match expected number of pending buffers'); + assert.deepStrictEqual(stream.bytes, 0, 'should match expected number of pending bytes'); + assert.deepStrictEqual(stream.length, 0, 'should match expected number of pending bytes/chars'); + }); + describe('"drop" strategy', () => { it('should drop new data when limits applied', () => { stream = new Stream(parser, { strategy: 'drop' }); diff --git a/test/unit/loggerTests.js b/test/unit/loggerTests.js index e4795c67..c05de797 100644 --- a/test/unit/loggerTests.js +++ b/test/unit/loggerTests.js @@ -118,7 +118,7 @@ describe('Logger', () => { const msg = `this is a ${logType} message`; logger[logType](msg); - if (logger.getLevel(logType) >= logger.getLevel()) { + if (logger.isLevelAllowed(logType)) { assert.lengthOf(coreStub.logger.messages[logType], 1); // check it contains the message - no exact match as prefix [telemetry] will be added assert.include(coreStub.logger.messages[logType][0], msg); diff --git a/test/unit/resourceMonitor/memoryMonitorTests.js b/test/unit/resourceMonitor/memoryMonitorTests.js new file mode 100644 index 00000000..b1488872 --- /dev/null +++ b/test/unit/resourceMonitor/memoryMonitorTests.js @@ -0,0 +1,594 @@ +/** + * Copyright 2024 F5, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +'use strict'; + +/* eslint-disable import/order, no-use-before-define */ +const moduleCache = require('../shared/restoreCache')(); + +const sinon = require('sinon'); + +const assert = require('../shared/assert'); +const sourceCode = require('../shared/sourceCode'); +const stubs = require('../shared/stubs'); + +const MemoryMonitor = sourceCode('src/lib/resourceMonitor/memoryMonitor'); + +moduleCache.remember(); + +describe('Resource Monitor / Memory Monitor', () => { + let coreStub; + let results; + let memMon; + + const callback = (memCheck) => results.push(memCheck); + + before(() => { + moduleCache.restore(); + }); + + beforeEach(() => { + coreStub = stubs.default.coreStub({ logger: true, resourceMonitorUtils: true, utilMisc: true }); + coreStub.resourceMonitorUtils.osAvailableMem.restore(); + results = []; + global.gc = undefined; + }); + + afterEach(() => { + sinon.restore(); + return memMon.destroy(); + }); + + describe('.constructor()', () => { + it('should use default parameters', () => { + memMon = new MemoryMonitor(callback); + + assert.deepStrictEqual(memMon.freeMemoryLimit, 30); + assert.deepStrictEqual(memMon.provisioned, 4096); + assert.deepStrictEqual(memMon.releaseThreshold, 3317); + assert.deepStrictEqual(memMon.releasePercent, 90); + assert.deepStrictEqual(memMon.threshold, 3686); + assert.deepStrictEqual(memMon.thresholdPercent, 90); + assert.isFalse(memMon.gcEnabled); + }); + + it('should use non-default parameters', () => { + memMon = new MemoryMonitor(callback, { + freeMemoryLimit: 10, + provisioned: 100, + releasePercent: 50, + thresholdPercent: 50 + }); + + assert.deepStrictEqual(memMon.freeMemoryLimit, 10); + assert.deepStrictEqual(memMon.provisioned, 100); + assert.deepStrictEqual(memMon.releaseThreshold, 25); + assert.deepStrictEqual(memMon.releasePercent, 50); + assert.deepStrictEqual(memMon.threshold, 50); + assert.deepStrictEqual(memMon.thresholdPercent, 50); + assert.isFalse(memMon.gcEnabled); + }); + + it('should detect exposed GC', () => { + global.gc = () => {}; + memMon = new MemoryMonitor(callback); + + assert.deepStrictEqual(memMon.freeMemoryLimit, 30); + assert.deepStrictEqual(memMon.provisioned, 4096); + assert.deepStrictEqual(memMon.releaseThreshold, 3317); + assert.deepStrictEqual(memMon.releasePercent, 90); + assert.deepStrictEqual(memMon.threshold, 3686); + assert.deepStrictEqual(memMon.thresholdPercent, 90); + assert.isTrue(memMon.gcEnabled); + }); + }); + + describe('service activity', () => { + let clock; + let fsUtil; + + beforeEach(() => { + fsUtil = { + readFileSync: () => PROC_MEM_INFO_OUTPUT + }; + + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 10 * 1024 * 1024, + heapTotal: 20 * 1024 * 1024, + heapUsed: 15 * 1024 * 1024, + rss: 17 * 1024 * 1024 + }); + + clock = stubs.clock(); + }); + + it('should provide memory usage stats (simple check, default intervals - 1.5 sec)', () => { + memMon = new MemoryMonitor(callback, { + fs: fsUtil + }); + return Promise.all([ + memMon.start(), + clock.clockForward(1000, { promisify: true, repeat: 2, delay: 50 }) + ]) + .then(() => { + assert.lengthOf(results, 1); + assert.deepNestedInclude(results[0], { + hrtimestamp: 1500000000, + interval: { min: 0, max: 50, interval: 1.5 }, + thresholdStatus: 'MEMORY_USAGE_BELOW_THRESHOLD', + trend: 'MEMORY_USAGE_NO_CHANGE', + usage: { + external: 10, + free: 2048, + freeLimit: 30, + freeUtilizationPercent: 0, + heapTotal: 20, + heapUsed: 15, + provisioned: 4096, + release: 3317, + releasePercent: 90, + rss: 17, + threshold: 3686, + 
thresholdPercent: 90, + thresholdUtilzationPercent: 0.4612045577862181, + utilization: 17, + utilizationPercent: 0.4150390625 + } + }); + }); + }); + + it('should provide memory usage stats (non default intervals)', () => { + memMon = new MemoryMonitor(callback, { + freeMemoryLimit: 10, + fs: fsUtil, + intervals: [ + { usage: 50, interval: 0.5 }, + { usage: 90, interval: 0.3 }, + { usage: 100, interval: 0.1 } + ], + provisioned: 250, + thresholdPercent: 80 + }); + return Promise.all([ + memMon.start(), + clock.clockForward(300, { promisify: true, repeat: 2, delay: 10 }) + ]) + .then(() => { + assert.lengthOf(results, 1); + assert.deepNestedInclude(results[0], { + interval: { min: 0, max: 50, interval: 0.5 }, + thresholdStatus: 'MEMORY_USAGE_BELOW_THRESHOLD', + trend: 'MEMORY_USAGE_NO_CHANGE', + usage: { + external: 10, + free: 2048, + freeLimit: 10, + freeUtilizationPercent: 0, + heapTotal: 20, + heapUsed: 15, + provisioned: 250, + release: 180, + releasePercent: 90, + rss: 17, + threshold: 200, + thresholdPercent: 80, + thresholdUtilzationPercent: 8.5, + utilization: 17, + utilizationPercent: 6.800000000000001 + } + }); + + fsUtil.readFileSync = () => 'MemAvailable: 9000 kB'; + }) + .then(() => clock.clockForward(300, { promisify: true, repeat: 2, delay: 10 })) + .then(() => { + assert.lengthOf(results, 2); + assert.deepNestedInclude(results[1], { + interval: { min: 100, max: 9007199254740991, interval: 1 }, + thresholdStatus: 'MEMORY_USAGE_ABOVE_THRESHOLD', + trend: 'MEMORY_USAGE_NO_CHANGE', + usage: { + external: 10, + free: 8.7890625, + freeLimit: 10, + freeUtilizationPercent: 112.109375, + heapTotal: 20, + heapUsed: 15, + provisioned: 250, + release: 180, + releasePercent: 90, + rss: 17, + threshold: 200, + thresholdPercent: 80, + thresholdUtilzationPercent: 8.5, + utilization: 17, + utilizationPercent: 6.800000000000001 + } + }); + + fsUtil.readFileSync = () => PROC_MEM_INFO_OUTPUT; + }) + .then(() => clock.clockForward(600, { promisify: true, repeat: 2, delay: 10 })) + .then(() => { + assert.lengthOf(results, 3); + assert.deepNestedInclude(results[2], { + interval: { min: 0, max: 50, interval: 0.5 }, + thresholdStatus: 'MEMORY_USAGE_BELOW_THRESHOLD', + trend: 'MEMORY_USAGE_NO_CHANGE', + usage: { + external: 10, + free: 2048, + freeLimit: 10, + freeUtilizationPercent: 0, + heapTotal: 20, + heapUsed: 15, + provisioned: 250, + release: 180, + releasePercent: 90, + rss: 17, + threshold: 200, + thresholdPercent: 80, + thresholdUtilzationPercent: 8.5, + utilization: 17, + utilizationPercent: 6.800000000000001 + } + }); + }) + .then(() => clock.clockForward(75, { promisify: true, repeat: 2, delay: 10 })) + .then(() => { + assert.lengthOf(results, 3); + fsUtil.readFileSync = () => 'MemFree: 9000 kB'; + }) + .then(() => clock.clockForward(300, { promisify: true, repeat: 2, delay: 10 })) + .then(() => { + assert.lengthOf(results, 4); + assert.deepNestedInclude(results[3], { + interval: { min: 0, max: 50, interval: 0.5 }, + thresholdStatus: 'MEMORY_USAGE_BELOW_THRESHOLD', + trend: 'MEMORY_USAGE_NO_CHANGE', + usage: { + external: 10, + free: -1, + freeLimit: 10, + freeUtilizationPercent: 0, + heapTotal: 20, + heapUsed: 15, + provisioned: 250, + release: 180, + releasePercent: 90, + rss: 17, + threshold: 200, + thresholdPercent: 80, + thresholdUtilzationPercent: 8.5, + utilization: 17, + utilizationPercent: 6.800000000000001 + } + }); + + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 25 * 1024 * 1024, + heapTotal: 20 * 1024 * 1024, + heapUsed: 15 * 1024 * 1024, + rss: 
17 * 1024 * 1024 + }); + }) + .then(() => clock.clockForward(300, { promisify: true, repeat: 2, delay: 10 })) + .then(() => { + assert.lengthOf(results, 5); + assert.deepNestedInclude(results[4], { + interval: { min: 0, max: 50, interval: 0.5 }, + thresholdStatus: 'MEMORY_USAGE_BELOW_THRESHOLD', + trend: 'MEMORY_USAGE_GOES_UP', + usage: { + external: 25, + free: -1, + freeLimit: 10, + freeUtilizationPercent: 0, + heapTotal: 20, + heapUsed: 15, + provisioned: 250, + release: 180, + releasePercent: 90, + rss: 17, + threshold: 200, + thresholdPercent: 80, + thresholdUtilzationPercent: 12.5, + utilization: 25, + utilizationPercent: 10 + } + }); + fsUtil.readFileSync = () => 'MemAvailable: 20000 kB'; + }) + .then(() => clock.clockForward(100, { promisify: true, repeat: 2, delay: 10 })) + .then(() => { + assert.lengthOf(results, 6); + assert.deepNestedInclude(results[5], { + interval: { min: 90, max: 100, interval: 0.1 }, + thresholdStatus: 'MEMORY_USAGE_BELOW_THRESHOLD', + trend: 'MEMORY_USAGE_NO_CHANGE', + usage: { + external: 25, + free: 19.53125, + freeLimit: 10, + freeUtilizationPercent: 90.46875, + heapTotal: 20, + heapUsed: 15, + provisioned: 250, + release: 180, + releasePercent: 90, + rss: 17, + threshold: 200, + thresholdPercent: 80, + thresholdUtilzationPercent: 12.5, + utilization: 25, + utilizationPercent: 10 + } + }); + fsUtil.readFileSync = () => 'MemAvai: 20000 kB'; + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 220 * 1024 * 1024, + heapTotal: 220 * 1024 * 1024, + heapUsed: 220 * 1024 * 1024, + rss: 220 * 1024 * 1024 + }); + }) + .then(() => clock.clockForward(100, { promisify: true, repeat: 2, delay: 10 })) + .then(() => { + assert.lengthOf(results, 7); + assert.deepNestedInclude(results[6], { + interval: { min: 100, max: 9007199254740991, interval: 1 }, + thresholdStatus: 'MEMORY_USAGE_ABOVE_THRESHOLD', + trend: 'MEMORY_USAGE_GOES_UP', + usage: { + external: 220, + free: -1, + freeLimit: 10, + freeUtilizationPercent: 0, + heapTotal: 220, + heapUsed: 220, + provisioned: 250, + release: 180, + releasePercent: 90, + rss: 220, + threshold: 200, + thresholdPercent: 80, + thresholdUtilzationPercent: 110.00000000000001, + utilization: 220, + utilizationPercent: 88 + } + }); + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 190 * 1024 * 1024, + heapTotal: 190 * 1024 * 1024, + heapUsed: 190 * 1024 * 1024, + rss: 190 * 1024 * 1024 + }); + }) + .then(() => clock.clockForward(100, { promisify: true, repeat: 10, delay: 10 })) + .then(() => { + assert.lengthOf(results, 8); + assert.deepNestedInclude(results[7], { + interval: { min: 100, max: 9007199254740991, interval: 1 }, + thresholdStatus: 'MEMORY_USAGE_ABOVE_THRESHOLD', + trend: 'MEMORY_USAGE_GOES_DOWN', + usage: { + external: 190, + free: -1, + freeLimit: 10, + freeUtilizationPercent: 0, + heapTotal: 190, + heapUsed: 190, + provisioned: 250, + release: 180, + releasePercent: 90, + rss: 190, + threshold: 200, + thresholdPercent: 80, + thresholdUtilzationPercent: 95, + utilization: 190, + utilizationPercent: 76 + } + }); + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 100 * 1024 * 1024, + heapTotal: 100 * 1024 * 1024, + heapUsed: 100 * 1024 * 1024, + rss: 100 * 1024 * 1024 + }); + }) + .then(() => clock.clockForward(100, { promisify: true, repeat: 12, delay: 10 })) + .then(() => { + assert.lengthOf(results, 9); + assert.deepNestedInclude(results[8], { + interval: { min: 50, max: 90, interval: 0.3 }, + thresholdStatus: 'MEMORY_USAGE_BELOW_THRESHOLD', + 
trend: 'MEMORY_USAGE_GOES_DOWN', + usage: { + external: 100, + free: -1, + freeLimit: 10, + freeUtilizationPercent: 0, + heapTotal: 100, + heapUsed: 100, + provisioned: 250, + release: 180, + releasePercent: 90, + rss: 100, + threshold: 200, + thresholdPercent: 80, + thresholdUtilzationPercent: 50, + utilization: 100, + utilizationPercent: 40 + } + }); + }); + }); + + it('should stop and resume activity', () => { + memMon = new MemoryMonitor(callback, { + fs: fsUtil, + intervals: [ + { usage: 100, interval: 0.1 } + ], + provisioned: 250 + }); + + return Promise.all([ + memMon.start(), + clock.clockForward(25, { promisify: true, repeat: 10, delay: 2 }) + ]) + .then(() => { + assert.lengthOf(results, 2); + return Promise.all([ + memMon.stop(), + clock.clockForward(25, { promisify: true, repeat: 10, delay: 2 }) + ]); + }) + .then(() => { + assert.lengthOf(results, 2); + return Promise.all([ + memMon.restart(), + clock.clockForward(25, { promisify: true, repeat: 10, delay: 2 }) + ]); + }) + .then(() => { + assert.lengthOf(results, 4); + return Promise.all([ + memMon.restart(), + clock.clockForward(25, { promisify: true, repeat: 10, delay: 2 }) + ]); + }) + .then(() => { + assert.lengthOf(results, 6); + }); + }); + + it('should start with appropriate interval according to mem usage', () => { + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 450 * 1024 * 1024, + heapTotal: 20 * 1024 * 1024, + heapUsed: 15 * 1024 * 1024, + rss: 17 * 1024 * 1024 + }); + memMon = new MemoryMonitor(callback, { + fs: fsUtil, + intervals: [ + { usage: 50, interval: 0.5 }, + { usage: 90, interval: 0.1 } + ], + provisioned: 250 + }); + + return Promise.all([ + memMon.start(), + clock.clockForward(250, { promisify: true, repeat: 10, delay: 2 }) + ]) + .then(() => { + assert.lengthOf(results, 2); + }); + }); + + it('should call GC', () => { + global.gc = sinon.spy(); + memMon = new MemoryMonitor(callback, { + fs: fsUtil, + intervals: [ + { usage: 50, interval: 0.5 }, + { usage: 90, interval: 0.1 } + ], + provisioned: 250 + }); + assert.isTrue(memMon.gcEnabled); + + return Promise.all([ + memMon.start(), + clock.clockForward(600, { promisify: true, repeat: 30, delay: 1 }) + ]) + .then(() => { + assert.deepStrictEqual(global.gc.callCount, 0); + return clock.clockForward(600, { promisify: true, repeat: 30, delay: 1 }); + }) + .then(() => { + assert.deepStrictEqual(global.gc.callCount, 0); + return clock.clockForward(60000, { promisify: true, repeat: 2, delay: 1 }); + }) + .then(() => { + assert.deepStrictEqual(global.gc.callCount, 2); + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 450 * 1024 * 1024, + heapTotal: 20 * 1024 * 1024, + heapUsed: 15 * 1024 * 1024, + rss: 17 * 1024 * 1024 + }); + return clock.clockForward(1500, { promisify: true, repeat: 10, delay: 1 }); + }) + .then(() => { + assert.isAbove(global.gc.callCount, 15); + assert.isBelow(global.gc.callCount, 21); + }); + }); + }); +}); + +const PROC_MEM_INFO_OUTPUT = ` +MemTotal: 16434000 kB +MemFree: 617812 kB +MemAvailable: 2097152 kB +Buffers: 391188 kB +Cached: 1353364 kB +SwapCached: 0 kB +Active: 2302144 kB +Inactive: 633088 kB +Active(anon): 1429616 kB +Inactive(anon): 3956 kB +Active(file): 872528 kB +Inactive(file): 629132 kB +Unevictable: 244068 kB +Mlocked: 244084 kB +SwapTotal: 1023996 kB +SwapFree: 1023996 kB +Dirty: 796 kB +Writeback: 0 kB +AnonPages: 1458780 kB +Mapped: 367316 kB +Shmem: 74592 kB +Slab: 242204 kB +SReclaimable: 181236 kB +SUnreclaim: 60968 kB +KernelStack: 8464 kB +PageTables: 16128 
kB +NFS_Unstable: 0 kB +Bounce: 0 kB +WritebackTmp: 0 kB +CommitLimit: 3084708 kB +Committed_AS: 2498648 kB +VmallocTotal: 34359738367 kB +VmallocUsed: 161868 kB +VmallocChunk: 34359341052 kB +HardwareCorrupted: 0 kB +AnonHugePages: 0 kB +CmaTotal: 0 kB +CmaFree: 0 kB +HugePages_Total: 6012 +HugePages_Free: 19 +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +DirectMap4k: 124800 kB +DirectMap2M: 6166528 kB +DirectMap1G: 12582912 kB +`; diff --git a/test/unit/resourceMonitor/resourceMonitorTests.js b/test/unit/resourceMonitor/resourceMonitorTests.js new file mode 100644 index 00000000..90764a69 --- /dev/null +++ b/test/unit/resourceMonitor/resourceMonitorTests.js @@ -0,0 +1,1448 @@ +/** + * Copyright 2024 F5, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use strict'; + +/* eslint-disable import/order, no-use-before-define */ +const moduleCache = require('../shared/restoreCache')(); + +const sinon = require('sinon'); + +const assert = require('../shared/assert'); +const sourceCode = require('../shared/sourceCode'); +const stubs = require('../shared/stubs'); + +const APP_THRESHOLDS = sourceCode('src/lib/constants').APP_THRESHOLDS; +const configWorker = sourceCode('src/lib/config'); +const persistentStorage = sourceCode('src/lib/persistentStorage'); +const ResourceMonitor = sourceCode('src/lib/resourceMonitor'); + +moduleCache.remember(); + +describe('Resource Monitor / Resource Monitor', () => { + let coreStub; + let resourceMonitor; + let events; + + function eraseEvents() { + events = { + all: [], + check: [], + notOk: [], + ok: [], + stop: [] + }; + } + + before(() => { + moduleCache.restore(); + }); + + beforeEach(() => { + eraseEvents(); + resourceMonitor = new ResourceMonitor(); + resourceMonitor.ee.on(APP_THRESHOLDS.MEMORY.STATE.NOT_OK, (stats) => { + const evt = { name: 'notOk', stats }; + events.notOk.push(evt); + events.all.push(evt); + }); + resourceMonitor.ee.on(APP_THRESHOLDS.MEMORY.STATE.OK, (stats) => { + const evt = { name: 'ok', stats }; + events.ok.push(evt); + events.all.push(evt); + }); + resourceMonitor.ee.on('memoryMonitorStop', () => { + const evt = { name: 'stop' }; + events.stop.push(evt); + events.all.push(evt); + }); + resourceMonitor.ee.on('memoryCheckStatus', (stats) => { + const evt = { name: 'check', stats }; + events.check.push(evt); + events.all.push(evt); + }); + + coreStub = stubs.default.coreStub({}, { logger: { ignoreLevelChange: false } }); + coreStub.persistentStorage.loadData = { config: { } }; + coreStub.utilMisc.generateUuid.numbersOnly = false; + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 10 * 1024 * 1024, + heapTotal: 10 * 1024 * 1024, + heapUsed: 10 * 1024 * 1024, + rss: 10 * 1024 * 1024 + }); + coreStub.resourceMonitorUtils.osAvailableMem.free = 500; + + return configWorker.cleanup() + .then(() => persistentStorage.persistentStorage.load()) + .then(() => resourceMonitor.initialize({ configMgr: configWorker })); + }); + + afterEach(() => resourceMonitor.destroy() + .then(() => { + 
sinon.restore(); + })); + + describe('constructor', () => { + it('should create a new instance', () => { + assert.isTrue(resourceMonitor.restartsEnabled); + assert.isFalse(resourceMonitor.isMemoryMonitorActive); + assert.isNull(resourceMonitor.memoryState); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig, { + config: {}, + enabled: false, + logging: { + freq: 10 * 1000, + lastMessage: 0, + level: 'debug' + } + }); + }); + }); + + describe('lifecycle', () => { + let clock; + + beforeEach(() => { + clock = stubs.clock(); + }); + + it('should ignore changes in configuration when destroyed', () => resourceMonitor.start() + .then(() => { + assert.isTrue(resourceMonitor.isRunning()); + assert.isFalse(resourceMonitor.isMemoryMonitorActive); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + return resourceMonitor.destroy(); + }) + .then(() => { + assert.isTrue(resourceMonitor.isDestroyed()); + assert.isFalse(resourceMonitor.isRunning()); + assert.isFalse(resourceMonitor.isMemoryMonitorActive); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + assert.isTrue(resourceMonitor.isDestroyed()); + assert.isFalse(resourceMonitor.isRunning()); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + assert.isFalse(resourceMonitor.isMemoryMonitorActive); + })); + + it('should start service without a configuration', () => resourceMonitor.start() + .then(() => { + assert.isTrue(resourceMonitor.isRunning()); + assert.isFalse(resourceMonitor.isMemoryMonitorActive); + assert.isNull(resourceMonitor.memoryState); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig, { + config: {}, + enabled: false, + logging: { + freq: 10 * 1000, + lastMessage: 0, + level: 'debug' + } + }); + + return clock.clockForward(30000, { promisify: true, once: true }); + }) + .then(() => { + assert.isTrue(resourceMonitor.isRunning()); + assert.isFalse(resourceMonitor.isMemoryMonitorActive); + assert.isNull(resourceMonitor.memoryState); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + assert.isEmpty(events.all); + })); + + it('should not generate log messages when log level is not debug or verbose', () => Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]) + .then(() => { + coreStub.logger.setLogLevel('verbose'); + coreStub.logger.removeAllMessages(); + + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(coreStub.logger.messages.warning); + assert.isEmpty(coreStub.logger.messages.info); + assert.isNotEmpty(coreStub.logger.messages.verbose); + assert.isNotEmpty(coreStub.logger.messages.debug); + assert.includeMatch(coreStub.logger.messages.verbose, /MEMORY_USAGE_BELOW_THRESHOLD/); + assert.includeMatch(coreStub.logger.messages.debug, /MEMORY_USAGE_BELOW_THRESHOLD/); + + coreStub.logger.setLogLevel('debug'); + coreStub.logger.removeAllMessages(); + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(coreStub.logger.messages.verbose); + 
assert.isAbove(coreStub.logger.messages.debug.length, 2); + assert.notIncludeMatch(coreStub.logger.messages.verbose, /MEMORY_USAGE_BELOW_THRESHOLD/); + assert.includeMatch(coreStub.logger.messages.debug, /MEMORY_USAGE_BELOW_THRESHOLD/); + + coreStub.logger.setLogLevel('info'); + coreStub.logger.removeAllMessages(); + + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(coreStub.logger.messages.verbose); + assert.isEmpty(coreStub.logger.messages.debug); + coreStub.logger.setLogLevel('error'); + coreStub.logger.removeAllMessages(); + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + memoryMonitor: { + logLevel: 'error', + logFrequency: 30 + } + }, + listener: { + class: 'Telemetry_Listener' + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + coreStub.logger.removeAllMessages(); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 11 }); + }) + .then(() => { + assert.lengthOf(coreStub.logger.messages.error, 1); + assert.isEmpty(coreStub.logger.messages.verbose); + assert.isEmpty(coreStub.logger.messages.debug); + + assert.includeMatch(coreStub.logger.messages.error, /MEMORY_USAGE_BELOW_THRESHOLD/); + })); + + it('should work according to declaration content', () => Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry' + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]) + .then(() => { + assert.isFalse(resourceMonitor.isRunning()); + assert.isFalse(resourceMonitor.isMemoryMonitorActive); + assert.isNull(resourceMonitor.memoryState); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig, { + config: {}, + enabled: false, + logging: { + freq: 10 * 1000, + lastMessage: 0, + level: 'debug' + } + }); + + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isFalse(resourceMonitor.isRunning()); + assert.isFalse(resourceMonitor.isMemoryMonitorActive); + assert.isNull(resourceMonitor.memoryState); + assert.isEmpty(events.all); + + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + assert.isTrue(resourceMonitor.isRunning()); + assert.isTrue(resourceMonitor.isMemoryMonitorActive); + assert.isNotNull(resourceMonitor.memoryState); + assert.isAbove(events.check.length, 15); + assert.deepStrictEqual( + events.check[events.check.length - 1].stats, + resourceMonitor.memoryState + ); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig, { + config: { + freeMemoryLimit: 30, + intervals: [ + { interval: 1.5, usage: 50 }, + { interval: 1, usage: 60 }, + { interval: 0.8, usage: 70 }, + { interval: 0.5, usage: 80 }, + { interval: 0.2, usage: 90 }, + { interval: 0.1, usage: 100 } + ], + provisioned: 4096, + releasePercent: 90, + thresholdPercent: 90 + }, + enabled: true, + logging: { + freq: 10 * 1000, + lastMessage: 85501, + level: 'debug' + } + }); + + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry' + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + assert.isTrue(resourceMonitor.isRunning()); + 
assert.isFalse(resourceMonitor.isMemoryMonitorActive); + assert.isNull(resourceMonitor.memoryState); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + assert.lengthOf(events.stop, 1); + + eraseEvents(); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isTrue(resourceMonitor.isRunning()); + assert.isFalse(resourceMonitor.isMemoryMonitorActive); + assert.isNull(resourceMonitor.memoryState); + assert.isEmpty(events.all); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + })); + + it('should use `warning` level when status changed', () => Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]) + .then(() => { + assert.isNotEmpty(events.ok); + assert.isEmpty(events.notOk); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + + coreStub.logger.setLogLevel('info'); + coreStub.logger.removeAllMessages(); + eraseEvents(); + + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 10000 * 1024 * 1024, + heapTotal: 10000 * 1024 * 1024, + heapUsed: 10000 * 1024 * 1024, + rss: 10000 * 1024 * 1024 + }); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(events.ok); + assert.isNotEmpty(events.notOk); + assert.isFalse(resourceMonitor.isProcessingEnabled()); + + assert.notIncludeMatch(coreStub.logger.messages.all, /MEMORY_USAGE_BELOW_THRESHOLD/); + assert.includeMatch(coreStub.logger.messages.warning, /MEMORY_USAGE_ABOVE_THRESHOLD/); + assert.notIncludeMatch(coreStub.logger.messages.info, /MEMORY_USAGE_ABOVE_THRESHOLD/); + + assert.deepStrictEqual(coreStub.logger.messages.all.reduce( + (a, v) => a + (/MEMORY_USAGE_ABOVE_THRESHOLD/.test(v) ? 1 : 0), + 0 + ), 1); + + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 10 * 1024 * 1024, + heapTotal: 10 * 1024 * 1024, + heapUsed: 10 * 1024 * 1024, + rss: 10 * 1024 * 1024 + }); + + coreStub.logger.removeAllMessages(); + eraseEvents(); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(events.notOk); + assert.isNotEmpty(events.ok); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + + assert.notIncludeMatch(coreStub.logger.messages.all, /MEMORY_USAGE_ABOVE_THRESHOLD/); + assert.notIncludeMatch(coreStub.logger.messages.warning, /MEMORY_USAGE_BELOW_THRESHOLD/); + assert.includeMatch(coreStub.logger.messages.info, /MEMORY_USAGE_BELOW_THRESHOLD/); + + assert.deepStrictEqual(coreStub.logger.messages.all.reduce( + (a, v) => a + (/MEMORY_USAGE_BELOW_THRESHOLD/.test(v) ? 
1 : 0), + 0 + ), 1); + })); + + it('should apply custom configuration from declaration', () => Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry' + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]) + .then(() => Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + memoryThresholdPercent: 90, + memoryMonitor: { + provisionedMemory: 500 + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ])) + .then(() => { + assert.isFalse(resourceMonitor.isRunning()); + assert.isFalse(resourceMonitor.isMemoryMonitorActive); + assert.isNull(resourceMonitor.memoryState); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + }) + .then(() => Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + }, + controls: { + class: 'Controls', + memoryThresholdPercent: 90, + memoryMonitor: { + provisionedMemory: 500, + thresholdReleasePercent: 80 + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ])) + .then(() => { + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig, { + config: { + freeMemoryLimit: 30, + intervals: [ + { interval: 1.5, usage: 50 }, + { interval: 1, usage: 60 }, + { interval: 0.8, usage: 70 }, + { interval: 0.5, usage: 80 }, + { interval: 0.2, usage: 90 }, + { interval: 0.1, usage: 100 } + ], + provisioned: 500, + releasePercent: 80, + thresholdPercent: 90 + }, + enabled: true, + logging: { + freq: 10 * 1000, + lastMessage: 85501, + level: 'debug' + } + }); + eraseEvents(); + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 400 * 1024 * 1024, + heapTotal: 400 * 1024 * 1024, + heapUsed: 400 * 1024 * 1024, + rss: 400 * 1024 * 1024 + }); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(events.notOk); + assert.isEmpty(events.ok); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 460 * 1024 * 1024, + heapTotal: 460 * 1024 * 1024, + heapUsed: 460 * 1024 * 1024, + rss: 460 * 1024 * 1024 + }); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isNotEmpty(events.notOk); + assert.isEmpty(events.ok); + assert.isFalse(resourceMonitor.isProcessingEnabled()); + eraseEvents(); + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 400 * 1024 * 1024, + heapTotal: 400 * 1024 * 1024, + heapUsed: 400 * 1024 * 1024, + rss: 400 * 1024 * 1024 + }); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(events.notOk); + assert.isEmpty(events.ok); + assert.isFalse(resourceMonitor.isProcessingEnabled()); + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 300 * 1024 * 1024, + heapTotal: 300 * 1024 * 1024, + heapUsed: 300 * 1024 * 1024, + rss: 300 * 1024 * 1024 + }); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(events.notOk); + assert.isNotEmpty(events.ok); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + eraseEvents(); + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 400 * 1024 * 1024, + heapTotal: 400 * 1024 * 1024, + heapUsed: 400 * 1024 * 1024, + rss: 400 * 1024 * 1024 + }); + return clock.clockForward(3000, { promisify: true, delay: 1, 
repeat: 10 }); + }) + .then(() => { + assert.isEmpty(events.notOk); + assert.isEmpty(events.ok); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + }, + controls: { + class: 'Controls', + memoryThresholdPercent: 90, + memoryMonitor: { + provisionedMemory: 500, + memoryThresholdPercent: 70 + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig, { + config: { + freeMemoryLimit: 30, + intervals: [ + { interval: 1.5, usage: 50 }, + { interval: 1, usage: 60 }, + { interval: 0.8, usage: 70 }, + { interval: 0.5, usage: 80 }, + { interval: 0.2, usage: 90 }, + { interval: 0.1, usage: 100 } + ], + provisioned: 500, + releasePercent: 90, + thresholdPercent: 70 + }, + enabled: true, + logging: { + freq: 10 * 1000, + lastMessage: 264001, + level: 'debug' + } + }); + assert.isNotEmpty(events.notOk); + assert.isEmpty(events.ok); + assert.isFalse(resourceMonitor.isProcessingEnabled()); + eraseEvents(); + }) + .then(() => Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + }, + controls: { + class: 'Controls', + memoryThresholdPercent: 50, + memoryMonitor: { + provisionedMemory: 500 + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ])) + .then(() => { + assert.isEmpty(events.notOk); + assert.isEmpty(events.ok); + assert.isFalse(resourceMonitor.isProcessingEnabled()); + }) + .then(() => Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + }, + controls: { + class: 'Controls', + memoryThresholdPercent: 90, + memoryMonitor: { + provisionedMemory: 500 + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ])) + .then(() => { + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig, { + config: { + freeMemoryLimit: 30, + intervals: [ + { interval: 1.5, usage: 50 }, + { interval: 1, usage: 60 }, + { interval: 0.8, usage: 70 }, + { interval: 0.5, usage: 80 }, + { interval: 0.2, usage: 90 }, + { interval: 0.1, usage: 100 } + ], + provisioned: 500, + releasePercent: 90, + thresholdPercent: 90 + }, + enabled: true, + logging: { + freq: 10 * 1000, + lastMessage: 323201, + level: 'debug' + } + }); + assert.isEmpty(events.notOk); + assert.isNotEmpty(events.ok); + assert.isEmpty(events.stop); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + })); + + it('should log a message when provisioned is more that configured', () => { + coreStub.utilMisc.getRuntimeInfo.maxHeapSize = 1000; + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + }, + controls: { + class: 'Controls', + memoryThresholdPercent: 100, + memoryMonitor: { + provisionedMemory: 1300, + interval: 'aggressive' + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]) + .then(() => { + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig, { + config: { + freeMemoryLimit: 30, + intervals: [ + { interval: 0.5, usage: 50 }, + { interval: 0.4, usage: 60 }, + { interval: 0.3, usage: 70 }, + { interval: 0.2, usage: 80 }, + { interval: 0.2, usage: 90 }, + { interval: 0.1, usage: 100 } + ], + provisioned: 1000, + releasePercent: 90, + thresholdPercent: 100 + }, + enabled: false, + logging: { + freq: 10 * 1000, + 
lastMessage: 0, + level: 'debug' + } + }); + assert.includeMatch(coreStub.logger.messages.all, /Please, adjust memory limit/); + assert.includeMatch(coreStub.logger.messages.all, /More frequent Memory Monior checks are enabled/); + assert.includeMatch(coreStub.logger.messages.all, /Disabling Memory Monitor due high threshold percent value/); + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig.config.provisioned, 1000); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + }, + controls: { + class: 'Controls', + memoryMonitor: { + provisionedMemory: 1300, + memoryThresholdPercent: 100 + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + assert.includeMatch(coreStub.logger.messages.all, /Please, adjust memory limit/); + assert.includeMatch(coreStub.logger.messages.all, /Disabling Memory Monitor due high threshold percent value/); + assert.isEmpty(events.stop); + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig.config.provisioned, 1000); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + }); + }); + + it('should notify when not enough OS free memory', () => Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + }, + controls: { + class: 'Controls', + memoryMonitor: { + osFreeMemory: 50, + interval: 'aggressive', + memoryThresholdPercent: 80 + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]) + .then(() => { + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig, { + config: { + freeMemoryLimit: 50, + intervals: [ + { interval: 0.5, usage: 50 }, + { interval: 0.4, usage: 60 }, + { interval: 0.3, usage: 70 }, + { interval: 0.2, usage: 80 }, + { interval: 0.2, usage: 90 }, + { interval: 0.1, usage: 100 } + ], + provisioned: 4096, + releasePercent: 90, + thresholdPercent: 80 + }, + enabled: true, + logging: { + freq: 10 * 1000, + lastMessage: 23501, + level: 'debug' + } + }); + assert.isEmpty(events.notOk); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + eraseEvents(); + coreStub.resourceMonitorUtils.osAvailableMem.free = 40; + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isNotEmpty(events.notOk); + assert.isFalse(resourceMonitor.isProcessingEnabled()); + eraseEvents(); + coreStub.resourceMonitorUtils.osAvailableMem.free = 400; + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(events.notOk); + assert.isNotEmpty(events.ok); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + eraseEvents(); + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + }, + controls: { + class: 'Controls', + memoryMonitor: { + interval: 'aggressive', + memoryThresholdPercent: 80 + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + assert.isEmpty(events.notOk); + assert.isTrue(resourceMonitor.isProcessingEnabled()); + coreStub.resourceMonitorUtils.osAvailableMem.free = 20; + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isNotEmpty(events.notOk); + assert.isFalse(resourceMonitor.isProcessingEnabled()); + eraseEvents(); + coreStub.resourceMonitorUtils.osAvailableMem.free = 400; + 
return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isTrue(resourceMonitor.isProcessingEnabled()); + assert.isEmpty(events.notOk); + assert.isNotEmpty(events.ok); + assert.isEmpty(events.stop); + })); + + it('should update check intervals according to declaration', () => Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + }, + controls: { + class: 'Controls', + logLevel: 'verbose' + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]) + .then(() => { + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig, { + config: { + freeMemoryLimit: 30, + intervals: [ + { interval: 1.5, usage: 50 }, + { interval: 1, usage: 60 }, + { interval: 0.8, usage: 70 }, + { interval: 0.5, usage: 80 }, + { interval: 0.2, usage: 90 }, + { interval: 0.1, usage: 100 } + ], + provisioned: 4096, + releasePercent: 90, + thresholdPercent: 90 + }, + enabled: true, + logging: { + freq: 10 * 1000, + lastMessage: 25501, + level: 'debug' + } + }); + eraseEvents(); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + // default interval is 1.5 for low pressure. for 30seconds we should have at least 10 + assert.isBelow(events.check.length, 25); + eraseEvents(); + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + }, + controls: { + class: 'Controls', + logLevel: 'verbose', + memoryMonitor: { + interval: 'aggressive' + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig, { + config: { + freeMemoryLimit: 30, + intervals: [ + { interval: 0.5, usage: 50 }, + { interval: 0.4, usage: 60 }, + { interval: 0.3, usage: 70 }, + { interval: 0.2, usage: 80 }, + { interval: 0.2, usage: 90 }, + { interval: 0.1, usage: 100 } + ], + provisioned: 4096, + releasePercent: 90, + thresholdPercent: 90 + }, + enabled: true, + logging: { + freq: 10 * 1000, + lastMessage: 87001, + level: 'debug' + } + }); + eraseEvents(); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + // default interval is 0.5 for low pressure. for 30seconds we should have at least 20 + assert.isAbove(events.check.length, 35); + eraseEvents(); + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + listener: { + class: 'Telemetry_Listener' + }, + controls: { + class: 'Controls', + logLevel: 'verbose', + memoryMonitor: { + interval: 'default' + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + assert.deepStrictEqual(resourceMonitor.memoryMonitorConfig, { + config: { + freeMemoryLimit: 30, + intervals: [ + { interval: 1.5, usage: 50 }, + { interval: 1, usage: 60 }, + { interval: 0.8, usage: 70 }, + { interval: 0.5, usage: 80 }, + { interval: 0.2, usage: 90 }, + { interval: 0.1, usage: 100 } + ], + provisioned: 4096, + releasePercent: 90, + thresholdPercent: 90 + }, + enabled: true, + logging: { + freq: 10 * 1000, + lastMessage: 148501, + level: 'debug' + } + }); + eraseEvents(); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + // default interval is 0.5 for low pressure. 
for 30seconds we should have at least 20 + assert.isBelow(events.check.length, 25); + })); + }); + + describe('ProcessingState', () => { + let cbEvents; + let clock; + let onDisableCb; + let onEnableCb; + let ps; + + function eraseCbEvents() { + cbEvents = { + onDisable: [], + onEnable: [] + }; + } + + beforeEach(() => { + clock = stubs.clock(); + + onDisableCb = () => { + const memState = ps.memoryState; + assert.deepStrictEqual(memState.thresholdStatus, APP_THRESHOLDS.MEMORY.STATE.NOT_OK); + cbEvents.onDisable.push(memState); + }; + onEnableCb = () => { + const memState = ps.memoryState; + if (memState) { + assert.deepStrictEqual(memState.thresholdStatus, APP_THRESHOLDS.MEMORY.STATE.OK); + } + cbEvents.onEnable.push(memState); + }; + + ps = resourceMonitor.initializePState(onEnableCb, onDisableCb); + + eraseCbEvents(); + }); + + it('should allow processing once created', () => { + assert.isTrue(ps.enabled); + }); + + it('should not call callbacks on initialization', () => { + ps.initialize(onEnableCb, onDisableCb); + assert.isEmpty(cbEvents.onDisable); + assert.isEmpty(cbEvents.onEnable); + + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + .then(() => { + assert.isEmpty(cbEvents.onDisable); + assert.isEmpty(cbEvents.onEnable); + }); + }); + + it('should change state according to memory usage', () => Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + memoryMonitor: { + provisionedMemory: 300, + memoryThresholdPercent: 90 + } + }, + listener: { + class: 'Telemetry_Listener' + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]) + .then(() => { + assert.isTrue(ps.enabled); + // should not call callbacks if enabled already + assert.isEmpty(cbEvents.onEnable); + assert.isEmpty(cbEvents.onDisable); + assert.isNotEmpty(events.all); + + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 290 * 1024 * 1024, + heapTotal: 290 * 1024 * 1024, + heapUsed: 290 * 1024 * 1024, + rss: 290 * 1024 * 1024 + }); + + ps.initialize(onEnableCb, onDisableCb); + assert.isTrue(ps.enabled); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isFalse(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.lengthOf(cbEvents.onDisable, 1); + assert.isNotEmpty(events.all); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + })); + + it('should change state according to memory usage', () => { + ps.initialize(onEnableCb, onDisableCb); + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + memoryMonitor: { + provisionedMemory: 300, + memoryThresholdPercent: 90 + } + }, + listener: { + class: 'Telemetry_Listener' + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]) + .then(() => { + assert.isTrue(ps.enabled); + // should not call callbacks if enabled already + assert.isEmpty(cbEvents.onEnable); + assert.isEmpty(cbEvents.onDisable); + assert.isNotEmpty(events.all); + }) + .then(() => { + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 290 * 1024 * 1024, + heapTotal: 290 * 1024 * 1024, + heapUsed: 290 * 1024 * 1024, + rss: 290 * 1024 * 1024 + }); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isFalse(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.lengthOf(cbEvents.onDisable, 1); + assert.isNotEmpty(events.all); + 
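+                    // keep advancing the clock: the state should stay disabled and onDisable should not fire a second time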
return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isFalse(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.lengthOf(cbEvents.onDisable, 1); + assert.isNotEmpty(events.all); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 100 * 1024 * 1024, + heapTotal: 100 * 1024 * 1024, + heapUsed: 100 * 1024 * 1024, + rss: 100 * 1024 * 1024 + }); + eraseCbEvents(); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isTrue(ps.enabled); + assert.isEmpty(cbEvents.onDisable); + assert.lengthOf(cbEvents.onEnable, 1); + assert.isNotEmpty(events.all); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isTrue(ps.enabled); + assert.isEmpty(cbEvents.onDisable); + assert.lengthOf(cbEvents.onEnable, 1); + assert.isNotEmpty(events.all); + + eraseCbEvents(); + coreStub.resourceMonitorUtils.osAvailableMem.free = 20; + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isFalse(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.lengthOf(cbEvents.onDisable, 1); + assert.isNotEmpty(events.all); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isFalse(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.lengthOf(cbEvents.onDisable, 1); + assert.isNotEmpty(events.all); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + coreStub.resourceMonitorUtils.osAvailableMem.free = 500; + eraseCbEvents(); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isTrue(ps.enabled); + assert.isEmpty(cbEvents.onDisable); + assert.lengthOf(cbEvents.onEnable, 1); + assert.isNotEmpty(events.all); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isTrue(ps.enabled); + assert.isEmpty(cbEvents.onDisable); + assert.lengthOf(cbEvents.onEnable, 1); + assert.isNotEmpty(events.all); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }); + }); + + it('should change enable processing when memory monitor deactivated', () => { + ps.initialize(onEnableCb, onDisableCb); + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + memoryMonitor: { + provisionedMemory: 300, + memoryThresholdPercent: 90 + } + }, + listener: { + class: 'Telemetry_Listener' + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]) + .then(() => { + assert.isTrue(ps.enabled); + // should not call callbacks if enabled already + assert.isEmpty(cbEvents.onEnable); + assert.isEmpty(cbEvents.onDisable); + assert.isNotEmpty(events.all); + + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + memoryMonitor: { + provisionedMemory: 300, + memoryThresholdPercent: 90 + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + assert.isTrue(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.isEmpty(cbEvents.onDisable); + assert.isNotEmpty(events.all); + eraseEvents(); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + 
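+                    // with no processing components left in the declaration the Memory Monitor is deactivated: no checks run and low OS memory should have no effect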
assert.isTrue(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.isEmpty(cbEvents.onDisable); + assert.isEmpty(events.all); + coreStub.resourceMonitorUtils.osAvailableMem.free = 20; + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isTrue(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.isEmpty(cbEvents.onDisable); + assert.isEmpty(events.all); + + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + memoryMonitor: { + provisionedMemory: 300, + memoryThresholdPercent: 90 + } + }, + listener: { + class: 'Telemetry_Listener' + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + assert.isFalse(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.lengthOf(cbEvents.onDisable, 1); + assert.isNotEmpty(events.all); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isFalse(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.lengthOf(cbEvents.onDisable, 1); + assert.isNotEmpty(events.all); + + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + memoryMonitor: { + provisionedMemory: 300, + memoryThresholdPercent: 90 + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + assert.isTrue(ps.enabled); + assert.lengthOf(cbEvents.onEnable, 1); + assert.lengthOf(cbEvents.onDisable, 1); + assert.isNotEmpty(events.all); + }); + }); + + it('should enable processing once resource monitor destroyed', () => { + coreStub.resourceMonitorUtils.osAvailableMem.free = 20; + ps.initialize(onEnableCb, onDisableCb); + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + logLevel: 'verbose', + memoryMonitor: { + provisionedMemory: 300, + memoryThresholdPercent: 90 + } + }, + listener: { + class: 'Telemetry_Listener' + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]) + .then(() => { + assert.isFalse(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.lengthOf(cbEvents.onDisable, 1); + assert.isNotEmpty(events.all); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isFalse(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.lengthOf(cbEvents.onDisable, 1); + assert.isNotEmpty(events.all); + return resourceMonitor.destroy(); + }) + .then(() => clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 })) + .then(() => { + assert.isTrue(ps.enabled); + assert.lengthOf(cbEvents.onEnable, 1); + assert.lengthOf(cbEvents.onDisable, 1); + assert.isNotEmpty(events.all); + }); + }); + + it('should enable/disable processing according to the state', () => { + ps.initialize(onEnableCb, onDisableCb); + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + logLevel: 'verbose', + memoryMonitor: { + provisionedMemory: 300, + memoryThresholdPercent: 90 + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]) + .then(() => clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 })) + .then(() => { + assert.isTrue(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.isEmpty(cbEvents.onDisable); + coreStub.resourceMonitorUtils.osAvailableMem.free = 20; + return clock.clockForward(3000, { 
promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isTrue(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.isEmpty(cbEvents.onDisable); + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + logLevel: 'verbose', + memoryMonitor: { + provisionedMemory: 300, + memoryThresholdPercent: 90 + } + }, + listener: { + class: 'Telemetry_Listener' + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 })) + .then(() => { + assert.isFalse(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.lengthOf(cbEvents.onDisable, 1); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isFalse(ps.enabled); + assert.isEmpty(cbEvents.onEnable); + assert.lengthOf(cbEvents.onDisable, 1); + coreStub.resourceMonitorUtils.osAvailableMem.free = 500; + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isTrue(ps.enabled); + assert.lengthOf(cbEvents.onEnable, 1); + assert.lengthOf(cbEvents.onDisable, 1); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isTrue(ps.enabled); + assert.lengthOf(cbEvents.onEnable, 1); + assert.lengthOf(cbEvents.onDisable, 1); + Object.assign(coreStub.resourceMonitorUtils.appMemoryUsage, { + external: 340 * 1024 * 1024, + heapTotal: 340 * 1024 * 1024, + heapUsed: 340 * 1024 * 1024, + rss: 340 * 1024 * 1024 + }); + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isFalse(ps.enabled); + assert.lengthOf(cbEvents.onEnable, 1); + assert.lengthOf(cbEvents.onDisable, 2); + + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + logLevel: 'verbose', + memoryMonitor: { + provisionedMemory: 500, + memoryThresholdPercent: 90 + } + }, + listener: { + class: 'Telemetry_Listener' + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + assert.isTrue(ps.enabled); + assert.lengthOf(cbEvents.onEnable, 2); + assert.lengthOf(cbEvents.onDisable, 2); + + return Promise.all([ + configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + logLevel: 'verbose', + memoryMonitor: { + provisionedMemory: 500, + memoryThresholdPercent: 90 + } + } + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => { + assert.isTrue(ps.enabled); + assert.lengthOf(cbEvents.onEnable, 2); + assert.lengthOf(cbEvents.onDisable, 2); + }); + }); + }); +}); diff --git a/test/unit/resourceMonitor/utilsTests.js b/test/unit/resourceMonitor/utilsTests.js new file mode 100644 index 00000000..bc2f4978 --- /dev/null +++ b/test/unit/resourceMonitor/utilsTests.js @@ -0,0 +1,119 @@ +/** + * Copyright 2024 F5, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use strict'; + +/* eslint-disable import/order */ +const moduleCache = require('../shared/restoreCache')(); + +const sinon = require('sinon'); + +const assert = require('../shared/assert'); +const sourceCode = require('../shared/sourceCode'); +const stubs = require('../shared/stubs'); + +const rmUtils = sourceCode('src/lib/resourceMonitor/utils'); + +moduleCache.remember(); + +describe('Resource Monitor / Utils', () => { + before(() => { + moduleCache.restore(); + }); + + afterEach(() => sinon.restore()); + + describe('.appMemoryUsage()', () => { + it('should return memory stats', () => { + const memUsage = rmUtils.appMemoryUsage(); + ['external', 'heapTotal', 'heapUsed', 'rss'].forEach((prop) => { + assert.isNumber(memUsage[prop]); + assert.isAbove(memUsage[prop], 0); + }); + + stubs.default.coreStub({ resourceMonitorUtils: true }); + assert.deepStrictEqual(rmUtils.appMemoryUsage(), { + external: 100, + heapTotal: 101, + heapUsed: 90, + rss: 300 + }); + }); + }); + + describe('.bytesToMegabytes()', () => { + it('should convert bytes to megabytes', () => { + assert.deepStrictEqual(rmUtils.bytesToMegabytes(0), 0); + assert.deepStrictEqual(rmUtils.bytesToMegabytes(1024 * 1024), 1); + assert.deepStrictEqual(rmUtils.bytesToMegabytes(2 * 1024 * 1024), 2); + }); + }); + + describe('.formatFloat()', () => { + it('should format float number', () => { + assert.deepStrictEqual(rmUtils.formatFloat(1), '1.00'); + assert.deepStrictEqual(rmUtils.formatFloat(1.11), '1.11'); + assert.deepStrictEqual(rmUtils.formatFloat(1.1111111111, 3), '1.111'); + }); + }); + + describe('.megabytesToStr()', () => { + it('should convert number to string', () => { + assert.deepStrictEqual(rmUtils.megabytesToStr(2), '2.00 MB'); + assert.deepStrictEqual(rmUtils.megabytesToStr(2.3436), '2.34 MB'); + }); + }); + + describe('.osAvailableMem()', () => { + it('should read and parse memory info', () => { + const PROC_MEM_INFO_OUTPUT = ` + MemTotal: 16434000 kB + MemFree: 617812 kB + MemAvailable: 2097152 kB + Buffers: 391188 kB + Cached: 1353364 kB + `; + assert.deepStrictEqual(rmUtils.osAvailableMem(() => PROC_MEM_INFO_OUTPUT), 2048); + + stubs.default.coreStub({ resourceMonitorUtils: true }); + assert.deepStrictEqual(rmUtils.osAvailableMem(), 100); + }); + + it('should return -1 when unable to parse', () => { + const PROC_MEM_INFO_OUTPUT = ` + MemTotal: 16434000 kB + MemFree: 617812 kB + Buffers: 391188 kB + Cached: 1353364 kB + `; + assert.deepStrictEqual(rmUtils.osAvailableMem(() => PROC_MEM_INFO_OUTPUT), -1); + }); + }); + + describe('.percentToStr()', () => { + it('should convert number to string', () => { + assert.deepStrictEqual(rmUtils.percentToStr(2), '2.00%'); + assert.deepStrictEqual(rmUtils.percentToStr(2.3436), '2.34%'); + }); + }); + + describe('.wrapMB()', () => { + it('should convert number to string', () => { + assert.deepStrictEqual(rmUtils.wrapMB(2), '2 MB'); + assert.deepStrictEqual(rmUtils.wrapMB(2.3436), '2.3436 MB'); + }); + }); +}); diff --git a/test/unit/runtimeConfig/bigstart_restnode b/test/unit/runtimeConfig/bigstart_restnode new file mode 100644 index 00000000..212de10d --- /dev/null +++ b/test/unit/runtimeConfig/bigstart_restnode @@ -0,0 +1,7 @@ +#!/bin/sh + +if [ -f /service/${service}/debug ]; then + exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1 +else + exec 
/usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1 +fi diff --git a/test/unit/runtimeConfig/runtimeConfigTests.js b/test/unit/runtimeConfig/runtimeConfigTests.js new file mode 100644 index 00000000..afac164c --- /dev/null +++ b/test/unit/runtimeConfig/runtimeConfigTests.js @@ -0,0 +1,1168 @@ +/** + * Copyright 2024 F5, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use strict'; + +/* eslint-disable import/order, no-template-curly-in-string, prefer-regex-literals */ +const moduleCache = require('../shared/restoreCache')(); + +const fs = require('fs'); +const memfs = require('memfs'); +const nock = require('nock'); +const pathUtil = require('path'); +const sinon = require('sinon'); + +const assert = require('../shared/assert'); +const sourceCode = require('../shared/sourceCode'); +const stubs = require('../shared/stubs'); +const testUtil = require('../shared/util'); + +const configWorker = sourceCode('src/lib/config'); +const deviceUtil = sourceCode('src/lib/utils/device'); +const persistentStorage = sourceCode('src/lib/persistentStorage'); +const RuntimeConfig = sourceCode('src/lib/runtimeConfig'); +const updater = sourceCode('src/lib/runtimeConfig/updater'); + +moduleCache.remember(); + +describe('Resource Monitor / Resource Monitor', () => { + const RESTNODE_SCRIPT_FNAME = '/etc/bigstart/scripts/restnoded'; + const UPDATER_DIR = pathUtil.join(__dirname, '../../../src/lib/runtimeConfig'); + const UPDATER_LOGS = pathUtil.join(UPDATER_DIR, 'logs.txt'); + + let clock; + let coreStub; + let isBashEnabled; + let processExitStub; + let remoteCmds; + let remoteCmbStub; + let restApiSysDB; + let runtimeConfig; + let virtualFS; + let volume; + + before(() => { + moduleCache.restore(); + + volume = new memfs.Volume(); + virtualFS = memfs.createFsFromVolume(volume); + }); + + beforeEach(() => { + clock = stubs.clock(); + + remoteCmds = []; + + remoteCmbStub = sinon.stub(deviceUtil.DeviceAsyncCLI.prototype, 'execute'); + remoteCmbStub.callsFake((cmd) => { + remoteCmds.push(cmd); + if (cmd.indexOf('updater') !== -1) { + updater.main(virtualFS); + } + return Promise.resolve(); + }); + + processExitStub = sinon.stub(process, 'exit'); + processExitStub.callsFake(() => {}); + + volume.reset(); + + volume.mkdirSync(pathUtil.dirname(RESTNODE_SCRIPT_FNAME), { recursive: true }); + volume.mkdirSync(UPDATER_DIR, { recursive: true }); + + virtualFS.writeFileSync( + RESTNODE_SCRIPT_FNAME, + fs.readFileSync(pathUtil.join(__dirname, 'bigstart_restnode')) + ); + + coreStub = stubs.default.coreStub({}, { logger: { ignoreLevelChange: false } }); + coreStub.persistentStorage.loadData = { config: { } }; + + runtimeConfig = new RuntimeConfig(virtualFS); + + isBashEnabled = true; + restApiSysDB = () => [200, { value: isBashEnabled }]; + testUtil.mockEndpoints([{ + endpoint: '/mgmt/tm/sys/db/systemauth.disablebash', + method: 'get', + response: () => restApiSysDB(), + options: { + times: 100 + } + 
}]); + + return configWorker.cleanup() + .then(() => persistentStorage.persistentStorage.load()) + .then(() => runtimeConfig.initialize({ configMgr: configWorker })); + }); + + afterEach(() => runtimeConfig.destroy() + .then(() => { + nock.cleanAll(); + sinon.restore(); + })); + + function deleteScript() { + return virtualFS.unlinkSync(RESTNODE_SCRIPT_FNAME); + } + + function getScript() { + return virtualFS.readFileSync(RESTNODE_SCRIPT_FNAME).toString(); + } + + function getTaskID() { + return JSON.parse(virtualFS.readFileSync(pathUtil.join(UPDATER_DIR, 'config.json'))).id; + } + + function processDeclaration(decl) { + return configWorker.processDeclaration({ + class: 'Telemetry', + controls: { + class: 'Controls', + runtime: decl + } + }); + } + + describe('.initialize()', () => { + it('should log message when unable to subscribe to config updates', () => { + const rc = new RuntimeConfig(); + rc.initialize({}); + + assert.includeMatch( + coreStub.logger.messages.all, + /Unable to subscribe to configuration updates/ + ); + }); + }); + + it('should process default runtime configuration', () => runtimeConfig.start() + .then(() => { + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration(), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /No changes found between running configuration and the new one/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task done/ + ); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.isEmpty(remoteCmds); + coreStub.logger.removeAllMessages(); + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(remoteCmds); + assert.isEmpty(coreStub.logger.messages.all); + })); + + it('should process runtime configuration with default values', () => runtimeConfig.start() + .then(() => { + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: false, + maxHeapSize: 1400 + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /No changes found between running configuration and the new one/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task done/ + ); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.isEmpty(remoteCmds); + coreStub.logger.removeAllMessages(); + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(remoteCmds); + 
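+                // the declaration matches the defaults, so no remote commands and no further log activity are expected once the no-op task completes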
assert.isEmpty(coreStub.logger.messages.all); + })); + + it('should do nothing when unable to read configuration from the script', () => runtimeConfig.start() + .then(() => { + deleteScript(); + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: false, + maxHeapSize: 1400 + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /Unable to read configuration from the startup script/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task done/ + ); + assert.throws(() => getScript()); + assert.isEmpty(remoteCmds); + coreStub.logger.removeAllMessages(); + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(remoteCmds); + assert.isEmpty(coreStub.logger.messages.all); + })); + + it('should do nothing when bash disabled', () => runtimeConfig.start() + .then(() => { + isBashEnabled = false; + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /Shell not available, unable to proceed with task execution/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task done/ + ); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.isEmpty(remoteCmds); + coreStub.logger.removeAllMessages(); + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(remoteCmds); + assert.isEmpty(coreStub.logger.messages.all); + })); + + it('should fail when unable to run remote command', () => runtimeConfig.start() + .then(() => { + remoteCmbStub.rejects(new Error('expected error')); + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /no logs available/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Attempt to update the runtime configuration failed! 
See logs for more details/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task failed/ + ); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.isEmpty(remoteCmds); + coreStub.logger.removeAllMessages(); + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(remoteCmds); + assert.isEmpty(coreStub.logger.messages.all); + })); + + it('should fail when changes were not applied to the startup script', () => runtimeConfig.start() + .then(() => { + sinon.stub(updater, 'main').callsFake(() => {}); + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /Configuration was not applied to the script/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task failed/ + ); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.includeMatch(remoteCmds, `${process.argv[0]} ${UPDATER_DIR}/updater.js`); + remoteCmds = []; + coreStub.logger.removeAllMessages(); + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(remoteCmds); + assert.isEmpty(coreStub.logger.messages.all); + })); + + it('should finish task once restart scheduled', () => runtimeConfig.start() + .then(() => { + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /New configuration was successfully applied to the startup script! Scheduling service restart in 1 min/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Restarting service to apply new changes for the runtime configuraiton/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task done/ + ); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node --max_old_space_size=2400 --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.includeMatch(remoteCmds, `${process.argv[0]} ${UPDATER_DIR}/updater.js`); + assert.includeMatch(remoteCmds, 'bigstart restart restnoded'); + coreStub.logger.removeAllMessages(); + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(coreStub.logger.messages.all); + })); + + it('should force restart when unable to schedule restart via remote cmd', () => runtimeConfig.start() + .then(() => { + remoteCmbStub.onSecondCall().rejects(new Error('expected error')); + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /New configuration was successfully applied to the startup script! Scheduling service restart in 1 min/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Restarting service to apply new changes for the runtime configuraiton/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Unable to restart service via bigstart/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Unable to restart service gracefully/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task done/ + ); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node --max_old_space_size=2400 --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual(processExitStub.callCount, 1); + + coreStub.logger.removeAllMessages(); + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(coreStub.logger.messages.all); + })); + + it('should report that bash disabled when unable check its status', () => runtimeConfig.start() + .then(() => { + restApiSysDB = () => [ + 404, + 'Not Found' + ]; + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /Shell not available, unable to proceed with task execution/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task done/ + ); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.isEmpty(remoteCmds); + coreStub.logger.removeAllMessages(); + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(remoteCmds); + assert.isEmpty(coreStub.logger.messages.all); + })); + + it('should not fail when unable to read script configuration once remote cmd executed', () => runtimeConfig.start() + .then(() => { + remoteCmbStub.onFirstCall().callsFake(() => { + virtualFS.writeFileSync(RESTNODE_SCRIPT_FNAME, 'something'); + return Promise.resolve(); + }); + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /Trying to execute "updater" script/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Unable to read configuration from the startup script/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task done/ + ); + assert.deepStrictEqual( + getScript(), + 'something' + ); + remoteCmds = []; + coreStub.logger.removeAllMessages(); + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(remoteCmds); + assert.isEmpty(coreStub.logger.messages.all); + })); + + it('should stop task once service stopped', () => runtimeConfig.start() + .then(() => { + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, 
+ maxHeapSize: 2400 + }), + clock.clockForward(5, { promisify: true, delay: 1, repeat: 50 }), + testUtil.waitTill(() => { + try { + assert.includeMatch( + coreStub.logger.messages.all, + /New configuration was successfully applied to the startup script! Scheduling service restart/ + ); + return true; + } catch (err) { + return false; + } + }, 1) + ]); + }) + .then(() => Promise.all([ + runtimeConfig.stop(), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 50 }) + ])) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /New configuration was successfully applied to the startup script! Scheduling service restart/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task stopped/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task emitted event "stopped"/ + ); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node --max_old_space_size=2400 --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.includeMatch(remoteCmds, `${process.argv[0]} ${UPDATER_DIR}/updater.js`); + remoteCmds = []; + coreStub.logger.removeAllMessages(); + + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(remoteCmds); + assert.isEmpty(coreStub.logger.messages.all); + })); + + it('should retry task if possible', () => runtimeConfig.start() + .then(() => { + remoteCmbStub.onFirstCall().callsFake(() => Promise.resolve()); + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /Configuration was not applied to the script/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task failed/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Retrying attempt to update the startup script/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /New configuration was successfully applied to the startup script! Scheduling service restart/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task done/ + ); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node --max_old_space_size=2400 --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.includeMatch(remoteCmds, `${process.argv[0]} ${UPDATER_DIR}/updater.js`); + remoteCmds = []; + coreStub.logger.removeAllMessages(); + + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(remoteCmds); + assert.isEmpty(coreStub.logger.messages.all); + })); + + it('should not retry task more than once', () => runtimeConfig.start() + .then(() => { + remoteCmbStub.callsFake(() => Promise.resolve()); + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /Retrying attempt to update the startup script/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task failed/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /No retries left for the failed task/ + ); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + coreStub.logger.removeAllMessages(); + return clock.clockForward(3000, { promisify: true, delay: 10, repeat: 10 }); + }) + .then(() => { + assert.isEmpty(remoteCmds); + assert.isEmpty(coreStub.logger.messages.all); + })); + + it('should remove log files before task execution', () => runtimeConfig.start() + .then(() => { + virtualFS.writeFileSync(UPDATER_LOGS, 'existing-data'); + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(3000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.notIncludeMatch(coreStub.logger.messages.all, /existing-data/); + assert.includeMatch( + coreStub.logger.messages.all, + /Task done/ + ); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node --max_old_space_size=2400 --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + + let logs = ''; + try { + logs = virtualFS.readFileSync(UPDATER_LOGS).toString(); + } catch (err) { + // do nothing + } + assert.notIncludeMatch(logs, /existing-data/); + })); + + it('should schedule next task and cancel the current one', () => runtimeConfig.start() + .then(() => { + remoteCmbStub.onSecondCall().callsFake(() => processDeclaration({ + enableGC: false, + maxHeapSize: 2500 + }) + .then(() => testUtil.sleep(5000)) + .then(() => Promise.reject(new Error('expected error')))); + + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(6000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /Task emitted event "stopped"/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task stopped/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task done/ + ); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node --max_old_space_size=2500 /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + })); + + it('should schedule next task and cancel the current one and existing "next" task too', () => runtimeConfig.start() + .then(() => { + remoteCmbStub.onSecondCall().callsFake(() => processDeclaration({ + enableGC: false, + maxHeapSize: 2500 + }) + .then(() => processDeclaration({ + enableGC: true, + maxHeapSize: 2600 + })) + .then(() => testUtil.sleep(5000)) + .then(() => Promise.reject(new Error('expected error')))); + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(6000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /Task emitted event "stopped"/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task stopped/ + ); + assert.includeMatch( + coreStub.logger.messages.all, + /Task done/ + ); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node --max_old_space_size=2600 --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + })); + + it('should not schedule next task when service restart requested', () => runtimeConfig.start() + .then(() => { + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(6000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /Task done/ + ); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node --max_old_space_size=2400 --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + coreStub.logger.removeAllMessages(); + return Promise.all([ + processDeclaration({}), + clock.clockForward(6000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.includeMatch( + coreStub.logger.messages.all, + /Unable to schedule next task: the service restart requested already/ + ); + assert.notIncludeMatch( + coreStub.logger.messages.all, + /Task (done|failed|stopped)/ + ); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node --max_old_space_size=2400 --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + })); + + it('should apply configuration', () => runtimeConfig.start() + .then(() => Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(6000, { promisify: true, delay: 1, repeat: 100 }) + ])) + .then(() => { + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node --max_old_space_size=2400 --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + // should not apply a new one due restart request + return Promise.all([ + processDeclaration(), + clock.clockForward(6000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node --max_old_space_size=2400 --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + return Promise.all([ + runtimeConfig.restart(), + clock.clockForward(6000, { promisify: true, delay: 1, repeat: 10 }) + ]); + }) + .then(() => Promise.all([ + processDeclaration({ + enableGC: true, + maxHeapSize: 2400 + }), + clock.clockForward(6000, { promisify: true, delay: 1, repeat: 100 }) + ])) + .then(() => { + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node --max_old_space_size=2400 --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + return Promise.all([ + processDeclaration({}), + clock.clockForward(6000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + // should not apply due restart request + return Promise.all([ + processDeclaration({ + enableGC: true + }), + clock.clockForward(6000, { promisify: true, delay: 1, repeat: 100 }) + ]); + }) + .then(() => { + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ` # ID:${getTaskID()}`, + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + })); + + // TODO: + // - test that sumbits multiple declarations (one by one) + // - update RestWorker tests if needed (add new stub) +}); diff --git a/test/unit/runtimeConfig/updaterTests.js b/test/unit/runtimeConfig/updaterTests.js new file mode 100644 index 00000000..cf41f296 --- /dev/null +++ b/test/unit/runtimeConfig/updaterTests.js @@ -0,0 +1,837 @@ +/** + * Copyright 2024 F5, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use strict'; + +/* eslint-disable import/order, no-template-curly-in-string, prefer-regex-literals */ +const moduleCache = require('../shared/restoreCache')(); + +const fs = require('fs'); +const memfs = require('memfs'); +const pathUtil = require('path'); +const sinon = require('sinon'); + +const assert = require('../shared/assert'); +const sourceCode = require('../shared/sourceCode'); +const testUtil = require('../shared/util'); + +const updater = sourceCode('src/lib/runtimeConfig/updater'); + +moduleCache.remember(); + +describe('Runtime Config / Updater', () => { + const RESTNODE_SCRIPT_FNAME = '/etc/bigstart/scripts/restnoded'; + const UPDATER_DIR = pathUtil.join(__dirname, '../../../src/lib/runtimeConfig'); + const UPDATER_LOGS = pathUtil.join(UPDATER_DIR, 'logs.txt'); + + let appCtx; + let virtualFS; + let volume; + + before(() => { + moduleCache.restore(); + + volume = new memfs.Volume(); + virtualFS = memfs.createFsFromVolume(volume); + }); + + afterEach(() => { + sinon.restore(); + }); + + beforeEach(() => { + volume.reset(); + + volume.mkdirSync(pathUtil.dirname(RESTNODE_SCRIPT_FNAME), { recursive: true }); + volume.mkdirSync(UPDATER_DIR, { recursive: true }); + + virtualFS.writeFileSync( + RESTNODE_SCRIPT_FNAME, + fs.readFileSync(pathUtil.join(__dirname, 'bigstart_restnode')) + ); + + appCtx = { + fsUtil: virtualFS, + logger: { + debug() {}, + error() {}, + exception() {}, + info() {} + } + }; + }); + + function createTask(data) { + updater.saveScriptConfigFile(data, appCtx); + } + function getLogs() { + return updater.readLogsFile(appCtx).split('\n'); + } + function getScript() { + return virtualFS.readFileSync(RESTNODE_SCRIPT_FNAME).toString(); + } + function getCurrentConfig() { + return updater.fetchConfigFromScript(appCtx); + } + + describe('.enrichScriptConfig()', () => { + it('should add defaults for missing options', () => { + assert.deepStrictEqual( + updater.enrichScriptConfig({}), + { + gcEnabled: false, + heapSize: 1400 + } + ); + }); + + it('should not add defaults for existing options', () => { + assert.deepStrictEqual( + updater.enrichScriptConfig({ + gcEnabled: true, + heapSize: 2000 + }), + { + gcEnabled: true, + heapSize: 2000 + } + ); + }); + }); + + describe('.fetchConfigFromScript()', () => { + it('should return null when unable to read config (no file)', () => { + virtualFS.unlinkSync(RESTNODE_SCRIPT_FNAME); + assert.isNull(updater.fetchConfigFromScript(appCtx)); + }); + + it('should return null when unable to read config (garbage data)', () => { + virtualFS.writeFileSync(RESTNODE_SCRIPT_FNAME, 'something'); + assert.isNull(updater.fetchConfigFromScript(appCtx)); + }); + }); + + describe('.main()', () => { + it('should do nothing when no config provided', () => { + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10) + .then(() => { + assert.includeMatch(getLogs(), /No config found, nothing to apply to the script/); + }); + }); + + it('should do nothing when config has no ID', () => { + createTask({ gcEnabled: true }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + 
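+ // NOTE: a saved config without an `id` is treated the same as no config at all — the updater logs "No config found, nothing to apply to the script" and leaves the startup script untouched (asserted below).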
return testUtil.sleep(10) + .then(() => { + assert.includeMatch(getLogs(), /No config found, nothing to apply to the script/); + }); + }); + + it('should do nothing when unable to read restnode script', () => { + virtualFS.unlinkSync(RESTNODE_SCRIPT_FNAME); + createTask({ gcEnabled: true, id: '123' }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10) + .then(() => { + const logs = getLogs(); + assert.notIncludeMatch(logs, /No config found, nothing to apply to the script/); + assert.includeMatch(logs, /Unable to read "restnode" startup script/); + }); + }); + + it('should do nothing when unable to read configuration from the script file', () => { + virtualFS.writeFileSync(RESTNODE_SCRIPT_FNAME, 'something useless'); + createTask({ gcEnabled: true, id: '123' }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10) + .then(() => { + const logs = getLogs(); + assert.notIncludeMatch(logs, /No config found, nothing to apply to the script/); + assert.notIncludeMatch(logs, /Unable to read "restnode" startup script/); + assert.includeMatch(logs, /No configuration read from the script/); + assert.includeMatch(logs, /The "restnode" startup script not modified!/); + }); + }); + + it('should not fail when unable to write data to file', () => { + createTask({ id: '123' }); + sinon.stub(virtualFS, 'writeFileSync').throws(new Error('test')); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10) + .then(() => { + const logs = getLogs(); + assert.notIncludeMatch(logs, /Done!/); + assert.includeMatch(logs, /Unable to write data to file/); + }); + }); + + it('should override logs', () => { + createTask({ id: '123' }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10) + .then(() => { + assert.includeMatch(getLogs(), /Done!/); + virtualFS.writeFileSync(UPDATER_LOGS, 'checkpoint', { flags: 'a' }); + assert.includeMatch(getLogs(), /checkpoint/); + + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10); + }) + .then(() => { + assert.notIncludeMatch(getLogs(), /checkpoint/); + }); + }); + + it('should apply empty configuration from the file', () => { + createTask({ id: '123' }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10) + .then(() => { + const logs = getLogs(); + assert.notIncludeMatch(logs, /No config found, nothing to apply to the script/); + assert.notIncludeMatch(logs, /Unable to read "restnode" startup script/); + assert.notIncludeMatch(logs, /No configuration read from the script/); + assert.notIncludeMatch(logs, /The "restnode" startup script not modified!/); + assert.notIncludeMatch(logs, /Enabling GC config./); + assert.notIncludeMatch(logs, /Upading heap size./); + assert.notIncludeMatch(logs, /Upading memory allocator config/); + assert.includeMatch(logs, /Adding "notice" block to the script./); + assert.includeMatch(logs, /Done!/); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:123', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: false, + heapSize: 1400, + id: '123' + } + ); + }); + }); + + it('should apply configuration from the file (GC only, example 1)', () => { + createTask({ gcEnabled: true, id: '123' }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10) + .then(() => { + const logs = getLogs(); + assert.notIncludeMatch(logs, /No config found, nothing to apply to the script/); + assert.notIncludeMatch(logs, /Unable to read "restnode" startup script/); + assert.notIncludeMatch(logs, /No configuration read from the script/); + assert.notIncludeMatch(logs, /The "restnode" startup script not modified!/); + assert.notIncludeMatch(logs, /Upading heap size./); + assert.notIncludeMatch(logs, /Upading memory allocator config/); + assert.includeMatch(logs, /Enabling GC config./); + assert.includeMatch(logs, /Adding "notice" block to the script./); + assert.includeMatch(logs, /Done!/); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:123', + ' exec /usr/bin/f5-rest-node --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: true, + heapSize: 1400, + id: '123' + } + ); + }); + }); + + it('should apply configuration from the file (GC only, example 2)', () => { + createTask({ gcEnabled: false, id: '123' }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10) + .then(() => { + const logs = getLogs(); + assert.notIncludeMatch(logs, /No config found, nothing to apply to the script/); + assert.notIncludeMatch(logs, /Unable to read "restnode" startup script/); + assert.notIncludeMatch(logs, /No configuration read from the script/); + assert.notIncludeMatch(logs, /The "restnode" startup script not modified!/); + assert.notIncludeMatch(logs, /Enabling GC config./); + assert.notIncludeMatch(logs, /Upading heap size./); + assert.notIncludeMatch(logs, /Upading memory allocator config/); + assert.includeMatch(logs, /Adding "notice" block to the script./); + assert.includeMatch(logs, /Done!/); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:123', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: false, + heapSize: 1400, + id: '123' + } + ); + }); + }); + + it('should apply configuration from the file (heapSize only, example 1)', () => { + createTask({ heapSize: 2000, id: '123' }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10) + .then(() => { + const logs = getLogs(); + assert.notIncludeMatch(logs, /No config found, nothing to apply to the script/); + assert.notIncludeMatch(logs, /Unable to read "restnode" startup script/); + assert.notIncludeMatch(logs, /No configuration read from the script/); + assert.notIncludeMatch(logs, /The "restnode" startup script not modified!/); + assert.notIncludeMatch(logs, /Enabling GC config./); + assert.includeMatch(logs, /Upading heap size./); + assert.notIncludeMatch(logs, /Upading memory allocator config/); + assert.includeMatch(logs, /Adding "notice" block to the script./); + assert.includeMatch(logs, /Done!/); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:123', + ' exec /usr/bin/f5-rest-node --max_old_space_size=2000 /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: false, + heapSize: 2000, + id: '123' + } + ); + }); + }); + + it('should apply configuration from the file (heapSize only, example 2)', () => { + createTask({ heapSize: 1400, id: '123' }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10) + .then(() => { + const logs = getLogs(); + assert.notIncludeMatch(logs, /No config found, nothing to apply to the script/); + assert.notIncludeMatch(logs, /Unable to read "restnode" startup script/); + assert.notIncludeMatch(logs, /No configuration read from the script/); + assert.notIncludeMatch(logs, /The "restnode" startup script not modified!/); + assert.notIncludeMatch(logs, /Enabling GC config./); + assert.notIncludeMatch(logs, /Upading heap size./); + assert.notIncludeMatch(logs, /Upading memory allocator config/); + assert.includeMatch(logs, /Adding "notice" block to the script./); + assert.includeMatch(logs, /Done!/); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:123', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: false, + heapSize: 1400, + id: '123' + } + ); + }); + }); + + it('should apply configuration from the file (heapSize only, example 3)', () => { + createTask({ heapSize: 500, id: '123' }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10) + .then(() => { + const logs = getLogs(); + assert.notIncludeMatch(logs, /No config found, nothing to apply to the script/); + assert.notIncludeMatch(logs, /Unable to read "restnode" startup script/); + assert.notIncludeMatch(logs, /No configuration read from the script/); + assert.notIncludeMatch(logs, /The "restnode" startup script not modified!/); + assert.notIncludeMatch(logs, /Enabling GC config./); + assert.includeMatch(logs, /Upading heap size./); + assert.notIncludeMatch(logs, /Upading memory allocator config/); + assert.includeMatch(logs, /Adding "notice" block to the script./); + assert.includeMatch(logs, /Done!/); + + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:123', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: false, + // that's ok, default size of V8 heap, can't see to 500 without affecting other apps + heapSize: 1400, + id: '123' + } + ); + }); + }); + + it('should apply configuration', () => { + createTask({ id: '123' }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10) + .then(() => { + assert.includeMatch(getLogs(), /Done!/); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:123', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: false, + heapSize: 1400, + id: '123' + } + ); + createTask({ id: '456' }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10); + }) + .then(() => { + assert.includeMatch(getLogs(), /Done!/); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:456', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: false, + heapSize: 1400, + id: '456' + } + ); + createTask({ + id: '456', + gcEnabled: true, + heapSize: 1500 + }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10); + }) + .then(() => { + assert.includeMatch(getLogs(), /Done!/); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:456', + ' exec /usr/bin/f5-rest-node --max_old_space_size=1500 --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: true, + heapSize: 1500, + id: '456' + } + ); + createTask({ + id: '456', + gcEnabled: true, + heapSize: 1500 + }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10); + }) + .then(() => { + assert.notIncludeMatch(getLogs(), /Done!/); + assert.includeMatch(getLogs(), /The "restnode" startup script not modified!/); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:456', + ' exec /usr/bin/f5-rest-node --max_old_space_size=1500 --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: true, + heapSize: 1500, + id: '456' + } + ); + createTask({ + id: '456', + gcEnabled: false, + heapSize: 1600 + }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10); + }) + .then(() => { + assert.includeMatch(getLogs(), /Done!/); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:456', + ' exec /usr/bin/f5-rest-node --max_old_space_size=1600 /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: false, + heapSize: 1600, + id: '456' + } + ); + createTask({ + id: '765', + gcEnabled: true, + heapSize: 500 + }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10); + }) + .then(() => { + assert.includeMatch(getLogs(), /Done!/); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:765', + ' exec /usr/bin/f5-rest-node --expose-gc /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: true, + heapSize: 1400, + id: '765' + } + ); + createTask({ + id: '765' + }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10); + }) + .then(() => { + assert.includeMatch(getLogs(), /Done!/); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:765', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: false, + heapSize: 1400, + id: '765' + } + ); + createTask({ + id: '765' + }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10); + }) + .then(() => { + assert.includeMatch(getLogs(), /The "restnode" startup script not modified/); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:765', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: false, + heapSize: 1400, + id: '765' + } + ); + createTask({ + id: '345' + }); + updater.main(virtualFS); + // sleep to let data be flushed to FS + return testUtil.sleep(10); + }) + .then(() => { + assert.includeMatch(getLogs(), /Done!/); + assert.deepStrictEqual( + getScript(), + [ + '#!/bin/sh', + '', + 'if [ -f /service/${service}/debug ]; then', + ' exec /usr/bin/f5-rest-node --debug /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'else', + ' # ATTENTION. 
The block below modified by F5 BIG-IP Telemetry Streaming!', + ' # To restore original behavior, uncomment the next line and remove the block below.', + ' #', + ' # exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + ' #', + ' # The block below should be removed to restore original behavior!', + ' # ID:345', + ' exec /usr/bin/f5-rest-node /usr/share/rest/node/src/restnode.js -p 8105 --logLevel finest -i ${LOG_FILE} -s none ${RCWFeature} >> /var/tmp/${service}.out 2>&1', + 'fi', + '' + ].join('\n') + ); + assert.deepStrictEqual( + getCurrentConfig(), + { + gcEnabled: false, + heapSize: 1400, + id: '345' + } + ); + }); + }); + }); +}); diff --git a/test/unit/shared/stubs.js b/test/unit/shared/stubs.js index fb434281..5752f90f 100644 --- a/test/unit/shared/stubs.js +++ b/test/unit/shared/stubs.js @@ -110,29 +110,34 @@ const _module = module.exports = { if (typeof repeatTimes === 'undefined' || (repeatTimes && repeatTimes > callCount)) { return fwdOptions.promisify ? Promise.resolve().then(timeTick) : timeTick(); } + return fwdOptions.promisify ? Promise.resolve() : undefined; } function then(ticks) { - if (stopClockForward) { - return; - } - if (ticks === false) { - return; + if (stopClockForward || ticks === false || !ctx.fakeClock) { + return fwdOptions.promisify ? Promise.resolve() : undefined; } ticks = typeof ticks === 'number' ? ticks : tickStep; - if (ctx.fakeClock) { - if (typeof fwdOptions.delay === 'number') { - ctx.fakeClock._setTimeout(doTimeTick, fwdOptions.delay, ticks); - } else if (fwdOptions.promisify) { - ctx.fakeClock._setImmediate(doTimeTick, ticks); - } else { - doTimeTick(ticks); + if (typeof fwdOptions.delay === 'number') { + if (fwdOptions.promisify) { + return new Promise((resolve) => { + ctx.fakeClock._setTimeout(resolve, fwdOptions.delay); + }) + .then(() => doTimeTick(ticks)); } + return ctx.fakeClock._setTimeout(doTimeTick, fwdOptions.delay, ticks); + } + if (fwdOptions.promisify) { + return new Promise((resolve) => { + ctx.fakeClock._setImmediate(resolve); + }) + .then(() => doTimeTick(ticks)); } + return doTimeTick(ticks); } function timeTick() { let nextTick = tickStep; if (stopClockForward) { - return; + return fwdOptions.promisify ? Promise.resolve() : undefined; } if (fwdOptions.cb) { nextTick = fwdOptions.cb(); @@ -141,10 +146,9 @@ const _module = module.exports = { if (!fwdOptions.promisify) { throw new Error('Callback passed to "clockForward" returned Promise but "clockForward" was not configured to use Promises!'); } - nextTick.then(then); - } else { - then(nextTick); + return nextTick.then(then); } + return then(nextTick); } return fwdOptions.promisify ? 
Promise.resolve().then(timeTick) : timeTick(); }; @@ -189,6 +193,12 @@ const _module = module.exports = { if (coreModules.persistentStorage) { ctx.persistentStorage = _module.persistentStorage(coreModules.persistentStorage, options.persistentStorage); } + if (coreModules.resourceMonitorUtils) { + ctx.resourceMonitorUtils = _module.resourceMonitorUtils( + coreModules.resourceMonitorUtils, + options.resourceMonitorUtils + ); + } if (coreModules.teemReporter) { ctx.teemReporter = _module.teemReporter(coreModules.teemReporter, options.teemReporter); } @@ -468,6 +478,34 @@ const _module = module.exports = { return ctx; }, + /** + * Stub for ResourceMonitor + * + * @param {module} resourceMonitor - module + * + * @returns {ResourceMonitorUtilsStubCtx} stub context + */ + resourceMonitorUtils(resourceMonitorUtils) { + const ctx = { + appMemoryUsage: sinon.stub(resourceMonitorUtils, 'appMemoryUsage'), + osAvailableMem: sinon.stub(resourceMonitorUtils, 'osAvailableMem') + }; + ctx.appMemoryUsage.external = 100; + ctx.appMemoryUsage.heapTotal = 101; + ctx.appMemoryUsage.heapUsed = 90; + ctx.appMemoryUsage.rss = 300; + ctx.appMemoryUsage.callsFake(() => ({ + external: ctx.appMemoryUsage.external, + heapTotal: ctx.appMemoryUsage.heapTotal, + heapUsed: ctx.appMemoryUsage.heapUsed, + rss: ctx.appMemoryUsage.rss + })); + + ctx.osAvailableMem.free = 100; + ctx.osAvailableMem.callsFake(() => ctx.osAvailableMem.free); + return ctx; + }, + /** * Stub for TeemReporter * @@ -628,7 +666,11 @@ const _module = module.exports = { }); ctx.getRuntimeInfo.nodeVersion = '4.6.0'; - ctx.getRuntimeInfo.callsFake(() => ({ nodeVersion: ctx.getRuntimeInfo.nodeVersion })); + ctx.getRuntimeInfo.maxHeapSize = 4096; + ctx.getRuntimeInfo.callsFake(() => ({ + maxHeapSize: ctx.getRuntimeInfo.maxHeapSize, + nodeVersion: ctx.getRuntimeInfo.nodeVersion + })); ctx.networkCheck.resolves(); return ctx; } @@ -653,6 +695,7 @@ _module.default = { deviceUtil: 'src/lib/utils/device', logger: 'src/lib/logger', persistentStorage: 'src/lib/persistentStorage', + resourceMonitorUtils: 'src/lib/resourceMonitor/utils', teemReporter: 'src/lib/teemReporter', tracer: 'src/lib/utils/tracer', utilMisc: 'src/lib/utils/misc' @@ -722,22 +765,6 @@ _module.default = { * @property {Object} preExistingListeners - listeners to restore * @property {object} stub - sinon stub */ -/** - * @typedef LoggerStubCtx - * @type {object} - * @property {object} messages - logged messages - * @property {Array} messages.all - all logged messages - * @property {Array} messages.debug - debug messages - * @property {Array} messages.error - error messages - * @property {Array} messages.info - info messages - * @property {Array} messages.warning - warning messages - * @property {Array} logLevelHistory - log level history - * @property {object} proxy_verbose - sinon stub for Logger.logger.verbose - * @property {object} proxy_debug - sinon stub for Logger.logger.debug - * @property {object} proxy_info - sinon stub for Logger.logger.info - * @property {object} proxy_warning - sinon stub for Logger.logger.warning - * @property {object} proxy_erro - sinon stub for Logger.logger.error - */ /** * @typedef iHealthPollerStubCtx * @type {object} @@ -755,6 +782,22 @@ _module.default = { * @property {object} QkviewManager.initialize - stub for QkviewManager.prototype.initialize * @property {object} QkviewManager.process - stub for QkviewManager.prototype.process */ +/** + * @typedef LoggerStubCtx + * @type {object} + * @property {object} messages - logged messages + * @property {Array} 
messages.all - all logged messages + * @property {Array} messages.debug - debug messages + * @property {Array} messages.error - error messages + * @property {Array} messages.info - info messages + * @property {Array} messages.warning - warning messages + * @property {Array} logLevelHistory - log level history + * @property {object} proxy_verbose - sinon stub for Logger.logger.verbose + * @property {object} proxy_debug - sinon stub for Logger.logger.debug + * @property {object} proxy_info - sinon stub for Logger.logger.info + * @property {object} proxy_warning - sinon stub for Logger.logger.warning + * @property {object} proxy_erro - sinon stub for Logger.logger.error + */ /** * @typedef PersistentStorageStubCtx * @type {object} @@ -771,6 +814,17 @@ _module.default = { * @property {boolean} savedStateParse - parse '_data_' property of saved state if exist * @property {object} storage - sinon stub for persistentStorage.persistentStorage.storage */ +/** + * @typedef ResourceMonitorUtilsStubCtx + * @type {object} + * @property {object} appMemoryUsage - stub for appMemoryUsage + * @property {number} appMemoryUsage.external - `external` value + * @property {number} appMemoryUsage.heapTotal - `heapTotal` value + * @property {number} appMemoryUsage.heapUsed - `heapUsed` value + * @property {number} appMemoryUsage.rss - `rss` value + * @property {object} osAvailableMem - stub for osAvailableMem + * @property {number} osAvailableMem.free - free memory value + */ /** * @typedef TeemReporterStubCtx * @type {object} @@ -793,5 +847,7 @@ _module.default = { * @property {number} generateUuid.uuidCounter - counter value * @property {boolean} generateUuid.numbersOnly - numbers only * @property {object} getRuntimeInfo - stub for getRuntimeInfo + * @property {string} getRuntimeInfo.nodeVersion - node.js version + * @property {number} getRuntimeInfo.maxHeapSize - V8 max heap size * @property {object} networkCheck - stub for networkCheck */ diff --git a/test/unit/systemPollerTests.js b/test/unit/systemPollerTests.js index e933cbc8..ad0f9a1a 100644 --- a/test/unit/systemPollerTests.js +++ b/test/unit/systemPollerTests.js @@ -28,8 +28,7 @@ const systemPollerConfigTestsData = require('./data/systemPollerTestsData'); const testUtil = require('./shared/util'); const configWorker = sourceCode('src/lib/config'); -const constants = sourceCode('src/lib/constants'); -const monitor = sourceCode('src/lib/utils/monitor'); +const ResourceMonitor = sourceCode('src/lib/resourceMonitor'); const systemPoller = sourceCode('src/lib/systemPoller'); const SystemStats = sourceCode('src/lib/systemStats'); const tracerMgr = sourceCode('src/lib/tracerManager'); @@ -725,7 +724,7 @@ describe('System Poller', () => { }); }); - describe('monitor "on check" event', () => { + describe('Resource Monitor', () => { const defaultDeclaration = { class: 'Telemetry', My_System: { @@ -746,39 +745,66 @@ describe('System Poller', () => { } }; + let clock; let pollerTimers; + let resourceMonitor; beforeEach(() => { + clock = stubs.clock(); + resourceMonitor = new ResourceMonitor(); + + const appCtx = { + configMgr: configWorker, + resourceMonitor + }; + + resourceMonitor.initialize(appCtx); + systemPoller.initialize(appCtx); + pollerTimers = {}; sinon.stub(systemPoller, 'getPollerTimers').returns(pollerTimers); - return configWorker.processDeclaration(testUtil.deepCopy(defaultDeclaration)) + + return resourceMonitor.start() + .then(() => Promise.all([ + configWorker.processDeclaration(testUtil.deepCopy(defaultDeclaration)), + clock.clockForward(3000, { 
promisify: true, delay: 1, repeat: 30 }) + ])) .then(() => { assert.isTrue(pollerTimers['f5telemetry_default::My_System::SystemPoller_1'].timer.isActive(), 'should be active'); assert.isTrue(pollerTimers['f5telemetry_default::My_System::SystemPoller_2'].timer.isActive(), 'should be active'); }); }); - it('should disable running pollers when thresholds not ok', () => monitor.safeEmitAsync('check', constants.APP_THRESHOLDS.MEMORY.NOT_OK) - .then(() => { - assert.isFalse(pollerTimers['f5telemetry_default::My_System::SystemPoller_1'].timer.isActive(), 'should be inactive'); - assert.isFalse(pollerTimers['f5telemetry_default::My_System::SystemPoller_2'].timer.isActive(), 'should be inactive'); - assert.isFalse(systemPoller.isEnabled(), 'should set processingEnabled to false'); - })); + afterEach(() => resourceMonitor.destroy()); - it('should enable disabled pollers when thresholds become ok', () => monitor.safeEmitAsync('check', constants.APP_THRESHOLDS.MEMORY.NOT_OK) - .then(() => { - assert.isFalse(pollerTimers['f5telemetry_default::My_System::SystemPoller_1'].timer.isActive(), 'should be inactive'); - assert.isFalse(pollerTimers['f5telemetry_default::My_System::SystemPoller_2'].timer.isActive(), 'should be inactive'); - assert.isFalse(systemPoller.isEnabled(), 'should set processingEnabled to false'); - return monitor.safeEmitAsync('check', constants.APP_THRESHOLDS.MEMORY.OK); - }) - .then(() => { - assert.isTrue(pollerTimers['f5telemetry_default::My_System::SystemPoller_1'].timer.isActive(), 'should be active'); - assert.strictEqual(pollerTimers['f5telemetry_default::My_System::SystemPoller_1'].timer.intervalInS, 180, 'should set configured interval'); - assert.isTrue(pollerTimers['f5telemetry_default::My_System::SystemPoller_2'].timer.isActive(), 'should be active'); - assert.strictEqual(pollerTimers['f5telemetry_default::My_System::SystemPoller_2'].timer.intervalInS, 200, 'should set configured interval'); - assert.isTrue(systemPoller.isEnabled(), 'should set processingEnabled to true'); - })); + it('should disable running pollers when thresholds not ok', () => { + coreStub.resourceMonitorUtils.osAvailableMem.free = 10; + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + .then(() => { + assert.isFalse(pollerTimers['f5telemetry_default::My_System::SystemPoller_1'].timer.isActive(), 'should be inactive'); + assert.isFalse(pollerTimers['f5telemetry_default::My_System::SystemPoller_2'].timer.isActive(), 'should be inactive'); + assert.isFalse(systemPoller.isEnabled(), 'should set processingEnabled to false'); + }); + }); + + it('should enable disabled pollers when thresholds become ok', () => { + coreStub.resourceMonitorUtils.osAvailableMem.free = 10; + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }) + .then(() => { + assert.isFalse(pollerTimers['f5telemetry_default::My_System::SystemPoller_1'].timer.isActive(), 'should be inactive'); + assert.isFalse(pollerTimers['f5telemetry_default::My_System::SystemPoller_2'].timer.isActive(), 'should be inactive'); + assert.isFalse(systemPoller.isEnabled(), 'should set processingEnabled to false'); + coreStub.resourceMonitorUtils.osAvailableMem.free = 500; + return clock.clockForward(3000, { promisify: true, delay: 1, repeat: 10 }); + }) + .then(() => { + assert.isTrue(pollerTimers['f5telemetry_default::My_System::SystemPoller_1'].timer.isActive(), 'should be active'); + assert.strictEqual(pollerTimers['f5telemetry_default::My_System::SystemPoller_1'].timer.intervalInS, 180, 'should set configured 
interval'); + assert.isTrue(pollerTimers['f5telemetry_default::My_System::SystemPoller_2'].timer.isActive(), 'should be active'); + assert.strictEqual(pollerTimers['f5telemetry_default::My_System::SystemPoller_2'].timer.intervalInS, 200, 'should set configured interval'); + assert.isTrue(systemPoller.isEnabled(), 'should set processingEnabled to true'); + }); + }); }); }); }); diff --git a/test/unit/utils/deviceTests.js b/test/unit/utils/deviceTests.js index ec4d34fb..88663ae5 100644 --- a/test/unit/utils/deviceTests.js +++ b/test/unit/utils/deviceTests.js @@ -57,13 +57,11 @@ describe('Device Util', () => { it('should gather device info', () => { sinon.stub(deviceUtil, 'getDeviceType').resolves(constants.DEVICE_TYPE.BIG_IP); sinon.stub(deviceUtil, 'getDeviceVersion').resolves({ version: '14.0.0' }); - sinon.stub(deviceUtil, 'getDeviceNodeMemoryLimit').resolves(1888); return deviceUtil.gatherHostDeviceInfo() .then(() => { assert.deepStrictEqual( deviceUtil.getHostDeviceInfo(), { - NODE_MEMORY_LIMIT: 1888, TYPE: 'BIG-IP', VERSION: { version: '14.0.0' }, RETRIEVE_SECRETS_FROM_TMSH: false @@ -408,76 +406,6 @@ describe('Device Util', () => { }); }); - describe('.getDeviceNodeMemoryLimit()', () => { - afterEach(() => { - nock.cleanAll(); - }); - - it('should return default value when db variables NOT set', () => { - testUtil.mockEndpoints([{ - endpoint: '/mgmt/tm/sys/db/provision.extramb', - response: { - kind: 'tm:sys:db:dbstate', - name: 'provision.extramb', - value: '0', - valueRange: 'integer min:0 max:8192' - } - }, - { - endpoint: '/mgmt/tm/sys/db/restjavad.useextramb', - response: { - kind: 'tm:sys:db:dbstate', - name: 'restjavad.useextramb', - value: 'false', - valueRange: 'false true' - } - }]); - return assert.becomes( - deviceUtil.getDeviceNodeMemoryLimit(constants.LOCAL_HOST), - constants.APP_THRESHOLDS.MEMORY.DEFAULT_MB - ); - }); - - it('should return db value when db variables set', () => { - testUtil.mockEndpoints([{ - endpoint: '/mgmt/tm/sys/db/provision.extramb', - response: { - kind: 'tm:sys:db:dbstate', - name: 'provision.extramb', - value: '2048', - valueRange: 'integer min:0 max:8192' - } - }, - { - endpoint: '/mgmt/tm/sys/db/restjavad.useextramb', - response: { - kind: 'tm:sys:db:dbstate', - name: 'restjavad.useextramb', - value: 'true', - valueRange: 'false true' - } - }]); - return assert.becomes(deviceUtil.getDeviceNodeMemoryLimit(constants.LOCAL_HOST), 2048); - }); - - it('should return default value when an error occurs during lookup', () => { - testUtil.mockEndpoints([{ - endpoint: '/mgmt/tm/sys/db/restjavad.useextramb', - response: { - kind: 'tm:sys:db:dbstate', - name: 'restjavad.useextramb', - value: 'true', - valueRange: 'false true' - } - // no second mock to simulate failure to retrieve from provision.db endpoint - }]); - return assert.becomes( - deviceUtil.getDeviceNodeMemoryLimit(constants.LOCAL_HOST), - constants.APP_THRESHOLDS.MEMORY.DEFAULT_MB - ); - }); - }); - describe('.makeDeviceRequest()', () => { it('should preserve device\'s default port, protocol, HTTP method and etc.', () => { testUtil.mockEndpoints( diff --git a/test/unit/utils/miscTests.js b/test/unit/utils/miscTests.js index bd9d34ca..6aadaad4 100644 --- a/test/unit/utils/miscTests.js +++ b/test/unit/utils/miscTests.js @@ -21,6 +21,7 @@ const moduleCache = require('../shared/restoreCache')(); const net = require('net'); const sinon = require('sinon'); +const v8 = require('v8'); const assert = require('../shared/assert'); const sourceCode = require('../shared/sourceCode'); @@ -149,12 +150,17 @@ 
describe('Misc Util', () => { describe('.getRuntimeInfo()', () => { beforeEach(() => { sinon.stub(process, 'version').value('v14.5.1'); + sinon.stub(v8, 'getHeapStatistics').callsFake(() => ({ + heap_size_limit: 2000 * 1024 * 1024 + })); }); it('should return runtime info', () => { - const nodeVersion = process.version; - const returnValue = util.getRuntimeInfo().nodeVersion; - assert(returnValue === '14.5.1', 'getRuntimeInfo returns wrong value 1'); - assert(returnValue === nodeVersion.substring(1), 'getRuntimeInfo returns wrong value 2'); + const returnValue = util.getRuntimeInfo(); + assert.deepStrictEqual(returnValue, { + maxHeapSize: 2000, + nodeVersion: '14.5.1' + }); + assert.deepStrictEqual(returnValue.nodeVersion, process.version.slice(1)); }); }); @@ -204,6 +210,21 @@ describe('Misc Util', () => { }); }); + describe('.deepFreeze()', () => { + it('should make deep freeze for an object', () => { + const src = { schedule: { frequency: 'daily', time: { start: '04:20', end: '6:00' } } }; + let copy = util.deepCopy(src); + copy = util.deepFreeze(copy); + assert.deepStrictEqual(copy, src, 'should be equal'); + + // let's check that copy is deeply freezed + assert.throws(() => { + copy.schedule.frequency = 'frequency'; + }); + assert.deepStrictEqual(copy, src, 'should be equal'); + }); + }); + describe('.parseJsonWithDuplicateKeys()', () => { it('should parse JSON string without duplicate keys', () => { const input = JSON.stringify({ diff --git a/test/unit/utils/monitorTests.js b/test/unit/utils/monitorTests.js deleted file mode 100644 index 15dbcf55..00000000 --- a/test/unit/utils/monitorTests.js +++ /dev/null @@ -1,293 +0,0 @@ -/** - * Copyright 2024 F5, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -'use strict'; - -/* eslint-disable import/order */ -const moduleCache = require('../shared/restoreCache')(); - -const sinon = require('sinon'); - -const assert = require('../shared/assert'); -const sourceCode = require('../shared/sourceCode'); -const stubs = require('../shared/stubs'); -const testUtil = require('../shared/util'); - -const APP_THRESHOLDS = sourceCode('src/lib/constants').APP_THRESHOLDS; -const config = sourceCode('src/lib/config'); -const deviceUtil = sourceCode('src/lib/utils/device'); -const logger = sourceCode('src/lib/logger'); -const monitor = sourceCode('src/lib/utils/monitor'); -const timers = sourceCode('src/lib/utils/timers'); - -moduleCache.remember(); - -/** - * TODO: refactor tests: - * - add more complex and reliable tests - */ -describe('Monitor Util', () => { - let disabledEnvVarStub; - let loggerStub; - - before(() => { - moduleCache.restore(); - }); - - beforeEach(() => { - // disabled by default, otherwise test imports can trigger multiple monitor instance starts - // since we're bypassing the singleton in tests using restoreCache - disabledEnvVarStub = sinon.stub(process.env, APP_THRESHOLDS.MONITOR_DISABLED); - disabledEnvVarStub.value(undefined); - loggerStub = stubs.logger(logger); - }); - - afterEach(() => { - monitor.stop(); - sinon.restore(); - }); - - describe('config "on change" event', () => { - const mockConfig1 = { - components: [ - { - class: 'Telemetry_System_Poller', - enable: true - }, - { - class: 'Controls', - namespace: 'f5telemetry_default' - // prior to v1.18 - // memoryThresholdPercent: undefined - } - ] - }; - const mockConfig2 = { - components: [ - { - class: 'Telemetry_System_Poller', - enable: true - }, - { - class: 'Controls', - memoryThresholdPercent: 50, - namespace: 'f5telemetry_default' - } - ] - }; - const mockConfig3 = { - components: [ - { - class: 'Telemetry_System_Poller', - enable: false - }, - { - class: 'Controls', - memoryThresholdPercent: 76, - namespace: 'f5telemetry_default' - } - ] - }; - - it('should enable monitor checks when there are components enabled', () => config.emitAsync('change', mockConfig1) - .then(() => { - assert.instanceOf(monitor.timer, timers.BasicTimer); - assert.strictEqual(monitor.memoryThreshold, 1290); - assert.strictEqual(monitor.timer.intervalInS, 5, 'should set interval to 5 sec'); - assert.isTrue(monitor.timer.isActive(), 'should be active'); - })); - - it('should update monitor checks when there are components enabled', () => config.emitAsync('change', mockConfig1) - .then(() => { - assert.instanceOf(monitor.timer, timers.BasicTimer); - assert.strictEqual(monitor.memoryThreshold, 1290); - assert.strictEqual(monitor.timer.intervalInS, 5, 'should set interval to 5 sec'); - assert.isTrue(monitor.timer.isActive(), 'should be active'); - return config.emitAsync('change', mockConfig2); - }) - .then(() => { - assert.instanceOf(monitor.timer, timers.BasicTimer); - assert.strictEqual(monitor.memoryThreshold, 717); - assert.strictEqual(monitor.timer.intervalInS, 5, 'should set interval to 5 sec'); - assert.isTrue(monitor.timer.isActive(), 'should be active'); - })); - - it('should disable monitor checks when there are no components enabled', () => config.emitAsync('change', mockConfig2) - .then(() => { - assert.instanceOf(monitor.timer, timers.BasicTimer); - assert.strictEqual(monitor.memoryThreshold, 717); - assert.isTrue(monitor.timer.isActive(), 'should start timer'); - return config.emitAsync('change', mockConfig3); - }) - .then(() => { - assert.notExists(monitor.memoryThreshold); - 
assert.isFalse(monitor.timer.isActive(), 'should stop timer'); - })); - - it('should disable monitor checks when threshold = 100%', () => config.emitAsync('change', mockConfig2) - .then(() => { - assert.instanceOf(monitor.timer, timers.BasicTimer); - assert.isTrue(monitor.timer.isActive(), 'should start timer'); - assert.strictEqual(monitor.memoryThreshold, 717); - - const mockConfigDisable = { - components: [ - { - class: 'Telemetry_System_Poller', - enable: true - }, - { - class: 'Controls', - memoryThresholdPercent: 100, - namespace: 'f5telemetry_default' - } - ] - }; - return config.emitAsync('change', mockConfigDisable); - }) - .then(() => { - assert.isFalse(monitor.timer.isActive(), 'should stop timer'); - assert.notExists(monitor.memoryThreshold); - })); - - it('should keep timer running after checks', () => { - sinon.stub(process, 'memoryUsage').returns({ rss: 987654321 }); - const emitSpy = sinon.spy(monitor, 'emitAsync'); - const fakeClock = stubs.clock(); - return config.emitAsync('change', mockConfig2) - .then(() => { - assert.isTrue(monitor.timer.isActive(), 'should be active'); - fakeClock.clockForward(1000, { promisify: true, repeat: 1000 }); - return testUtil.sleep(1000 * 999); - }) - .then(() => { - assert.isTrue(emitSpy.alwaysCalledWith('check', APP_THRESHOLDS.MEMORY.NOT_OK)); - assert.isTrue(monitor.timer.isActive(), 'should be active'); - }); - }); - - it('should ignore config changes when disabled via env var', () => { - disabledEnvVarStub.value('true'); - return config.emitAsync('change', mockConfig1) - .then(() => { - assert.isFalse(monitor.timer.isActive(), 'should be inactive'); - return config.emitAsync('change', mockConfig1); - }) - .then(() => { - assert.isFalse(monitor.timer.isActive(), 'should be inactive'); - }); - }); - - it('should catch event handler errors', () => monitor.emitAsync('error', new Error('test error')) - .then(() => { - assert.includeMatch( - loggerStub.messages.error, - /An unexpected error occurred in monitor check[\s\S]+test error/gm, - 'should log error message' - ); - })); - - it('should catch config event handler error', () => { - sinon.stub(monitor, 'start').throws(new Error('test error')); - return config.emitAsync('change', mockConfig1) - .then(() => { - assert.includeMatch( - loggerStub.messages.error, - /An error occurred in monitor checks \(config change handler\)[\s\S]+test error/gm, - 'should log error message' - ); - }); - }); - }); - - describe('.checkThresholds', () => { - let emitSpy; - - beforeEach(() => { - sinon.stub(deviceUtil, 'getHostDeviceInfo').returns(1000); - monitor.setLimits(70); - emitSpy = sinon.spy(monitor, 'emitAsync'); - return monitor.start(70); - }); - - afterEach(() => { - assert.isEmpty(loggerStub.messages.error, 'should have no error messages'); - }); - - afterEach(() => { - assert.isEmpty(loggerStub.messages.error, 'should have no error messages'); - }); - - afterEach(() => { - assert.isEmpty(loggerStub.messages.error, 'should have no error messages'); - }); - - it('should emit check event MEMORY_USAGE_HIGH when higher value than threshold', () => { - sinon.stub(process, 'memoryUsage').returns({ rss: 987654321 }); - return monitor.checkThresholds() - .then(() => { - assert.isTrue(emitSpy.calledOnceWith('check', APP_THRESHOLDS.MEMORY.NOT_OK)); - }); - }); - - it('should emit check event MEMORY_USAGE_HIGH when same value as threshold', () => { - sinon.stub(process, 'memoryUsage').returns({ rss: 734003200 }); - return monitor.checkThresholds() - .then(() => { - 
assert.isTrue(emitSpy.calledOnceWith('check', APP_THRESHOLDS.MEMORY.NOT_OK)); - }); - }); - - it('should emit check event MEMORY_USAGE_OK when below threshold value', () => { - sinon.stub(process, 'memoryUsage').returns({ rss: 2000000 }); - return monitor.checkThresholds() - .then(() => { - assert.isTrue(emitSpy.calledOnceWith('check', APP_THRESHOLDS.MEMORY.OK)); - }); - }); - }); - - describe('auto adjust timer interval based on % memory usage', () => { - let memUsageStub; - - beforeEach(() => { - sinon.stub(monitor, 'memoryLimit').value(1000); - sinon.stub(monitor, 'memoryThresholdPercent').value(80); - sinon.stub(monitor, 'memoryThreshold').value(800); - sinon.stub(monitor, 'emitAsync').resolves(); - memUsageStub = sinon.stub(monitor, 'getProcessMemUsage'); - }); - - const usageIntervals = [ - { sec: 30, memUsagePercent: 24, name: 'should configure <25%' }, - { sec: 15, memUsagePercent: 49, name: 'should configure >=25% and < 50%' }, - { sec: 10, memUsagePercent: 74, name: 'should configure >=50% and < 75%' }, - { sec: 5, memUsagePercent: 89, name: 'should configure >=75% and < 90%' }, - { sec: 3, memUsagePercent: 101, name: 'should configure >=90%' } - ]; - - usageIntervals.forEach((usageTest) => { - testUtil.getCallableIt(usageTest)(usageTest.name, () => { - const memUsageVal = (usageTest.memUsagePercent / 100) * monitor.memoryLimit; - memUsageStub.returns(memUsageVal); - return monitor.checkThresholds() - .then(() => { - assert.strictEqual(monitor.timer.intervalInS, usageTest.sec, `should change interval to ${usageTest.sec} for usage ${memUsageVal} ${usageTest.memUsagePercent}`); - }); - }); - }); - }); -}); diff --git a/test/unit/utils/structures/circularArrayTests.js b/test/unit/utils/structures/circularArrayTests.js index 5fd0ee62..f0c8b529 100644 --- a/test/unit/utils/structures/circularArrayTests.js +++ b/test/unit/utils/structures/circularArrayTests.js @@ -16,576 +16,1375 @@ 'use strict'; -/* eslint-disable import/order */ +/* eslint-disable import/order, no-plusplus, no-nested-ternary */ const moduleCache = require('../../shared/restoreCache')(); const assert = require('../../shared/assert'); const sourceCode = require('../../shared/sourceCode'); const CircularArray = sourceCode('src/lib/utils/structures').CircularArray; +const CircularArrayMR = sourceCode('src/lib/utils/structures').CircularArrayMR; moduleCache.remember(); -describe('Structures / Circular Array', () => { - describe('initialization', () => { - it('should create list with default size equal 1', () => { - const cl = new CircularArray(); - assert.deepStrictEqual(cl.size, 1, 'should use default size value'); - assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, 0, 'should have 0 alloceted items'); - assert.deepStrictEqual(cl.storage(), []); - }); - - it('should preallocate only 1 element when "size" missing', () => { - const cl = new CircularArray({ prealloc: true }); - assert.deepStrictEqual(cl.size, 1, 'should use default size value'); - assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, 1, 'should have 1 alloceted items'); - assert.deepStrictEqual(cl.storage(), new Array(1)); - }); - - it('should preallocate and fill only 1 element when "size" missing', () => { - const cl = new CircularArray({ prealloc: true, fill: 0 }); - assert.deepStrictEqual(cl.size, 1, 'should use default size value'); - assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, 1, 'should have 1 alloceted items'); - assert.deepStrictEqual(cl.storage(), 
[0]); - }); - - it('should preallocate only 1 element when "size" missing and preallocate > size', () => { - const cl = new CircularArray({ prealloc: 10 }); - assert.deepStrictEqual(cl.size, 1, 'should use default size value'); - assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, 1, 'should have 1 alloceted items'); - assert.deepStrictEqual(cl.storage(), new Array(1)); - }); - - it('should preallocate amd fill only 1 element when "size" missing and preallocate > size', () => { - const cl = new CircularArray({ prealloc: 10, fill: 0 }); - assert.deepStrictEqual(cl.size, 1, 'should use default size value'); - assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, 1, 'should have 1 alloceted items'); - assert.deepStrictEqual(cl.storage(), [0]); - }); - - it('should preallocate 0 element when "size" missing and "prealloc" is false', () => { - const cl = new CircularArray({ prealloc: false }); - assert.deepStrictEqual(cl.size, 1, 'should use default size value'); - assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, 0, 'should have 0 alloceted items'); - assert.deepStrictEqual(cl.storage(), []); - }); +describe('Structures / Circular Arrays', () => { + [ + CircularArray, + CircularArrayMR + ].forEach((Cls) => describe(`${Cls.name}`, () => { + describe('initialization', () => { + it('should create list with default size equal 1', () => { + const cl = new Cls(); + assert.deepStrictEqual(cl.size, 1, 'should use default size value'); + assert.lengthOf(cl, 0, 'should be empty'); + assert.deepStrictEqual(cl.allocated, 1, 'should have 1 alloceted items'); + assert.deepStrictEqual(cl.storage(), [undefined]); + }); - it('should preallocate 1 element when "size" missing and "prealloc" is negative', () => { - [ - -1, - -2, - -Number.MAX_SAFE_INTEGER - ].forEach((prealloc) => { - const cl = new CircularArray({ prealloc }); + it('should preallocate only 1 element when "size" missing', () => { + const cl = new Cls({ prealloc: true }); assert.deepStrictEqual(cl.size, 1, 'should use default size value'); assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, 1, 'should have 0 alloceted items'); + assert.deepStrictEqual(cl.allocated, 1, 'should have 1 alloceted items'); assert.deepStrictEqual(cl.storage(), new Array(1)); }); - }); - it('should preallocate 0 element when "size" missing and "prealloc" is invalid value', () => { - [ - NaN, - Infinity, - -Infinity, - Number.MAX_VALUE, - -0, - 0, - +0, - 'string' - ].forEach((prealloc) => { - const cl = new CircularArray({ prealloc }); + it('should preallocate and fill only 1 element when "size" missing', () => { + const cl = new Cls({ prealloc: true, fill: 0 }); assert.deepStrictEqual(cl.size, 1, 'should use default size value'); assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, 0, 'should have 0 alloceted items'); - assert.deepStrictEqual(cl.storage(), []); + assert.deepStrictEqual(cl.allocated, 1, 'should have 1 alloceted items'); + assert.deepStrictEqual(cl.storage(), [0]); }); - }); - it('should fail on attempt to set incorrect "size"', () => { - assert.throws(() => new CircularArray({ size: 'size' })); - assert.throws(() => new CircularArray({ size: Infinity })); - assert.throws(() => new CircularArray({ size: 0 })); - assert.throws(() => new CircularArray({ size: -1 })); - assert.throws(() => new CircularArray({ size: NaN })); - assert.throws(() => new CircularArray({ size: true })); - assert.throws(() => new 
CircularArray({ size: Number.MAX_VALUE })); - assert.doesNotThrow(() => new CircularArray({ size: Number.MAX_SAFE_INTEGER })); - }); - - it('should create list with non-default size', () => { - [ - 1, - 2, - Number.MAX_SAFE_INTEGER - ].forEach((size) => { - const cl = new CircularArray({ size }); - assert.deepStrictEqual(cl.size, size, 'should use non-default size value'); + it('should preallocate only 1 element when "size" missing and preallocate > size', () => { + const cl = new Cls({ prealloc: 10 }); + assert.deepStrictEqual(cl.size, 1, 'should use default size value'); assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, 0, 'should have 0 alloceted items'); - assert.deepStrictEqual(cl.storage(), []); + assert.deepStrictEqual(cl.allocated, 1, 'should have 1 alloceted items'); + assert.deepStrictEqual(cl.storage(), new Array(1)); }); - }); - it('should preallocate elements with non-default size', () => { - [ - 1, - 2, - 100 - ].forEach((size) => { - const cl = new CircularArray({ size, prealloc: true }); - assert.deepStrictEqual(cl.size, size, 'should use non-default size value'); + it('should preallocate amd fill only 1 element when "size" missing and preallocate > size', () => { + const cl = new Cls({ prealloc: 10, fill: 0 }); + assert.deepStrictEqual(cl.size, 1, 'should use default size value'); assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, size, 'should have 0 alloceted items'); - assert.deepStrictEqual(cl.storage(), new Array(size)); + assert.deepStrictEqual(cl.allocated, 1, 'should have 1 alloceted items'); + assert.deepStrictEqual(cl.storage(), [0]); }); - }); - it('should preallocate and fill elements with non-default size', () => { - const fill = 0; - [ - 1, - 2, - 100 - ].forEach((size) => { - const cl = new CircularArray({ size, prealloc: true, fill }); - assert.deepStrictEqual(cl.size, size, 'should use non-default size value'); + it('should preallocate 1 element when "size" missing and "prealloc" is false', () => { + const cl = new Cls({ prealloc: false }); + assert.deepStrictEqual(cl.size, 1, 'should use default size value'); assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, size, 'should have 0 alloceted items'); - assert.deepStrictEqual(cl.storage(), (new Array(size)).fill(fill)); + assert.deepStrictEqual(cl.allocated, 1, 'should have 1 alloceted items'); + assert.deepStrictEqual(cl.storage(), [undefined]); }); - }); - it('should preallocate elements with non-default size and preallocate > size', () => { - [ - 1, - 2, - 100 - ].forEach((size) => { - const cl = new CircularArray({ size, prealloc: size * 2 }); - assert.deepStrictEqual(cl.size, size, 'should use non-default size value'); - assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, size, 'should have 0 alloceted items'); - assert.deepStrictEqual(cl.storage(), (new Array(size))); + it('should preallocate 1 element when "size" missing and "prealloc" is negative', () => { + [ + -1, + -2, + -Number.MAX_SAFE_INTEGER + ].forEach((prealloc) => { + const cl = new Cls({ prealloc }); + assert.deepStrictEqual(cl.size, 1, 'should use default size value'); + assert.lengthOf(cl, 0, 'should be empty'); + assert.deepStrictEqual(cl.allocated, 1, 'should have 0 alloceted items'); + assert.deepStrictEqual(cl.storage(), new Array(1)); + }); + }); + + it('should preallocate 0 element when "size" missing and "prealloc" is invalid value', () => { + [ + NaN, + Infinity, + -Infinity, + Number.MAX_VALUE, + -0, + 0, + 
+0, + 'string' + ].forEach((prealloc) => { + const cl = new Cls({ prealloc }); + assert.deepStrictEqual(cl.size, 1, 'should use default size value'); + assert.lengthOf(cl, 0, 'should be empty'); + assert.deepStrictEqual(cl.allocated, 1, 'should have 1 alloceted items'); + assert.deepStrictEqual(cl.storage(), [undefined]); + }); + }); + + it('should fail on attempt to set incorrect "size"', () => { + assert.throws(() => new Cls({ size: 'size' })); + assert.throws(() => new Cls({ size: Infinity })); + assert.throws(() => new Cls({ size: 0 })); + assert.throws(() => new Cls({ size: -1 })); + assert.throws(() => new Cls({ size: NaN })); + assert.throws(() => new Cls({ size: true })); + assert.throws(() => new Cls({ size: Number.MAX_VALUE })); + assert.doesNotThrow(() => new Cls({ size: Number.MAX_SAFE_INTEGER })); }); - }); - it('should preallocate and fill elements with non-default size and preallocate > size', () => { - const fill = 0; - [ - 1, - 2, - 100 - ].forEach((size) => { - const cl = new CircularArray({ size, prealloc: size * 2, fill }); - assert.deepStrictEqual(cl.size, size, 'should use non-default size value'); + it('should create list with non-default size', () => { + [ + 1, + 2, + Number.MAX_SAFE_INTEGER + ].forEach((size) => { + const cl = new Cls({ size }); + assert.deepStrictEqual(cl.size, size, 'should use non-default size value'); + assert.lengthOf(cl, 0, 'should be empty'); + assert.deepStrictEqual(cl.allocated, size === 1 ? 1 : 0); + assert.deepStrictEqual(cl.storage(), size === 1 ? [undefined] : []); + }); + }); + + it('should preallocate elements with non-default size', () => { + [ + 1, + 2, + 100 + ].forEach((size) => { + const cl = new Cls({ size, prealloc: true }); + assert.deepStrictEqual(cl.size, size, 'should use non-default size value'); + assert.lengthOf(cl, 0, 'should be empty'); + assert.deepStrictEqual(cl.allocated, size, 'should have 0 alloceted items'); + assert.deepStrictEqual(cl.storage(), new Array(size)); + }); + }); + + it('should preallocate and fill elements with non-default size', () => { + const fill = 0; + [ + 1, + 2, + 100 + ].forEach((size) => { + const cl = new Cls({ size, prealloc: true, fill }); + assert.deepStrictEqual(cl.size, size, 'should use non-default size value'); + assert.lengthOf(cl, 0, 'should be empty'); + assert.deepStrictEqual(cl.allocated, size, 'should have 0 alloceted items'); + assert.deepStrictEqual(cl.storage(), (new Array(size)).fill(fill)); + }); + }); + + it('should preallocate elements with non-default size and preallocate > size', () => { + [ + 1, + 2, + 100 + ].forEach((size) => { + const cl = new Cls({ size, prealloc: size * 2 }); + assert.deepStrictEqual(cl.size, size, 'should use non-default size value'); + assert.lengthOf(cl, 0, 'should be empty'); + assert.deepStrictEqual(cl.allocated, size, 'should have 0 alloceted items'); + assert.deepStrictEqual(cl.storage(), (new Array(size))); + }); + }); + + it('should preallocate and fill elements with non-default size and preallocate > size', () => { + const fill = 0; + [ + 1, + 2, + 100 + ].forEach((size) => { + const cl = new Cls({ size, prealloc: size * 2, fill }); + assert.deepStrictEqual(cl.size, size, 'should use non-default size value'); + assert.lengthOf(cl, 0, 'should be empty'); + assert.deepStrictEqual(cl.allocated, size, 'should have 0 alloceted items'); + assert.deepStrictEqual(cl.storage(), (new Array(size)).fill(fill)); + }); + }); + + it('should preallocate amd fill only 1 element when "size" missing and preallocate > size', () => { + const cl = new 
Cls({ prealloc: 10, fill: 0 }); + assert.deepStrictEqual(cl.size, 1, 'should use default size value'); assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, size, 'should have 0 alloceted items'); - assert.deepStrictEqual(cl.storage(), (new Array(size)).fill(fill)); + assert.deepStrictEqual(cl.allocated, 1, 'should have 1 alloceted items'); + assert.deepStrictEqual(cl.storage(), [0]); + }); + + it('should create list with non-default size and do not preallocate items', () => { + [ + 1, + 2, + Number.MAX_SAFE_INTEGER + ].forEach((size) => { + const cl = new Cls({ size, prealloc: false }); + assert.deepStrictEqual(cl.size, size, 'should use non-default size value'); + assert.lengthOf(cl, 0, 'should be empty'); + assert.deepStrictEqual(cl.allocated, size === 1 ? 1 : 0); + assert.deepStrictEqual(cl.storage(), size === 1 ? [undefined] : []); + }); }); }); - it('should preallocate amd fill only 1 element when "size" missing and preallocate > size', () => { - const cl = new CircularArray({ prealloc: 10, fill: 0 }); - assert.deepStrictEqual(cl.size, 1, 'should use default size value'); - assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, 1, 'should have 1 alloceted items'); - assert.deepStrictEqual(cl.storage(), [0]); + [ + { prealloc: true }, + { prealloc: 6 }, + { prealloc: 4 }, + { prealloc: 1 }, + { prealloc: false }, + { prealloc: true, fill: 0 }, + { prealloc: 6, fill: 0 }, + { prealloc: 4, fill: 0 }, + { prealloc: 1, fill: 0 }, + { prealloc: false, fill: 0 } + ].forEach((opts) => { + describe(`size = 1, opts=${JSON.stringify(opts)}`, () => { + it('should do basic operaionts', () => { + const preallocArray = new Array(1); + if (Object.prototype.hasOwnProperty.call(opts, 'fill')) { + preallocArray.fill(opts.fill); + } + + const cl = new Cls(Object.assign({}, opts)); + assert.lengthOf(cl, 0); + assert.deepStrictEqual(cl.size, 1); + assert.deepStrictEqual(cl.storage(), preallocArray); + + assert.deepStrictEqual(cl.push(1), opts.fill); + assert.lengthOf(cl, 1); + assert.deepStrictEqual(cl.peak(0), 1); + assert.deepStrictEqual(cl.allocated, 1); + assert.deepStrictEqual(cl.endIdx, 0); + assert.deepStrictEqual(cl.startIdx, 0); + assert.deepStrictEqual(cl.bpeak(), 1); + assert.deepStrictEqual(cl.fpeak(), 1); + + assert.deepStrictEqual(cl.pop(), 1); + assert.lengthOf(cl, 0); + assert.deepStrictEqual(cl.allocated, 1); + assert.deepStrictEqual(cl.endIdx, 0); + assert.deepStrictEqual(cl.startIdx, 0); + assert.deepStrictEqual(cl.storage(), [opts.fill]); + + assert.deepStrictEqual(cl.pop(), opts.fill); + assert.lengthOf(cl, 0); + assert.deepStrictEqual(cl.allocated, 1); + assert.deepStrictEqual(cl.endIdx, 0); + assert.deepStrictEqual(cl.startIdx, 0); + + assert.deepStrictEqual(cl.push(1), opts.fill); + assert.deepStrictEqual(cl.push(2), 1); + assert.deepStrictEqual(cl.push(3), 2); + + assert.lengthOf(cl, 1); + assert.deepStrictEqual(cl.peak(0), 3); + assert.deepStrictEqual(cl.allocated, 1); + assert.deepStrictEqual(cl.endIdx, 0); + assert.deepStrictEqual(cl.startIdx, 0); + assert.deepStrictEqual(cl.bpeak(), 3); + assert.deepStrictEqual(cl.fpeak(), 3); + assert.deepStrictEqual(cl.storage(), [3]); + + cl.rebase(); + assert.deepStrictEqual(cl.storage(), [3]); + + cl.rebase(true); + assert.deepStrictEqual(cl.storage(), [3]); + + assert.deepStrictEqual(cl.pop(), 3); + assert.lengthOf(cl, 0); + assert.deepStrictEqual(cl.allocated, 1); + assert.deepStrictEqual(cl.endIdx, 0); + assert.deepStrictEqual(cl.startIdx, 0); + + 
assert.deepStrictEqual(cl.nextIdx(cl.endIdx), 0); + assert.deepStrictEqual(cl.nextIdx(cl.startIdx), 0); + assert.deepStrictEqual(cl.prevIdx(cl.endIdx), 0); + assert.deepStrictEqual(cl.prevIdx(cl.startIdx), 0); + assert.deepStrictEqual(cl.storage(), [opts.fill]); + + cl.rebase(); + assert.deepStrictEqual(cl.storage(), [opts.fill]); + + cl.rebase(true); + assert.deepStrictEqual(cl.storage(), []); + }); + }); + + describe(`size = 2, opts=${JSON.stringify(opts)}`, () => { + it('should do basic operaionts', () => { + const preallocSize = Math.min(2, (Number.isSafeInteger(opts.prealloc) + ? opts.prealloc + : ((opts.prealloc && 2) || 0))); + + const preallocArray = (new Array(preallocSize)); + if (Object.prototype.hasOwnProperty.call(opts, 'fill')) { + preallocArray.fill(opts.fill); + } + + const cl = new Cls(Object.assign({ size: 2 }, opts)); + assert.lengthOf(cl, 0); + assert.deepStrictEqual(cl.size, 2); + assert.deepStrictEqual( + cl.storage(), + preallocArray + ); + + assert.deepStrictEqual(cl.push(1), opts.fill); + assert.lengthOf(cl, 1); + assert.deepStrictEqual(cl.peak(0), 1); + assert.deepStrictEqual(cl.allocated, preallocArray.length || 1); + assert.deepStrictEqual(cl.endIdx, 0); + assert.deepStrictEqual(cl.startIdx, 0); + assert.deepStrictEqual(cl.bpeak(), 1); + assert.deepStrictEqual(cl.fpeak(), 1); + + assert.deepStrictEqual(cl.pop(), 1); + assert.lengthOf(cl, 0); + assert.deepStrictEqual(cl.allocated, preallocArray.length || 1); + assert.deepStrictEqual(cl.endIdx, 0); + assert.deepStrictEqual(cl.startIdx, 0); + assert.deepStrictEqual( + cl.storage(), + preallocArray.length ? preallocArray : [opts.fill] + ); + + assert.deepStrictEqual(cl.push(1), opts.fill); + assert.deepStrictEqual(cl.push(2), opts.fill); + + assert.deepStrictEqual(cl.push(3), 1); + + assert.lengthOf(cl, 2); + assert.deepStrictEqual(cl.peak(0), 3); + assert.deepStrictEqual(cl.allocated, 2); + assert.deepStrictEqual(cl.endIdx, 0); + assert.deepStrictEqual(cl.startIdx, 1); + assert.deepStrictEqual(cl.bpeak(), 2); + assert.deepStrictEqual(cl.fpeak(), 3); + assert.deepStrictEqual(cl.storage(), [3, 2]); + + cl.rebase(); + assert.deepStrictEqual(cl.storage(), [3, 2]); + + cl.rebase(true); + assert.deepStrictEqual(cl.storage(), [3, 2]); + + assert.deepStrictEqual(cl.pop(), 2); + + cl.rebase(); + assert.deepStrictEqual(cl.storage(), [3, opts.fill]); + + cl.rebase(true); + assert.deepStrictEqual(cl.storage(), [3]); + + assert.deepStrictEqual(cl.pop(), 3); + assert.lengthOf(cl, 0); + assert.deepStrictEqual(cl.allocated, 1); + assert.deepStrictEqual(cl.endIdx, 0); + assert.deepStrictEqual(cl.startIdx, 0); + + assert.deepStrictEqual(cl.nextIdx(cl.endIdx), 1); + assert.deepStrictEqual(cl.nextIdx(cl.startIdx), 1); + assert.deepStrictEqual(cl.prevIdx(cl.endIdx), 1); + assert.deepStrictEqual(cl.prevIdx(cl.startIdx), 1); + assert.deepStrictEqual(cl.storage(), [opts.fill]); + + cl.rebase(); + assert.deepStrictEqual(cl.storage(), [opts.fill]); + + cl.rebase(true); + assert.deepStrictEqual(cl.storage(), []); + }); + }); + + describe(`size = 6, opts=${JSON.stringify(opts)}`, () => { + it('should do basic operaionts', () => { + const preallocSize = Math.min(6, (Number.isSafeInteger(opts.prealloc) + ? 
opts.prealloc + : ((opts.prealloc && 6) || 0))); + + const preallocArray = (new Array(preallocSize)); + if (Object.prototype.hasOwnProperty.call(opts, 'fill')) { + preallocArray.fill(opts.fill); + } + + const cl = new Cls(Object.assign({ size: 6 }, opts)); + assert.lengthOf(cl, 0); + assert.deepStrictEqual(cl.size, 6); + assert.deepStrictEqual( + cl.storage(), + preallocArray + ); + + assert.deepStrictEqual(cl.push(1), opts.fill); + assert.lengthOf(cl, 1); + assert.deepStrictEqual(cl.peak(0), 1); + assert.deepStrictEqual(cl.allocated, preallocArray.length || 1); + assert.deepStrictEqual(cl.endIdx, 0); + assert.deepStrictEqual(cl.startIdx, 0); + assert.deepStrictEqual(cl.bpeak(), 1); + assert.deepStrictEqual(cl.fpeak(), 1); + + assert.deepStrictEqual(cl.pop(), 1); + assert.lengthOf(cl, 0); + assert.deepStrictEqual(cl.allocated, preallocArray.length || 1); + assert.deepStrictEqual(cl.endIdx, 0); + assert.deepStrictEqual(cl.startIdx, 0); + assert.deepStrictEqual( + cl.storage(), + preallocArray.length ? preallocArray : [opts.fill] + ); + + assert.deepStrictEqual(cl.push(1), opts.fill); + assert.deepStrictEqual(cl.push(2), opts.fill); + assert.deepStrictEqual(cl.push(3), opts.fill); + + assert.lengthOf(cl, 3); + assert.deepStrictEqual(cl.allocated, Math.max(preallocArray.length, 3)); + assert.deepStrictEqual(cl.endIdx, 2); + assert.deepStrictEqual(cl.startIdx, 0); + assert.deepStrictEqual(cl.bpeak(), 1); + assert.deepStrictEqual(cl.fpeak(), 3); + + assert.deepStrictEqual(cl.push(4), opts.fill); + assert.deepStrictEqual(cl.push(5), opts.fill); + assert.deepStrictEqual(cl.push(6), opts.fill); + assert.deepStrictEqual(cl.push(7), 1); + assert.deepStrictEqual(cl.pop(), 2); + assert.deepStrictEqual(cl.pop(), 3); + assert.deepStrictEqual(cl.pop(), 4); + assert.deepStrictEqual(cl.push(8), opts.fill); + assert.deepStrictEqual(cl.push(9), opts.fill); + assert.deepStrictEqual(cl.push(10), opts.fill); + assert.deepStrictEqual(cl.push(11), 5); + assert.deepStrictEqual(cl.push(12), 6); + assert.deepStrictEqual(cl.push(13), 7); + assert.deepStrictEqual(cl.push(14), 8); + assert.deepStrictEqual(cl.peak(0), 13); + + assert.deepStrictEqual(cl.pop(), 9); + cl.rebase(); + assert.deepStrictEqual(cl.storage(), [10, 11, 12, 13, 14, opts.fill]); + + assert.deepStrictEqual(cl.pop(), 10); + assert.deepStrictEqual(cl.pop(), 11); + cl.rebase(); + assert.deepStrictEqual(cl.storage(), [12, 13, 14, opts.fill, opts.fill, opts.fill]); + + assert.deepStrictEqual(cl.pop(), 12); + cl.rebase(true); + assert.deepStrictEqual(cl.storage(), [13, 14]); + + assert.deepStrictEqual(cl.pop(), 13); + cl.rebase(); + assert.deepStrictEqual(cl.storage(), [14, opts.fill]); + }); + }); }); - it('should create list with non-default size and do not preallocate items', () => { - [ - 1, - 2, - Number.MAX_SAFE_INTEGER - ].forEach((size) => { - const cl = new CircularArray({ size, prealloc: false }); - assert.deepStrictEqual(cl.size, size, 'should use non-default size value'); - assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.allocated, 0, 'should have 0 alloceted items'); - assert.deepStrictEqual(cl.storage(), []); + describe('.rebase()', () => { + it('should shift array', () => { + const cl = new Cls({ size: 4 }); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); + cl.pop(); + cl.rebase(); + assert.deepStrictEqual(cl.storage(), [2, 3, 4, undefined]); + }); + + it('should rotate array', () => { + const cl = new Cls({ size: 4 }); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); + cl.pop(); + cl.pop(); + cl.pop(); + 
cl.push(5); + cl.rebase(); + assert.deepStrictEqual(cl.storage(), [4, 5, undefined, undefined]); }); }); - }); - [ - { prealloc: true }, - { prealloc: 6 }, - { prealloc: 4 }, - { prealloc: 1 }, - { prealloc: false }, - { prealloc: true, fill: 0 }, - { prealloc: 6, fill: 0 }, - { prealloc: 4, fill: 0 }, - { prealloc: 1, fill: 0 }, - { prealloc: false, fill: 0 } - ].forEach((opts) => { - describe(`size = 1, opts=${JSON.stringify(opts)}`, () => { - it('should do basic operaionts', () => { - const preallocSize = Math.min(1, (Number.isSafeInteger(opts.prealloc) - ? opts.prealloc - : ((opts.prealloc && 1) || 0))); - - const preallocArray = (new Array(preallocSize)); - if (Object.prototype.hasOwnProperty.call(opts, 'fill')) { - preallocArray.fill(opts.fill); - } + describe('.erase()', () => { + it('should re-use size if not defined', () => { + const cl = new Cls({ size: 4 }); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); - const cl = new CircularArray(Object.assign({}, opts)); - assert.lengthOf(cl, 0); - assert.deepStrictEqual(cl.size, 1); - assert.deepStrictEqual(cl.storage(), preallocArray); - - assert.deepStrictEqual(cl.push(1), opts.fill); - assert.lengthOf(cl, 1); - assert.deepStrictEqual(cl.peak(0), 1); - assert.deepStrictEqual(cl.allocated, 1); - assert.deepStrictEqual(cl.endIdx, 0); - assert.deepStrictEqual(cl.startIdx, 0); - assert.deepStrictEqual(cl.bpeak(), 1); - assert.deepStrictEqual(cl.fpeak(), 1); - - assert.deepStrictEqual(cl.pop(), 1); - assert.lengthOf(cl, 0); - assert.deepStrictEqual(cl.allocated, 1); - assert.deepStrictEqual(cl.endIdx, 0); - assert.deepStrictEqual(cl.startIdx, 0); - assert.deepStrictEqual(cl.storage(), [opts.fill]); - - assert.deepStrictEqual(cl.pop(), opts.fill); - assert.lengthOf(cl, 0); - assert.deepStrictEqual(cl.allocated, 1); - assert.deepStrictEqual(cl.endIdx, 0); - assert.deepStrictEqual(cl.startIdx, 0); - - assert.deepStrictEqual(cl.push(1), opts.fill); - assert.deepStrictEqual(cl.push(2), 1); - assert.deepStrictEqual(cl.push(3), 2); - - assert.lengthOf(cl, 1); - assert.deepStrictEqual(cl.peak(0), 3); - assert.deepStrictEqual(cl.allocated, 1); - assert.deepStrictEqual(cl.endIdx, 0); - assert.deepStrictEqual(cl.startIdx, 0); - assert.deepStrictEqual(cl.bpeak(), 3); - assert.deepStrictEqual(cl.fpeak(), 3); - assert.deepStrictEqual(cl.storage(), [3]); + cl.erase(); + assert.deepStrictEqual(cl.size, 4); + }); - cl.rebase(); - assert.deepStrictEqual(cl.storage(), [3]); + it('should re-use size if not defined', () => { + const cl = new Cls({ size: 4 }); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); - cl.rebase(true); - assert.deepStrictEqual(cl.storage(), [3]); + cl.erase({ prealloc: true, fill: 0 }); + assert.deepStrictEqual(cl.size, 4); + assert.deepStrictEqual(cl.storage(), [0, 0, 0, 0]); + }); - assert.deepStrictEqual(cl.pop(), 3); - assert.lengthOf(cl, 0); - assert.deepStrictEqual(cl.allocated, 1); - assert.deepStrictEqual(cl.endIdx, 0); - assert.deepStrictEqual(cl.startIdx, 0); + it('should re-use "fill" if not defined', () => { + const cl = new Cls({ size: 4, fill: 0 }); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); - assert.deepStrictEqual(cl.nextIdx(cl.endIdx), 0); - assert.deepStrictEqual(cl.nextIdx(cl.startIdx), 0); - assert.deepStrictEqual(cl.prevIdx(cl.endIdx), 0); - assert.deepStrictEqual(cl.prevIdx(cl.startIdx), 0); - assert.deepStrictEqual(cl.storage(), [opts.fill]); + cl.erase({ prealloc: true }); + assert.deepStrictEqual(cl.size, 4); + assert.deepStrictEqual(cl.storage(), [0, 0, 0, 0]); + }); - cl.rebase(); - 
assert.deepStrictEqual(cl.storage(), [opts.fill]); + it('should not re-use "fill" if not defined at init', () => { + const cl = new Cls({ size: 4 }); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); - cl.rebase(true); - assert.deepStrictEqual(cl.storage(), []); + cl.erase({ prealloc: true }); + assert.deepStrictEqual(cl.size, 4); + assert.deepStrictEqual(cl.storage(), [1, 2, 3, 4], 'should not overide prev values when "fill" not defined'); }); }); - describe(`size = 2, opts=${JSON.stringify(opts)}`, () => { - it('should do basic operaionts', () => { - const preallocSize = Math.min(2, (Number.isSafeInteger(opts.prealloc) - ? opts.prealloc - : ((opts.prealloc && 2) || 0))); + describe('.fastErase()', () => { + it('should erases all elems (case 1)', () => { + const cl = new Cls({ size: 4, fill: 0 }); + cl.fastErase(); + assert.deepStrictEqual(cl.storage(), []); - const preallocArray = (new Array(preallocSize)); - if (Object.prototype.hasOwnProperty.call(opts, 'fill')) { - preallocArray.fill(opts.fill); - } + cl.push(1); cl.push(2); cl.push(3); cl.push(4); - const cl = new CircularArray(Object.assign({ size: 2 }, opts)); - assert.lengthOf(cl, 0); - assert.deepStrictEqual(cl.size, 2); - assert.deepStrictEqual( - cl.storage(), - preallocArray - ); - - assert.deepStrictEqual(cl.push(1), opts.fill); - assert.lengthOf(cl, 1); - assert.deepStrictEqual(cl.peak(0), 1); - assert.deepStrictEqual(cl.allocated, preallocArray.length || 1); - assert.deepStrictEqual(cl.endIdx, 0); - assert.deepStrictEqual(cl.startIdx, 0); - assert.deepStrictEqual(cl.bpeak(), 1); - assert.deepStrictEqual(cl.fpeak(), 1); - - assert.deepStrictEqual(cl.pop(), 1); - assert.lengthOf(cl, 0); - assert.deepStrictEqual(cl.allocated, preallocArray.length || 1); - assert.deepStrictEqual(cl.endIdx, 0); - assert.deepStrictEqual(cl.startIdx, 0); - assert.deepStrictEqual( - cl.storage(), - preallocArray.length ? 
preallocArray : [opts.fill] - ); - - assert.deepStrictEqual(cl.push(1), opts.fill); - assert.deepStrictEqual(cl.push(2), opts.fill); - - assert.deepStrictEqual(cl.push(3), 1); - - assert.lengthOf(cl, 2); - assert.deepStrictEqual(cl.peak(0), 3); - assert.deepStrictEqual(cl.allocated, 2); - assert.deepStrictEqual(cl.endIdx, 0); - assert.deepStrictEqual(cl.startIdx, 1); - assert.deepStrictEqual(cl.bpeak(), 2); - assert.deepStrictEqual(cl.fpeak(), 3); - assert.deepStrictEqual(cl.storage(), [3, 2]); + cl.fastErase(); + assert.deepStrictEqual(cl.storage(), [0, 0, 0, 0]); + }); - cl.rebase(); - assert.deepStrictEqual(cl.storage(), [3, 2]); + it('should erases all elems (case 2)', () => { + const cl = new Cls({ size: 4, fill: 0 }); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); cl.push(5); - cl.rebase(true); - assert.deepStrictEqual(cl.storage(), [3, 2]); + cl.fastErase(); + assert.deepStrictEqual(cl.storage(), [0, 0, 0, 0]); + }); - assert.deepStrictEqual(cl.pop(), 2); + it('should erases all elems (case 3)', () => { + const cl = new Cls({ size: 4, fill: 0 }); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); cl.push(5); cl.push(6); - cl.rebase(); - assert.deepStrictEqual(cl.storage(), [3, opts.fill]); + cl.fastErase(); + assert.deepStrictEqual(cl.storage(), [0, 0, 0, 0]); + }); - cl.rebase(true); - assert.deepStrictEqual(cl.storage(), [3]); + it('should erase all elems (size of 1)', () => { + const cl = new Cls({ size: 1, fill: 0 }); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); cl.push(5); cl.push(6); - assert.deepStrictEqual(cl.pop(), 3); - assert.lengthOf(cl, 0); - assert.deepStrictEqual(cl.allocated, 1); - assert.deepStrictEqual(cl.endIdx, 0); - assert.deepStrictEqual(cl.startIdx, 0); + cl.fastErase(); + assert.deepStrictEqual(cl.storage(), [0]); + }); - assert.deepStrictEqual(cl.nextIdx(cl.endIdx), 1); - assert.deepStrictEqual(cl.nextIdx(cl.startIdx), 1); - assert.deepStrictEqual(cl.prevIdx(cl.endIdx), 1); - assert.deepStrictEqual(cl.prevIdx(cl.startIdx), 1); - assert.deepStrictEqual(cl.storage(), [opts.fill]); + it('should erases non-empty elems only', () => { + const cl = new Cls({ size: 4, fill: 0 }); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); cl.push(5); + cl.pop(); cl.pop(); - cl.rebase(); - assert.deepStrictEqual(cl.storage(), [opts.fill]); + cl._storage[1] = 10; cl._storage[2] = 11; - cl.rebase(true); - assert.deepStrictEqual(cl.storage(), []); + cl.fastErase(); + assert.deepStrictEqual(cl.storage(), [0, 10, 11, 0]); + }); + + it('should erases non-empty elems only (size > 1, length = 1)', () => { + const cl = new Cls({ size: 4, fill: 0 }); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); cl.push(5); cl.push(6); + cl.pop(); cl.pop(); cl.pop(); + + cl._storage[0] = 10; cl._storage[2] = 11; cl._storage[3] = 12; + + cl.fastErase(); + assert.deepStrictEqual(cl.storage(), [10, 0, 11, 12]); + }); + + it('should reset indexes but keep data', () => { + const cl = new Cls({ size: 4, fill: 0 }); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); cl.push(5); cl.push(6); + + cl.fastErase(false); + + assert.deepStrictEqual(cl.content(0, 10), [5, 6, 3, 4]); + }); + }); + + describe('.content()', () => { + it('should return correct content for size = 1', () => { + const cl = new Cls({ size: 1, fill: 0, prealloc: true }); + assert.deepStrictEqual(cl.content(), [0]); + assert.deepStrictEqual(cl.content(1, 1), []); + assert.deepStrictEqual(cl.content(0, 0), [0]); + assert.deepStrictEqual(cl.content(0, 1), [0]); + assert.deepStrictEqual(cl.content(0), [0]); + + cl.push(1); + 
assert.deepStrictEqual(cl.content(), [1]); + assert.deepStrictEqual(cl.content(1, 1), []); + assert.deepStrictEqual(cl.content(0, 0), [1]); + assert.deepStrictEqual(cl.content(0, 1), [1]); + assert.deepStrictEqual(cl.content(0), [1]); + }); + + it('should return correct content for size = 2', () => { + const cl = new Cls({ size: 2, fill: 0, prealloc: true }); + assert.deepStrictEqual(cl.content(), [0]); + assert.deepStrictEqual(cl.content(1, 1), [0]); + assert.deepStrictEqual(cl.content(0, 0), [0]); + assert.deepStrictEqual(cl.content(0, 1), [0, 0]); + assert.deepStrictEqual(cl.content(0), [0]); + + cl.push(1); + assert.deepStrictEqual(cl.content(), [1]); + assert.deepStrictEqual(cl.content(1, 1), [0]); + assert.deepStrictEqual(cl.content(0, 0), [1]); + assert.deepStrictEqual(cl.content(0, 1), [1, 0]); + assert.deepStrictEqual(cl.content(0), [1]); + + cl.push(2); + assert.deepStrictEqual(cl.content(), [1, 2]); + assert.deepStrictEqual(cl.content(1, 1), [2]); + assert.deepStrictEqual(cl.content(0, 0), [1]); + assert.deepStrictEqual(cl.content(0, 1), [1, 2]); + assert.deepStrictEqual(cl.content(0, 2), [1, 2]); + assert.deepStrictEqual(cl.content(1, 2), [2]); + assert.deepStrictEqual(cl.content(2, 2), []); + assert.deepStrictEqual(cl.content(2, 0), [1]); + assert.deepStrictEqual(cl.content(1, 0), [2, 1]); + assert.deepStrictEqual(cl.content(0), [1, 2]); + + cl.push(3); + assert.deepStrictEqual(cl.content(), [2, 3]); + assert.deepStrictEqual(cl.content(1, 1), [2]); + assert.deepStrictEqual(cl.content(0, 0), [3]); + assert.deepStrictEqual(cl.content(0, 1), [3, 2]); + assert.deepStrictEqual(cl.content(0, 2), [3, 2]); + assert.deepStrictEqual(cl.content(1, 2), [2]); + assert.deepStrictEqual(cl.content(2, 2), []); + assert.deepStrictEqual(cl.content(1, 0), [2, 3]); + assert.deepStrictEqual(cl.content(0), [3]); + assert.deepStrictEqual(cl.content(1), [2, 3]); + + cl.push(4); + assert.deepStrictEqual(cl.content(), [3, 4]); + assert.deepStrictEqual(cl.content(1, 1), [4]); + assert.deepStrictEqual(cl.content(0, 0), [3]); + assert.deepStrictEqual(cl.content(0, 1), [3, 4]); + assert.deepStrictEqual(cl.content(0, 2), [3, 4]); + assert.deepStrictEqual(cl.content(1, 2), [4]); + assert.deepStrictEqual(cl.content(2, 2), []); + assert.deepStrictEqual(cl.content(1, 0), [4, 3]); + assert.deepStrictEqual(cl.content(0), [3, 4]); + assert.deepStrictEqual(cl.content(1), [4]); + }); + + it('should return correct content for size = 3', () => { + const cl = new Cls({ size: 3, fill: 0, prealloc: true }); + assert.deepStrictEqual(cl.content(), [0]); + assert.deepStrictEqual(cl.content(1, 1), [0]); + assert.deepStrictEqual(cl.content(0, 0), [0]); + assert.deepStrictEqual(cl.content(0, 1), [0, 0]); + assert.deepStrictEqual(cl.content(0), [0]); + + cl.push(1); + assert.deepStrictEqual(cl.content(), [1]); + assert.deepStrictEqual(cl.content(1, 1), [0]); + assert.deepStrictEqual(cl.content(0, 0), [1]); + assert.deepStrictEqual(cl.content(0, 1), [1, 0]); + assert.deepStrictEqual(cl.content(0), [1]); + + cl.push(2); + assert.deepStrictEqual(cl.content(), [1, 2]); + assert.deepStrictEqual(cl.content(1, 1), [2]); + assert.deepStrictEqual(cl.content(0, 0), [1]); + assert.deepStrictEqual(cl.content(0, 1), [1, 2]); + assert.deepStrictEqual(cl.content(0, 2), [1, 2, 0]); + assert.deepStrictEqual(cl.content(1, 2), [2, 0]); + assert.deepStrictEqual(cl.content(2, 2), [0]); + assert.deepStrictEqual(cl.content(2, 0), [0, 1]); + assert.deepStrictEqual(cl.content(1, 0), [2, 0, 1]); + assert.deepStrictEqual(cl.content(0), [1, 2]); + + 
cl.push(3); + assert.deepStrictEqual(cl.content(), [1, 2, 3]); + assert.deepStrictEqual(cl.content(1, 1), [2]); + assert.deepStrictEqual(cl.content(0, 0), [1]); + assert.deepStrictEqual(cl.content(0, 1), [1, 2]); + assert.deepStrictEqual(cl.content(0, 2), [1, 2, 3]); + assert.deepStrictEqual(cl.content(1, 2), [2, 3]); + assert.deepStrictEqual(cl.content(2, 2), [3]); + assert.deepStrictEqual(cl.content(1, 0), [2, 3, 1]); + assert.deepStrictEqual(cl.content(0), [1, 2, 3]); + assert.deepStrictEqual(cl.content(1), [2, 3]); + + cl.push(4); + assert.deepStrictEqual(cl.content(), [2, 3, 4]); + assert.deepStrictEqual(cl.content(1, 1), [2]); + assert.deepStrictEqual(cl.content(0, 0), [4]); + assert.deepStrictEqual(cl.content(0, 1), [4, 2]); + assert.deepStrictEqual(cl.content(0, 2), [4, 2, 3]); + assert.deepStrictEqual(cl.content(1, 2), [2, 3]); + assert.deepStrictEqual(cl.content(2, 2), [3]); + assert.deepStrictEqual(cl.content(1, 0), [2, 3, 4]); + assert.deepStrictEqual(cl.content(0), [4]); + assert.deepStrictEqual(cl.content(1), [2, 3, 4]); }); }); - describe(`size = 6, opts=${JSON.stringify(opts)}`, () => { - it('should do basic operaionts', () => { - const preallocSize = Math.min(6, (Number.isSafeInteger(opts.prealloc) - ? opts.prealloc - : ((opts.prealloc && 6) || 0))); + describe('.nextIdx()/.prevIdx()', () => { + it('should return correct index for size = 1', () => { + const cl = new Cls({ size: 1, fill: 0, prealloc: true }); + cl.push(1); - const preallocArray = (new Array(preallocSize)); - if (Object.prototype.hasOwnProperty.call(opts, 'fill')) { - preallocArray.fill(opts.fill); + for (let i = 0; i < 10; i += 1) { + assert.deepStrictEqual(cl.nextIdx(i), 0); + assert.deepStrictEqual(cl.prevIdx(i), 0); } + }); - const cl = new CircularArray(Object.assign({ size: 6 }, opts)); - assert.lengthOf(cl, 0); - assert.deepStrictEqual(cl.size, 6); - assert.deepStrictEqual( - cl.storage(), - preallocArray - ); - - assert.deepStrictEqual(cl.push(1), opts.fill); - assert.lengthOf(cl, 1); - assert.deepStrictEqual(cl.peak(0), 1); - assert.deepStrictEqual(cl.allocated, preallocArray.length || 1); - assert.deepStrictEqual(cl.endIdx, 0); - assert.deepStrictEqual(cl.startIdx, 0); - assert.deepStrictEqual(cl.bpeak(), 1); - assert.deepStrictEqual(cl.fpeak(), 1); - - assert.deepStrictEqual(cl.pop(), 1); - assert.lengthOf(cl, 0); - assert.deepStrictEqual(cl.allocated, preallocArray.length || 1); - assert.deepStrictEqual(cl.endIdx, 0); - assert.deepStrictEqual(cl.startIdx, 0); - assert.deepStrictEqual( - cl.storage(), - preallocArray.length ? 
preallocArray : [opts.fill] - ); - - assert.deepStrictEqual(cl.push(1), opts.fill); - assert.deepStrictEqual(cl.push(2), opts.fill); - assert.deepStrictEqual(cl.push(3), opts.fill); - - assert.lengthOf(cl, 3); - assert.deepStrictEqual(cl.allocated, Math.max(preallocArray.length, 3)); - assert.deepStrictEqual(cl.endIdx, 2); - assert.deepStrictEqual(cl.startIdx, 0); - assert.deepStrictEqual(cl.bpeak(), 1); - assert.deepStrictEqual(cl.fpeak(), 3); - - assert.deepStrictEqual(cl.push(4), opts.fill); - assert.deepStrictEqual(cl.push(5), opts.fill); - assert.deepStrictEqual(cl.push(6), opts.fill); - assert.deepStrictEqual(cl.push(7), 1); - assert.deepStrictEqual(cl.pop(), 2); - assert.deepStrictEqual(cl.pop(), 3); - assert.deepStrictEqual(cl.pop(), 4); - assert.deepStrictEqual(cl.push(8), opts.fill); - assert.deepStrictEqual(cl.push(9), opts.fill); - assert.deepStrictEqual(cl.push(10), opts.fill); - assert.deepStrictEqual(cl.push(11), 5); - assert.deepStrictEqual(cl.push(12), 6); - assert.deepStrictEqual(cl.push(13), 7); - assert.deepStrictEqual(cl.push(14), 8); - assert.deepStrictEqual(cl.peak(0), 13); - - assert.deepStrictEqual(cl.pop(), 9); - cl.rebase(); - assert.deepStrictEqual(cl.storage(), [10, 11, 12, 13, 14, opts.fill]); + it('should return correct index for size = 2', () => { + const cl = new Cls({ size: 2, fill: 0, prealloc: true }); + cl.push(1); + cl.push(2); - assert.deepStrictEqual(cl.pop(), 10); - assert.deepStrictEqual(cl.pop(), 11); - cl.rebase(); - assert.deepStrictEqual(cl.storage(), [12, 13, 14, opts.fill, opts.fill, opts.fill]); + for (let i = 0; i < 10; i += 1) { + assert.deepStrictEqual(cl.nextIdx(i), (i >= cl._size ? 0 : (i % 2) ? 0 : 1)); + assert.deepStrictEqual(cl.prevIdx(i), (i >= cl._size ? (cl._size - 1) : (i % 2) ? 0 : 1)); + } + }); - assert.deepStrictEqual(cl.pop(), 12); - cl.rebase(true); - assert.deepStrictEqual(cl.storage(), [13, 14]); + it('should return correct index for size = 3', () => { + const cl = new Cls({ size: 3, fill: 0, prealloc: true }); + cl.push(1); + cl.push(2); + cl.push(3); - assert.deepStrictEqual(cl.pop(), 13); - cl.rebase(); - assert.deepStrictEqual(cl.storage(), [14, opts.fill]); + for (let i = 0; i < 10; i += 1) { + assert.deepStrictEqual(cl.nextIdx(i), (i >= cl._size ? 
0 : ((i + 1) % 3))); + } + assert.deepStrictEqual(cl.prevIdx(0), 2); + assert.deepStrictEqual(cl.prevIdx(1), 0); + assert.deepStrictEqual(cl.prevIdx(2), 1); + assert.deepStrictEqual(cl.prevIdx(3), 2); + assert.deepStrictEqual(cl.prevIdx(4), 2); + assert.deepStrictEqual(cl.prevIdx(5), 2); }); }); - }); + })); - describe('.rebase()', () => { - it('should shift array', () => { - const cl = new CircularArray({ size: 4 }); - cl.push(1); cl.push(2); cl.push(3); cl.push(4); - cl.pop(); - cl.rebase(); - assert.deepStrictEqual(cl.storage(), [2, 3, 4, undefined]); + describe('CircularArrayMR', () => { + it('should create and destroy readers', () => { + const cl = new CircularArrayMR(); + assert.deepStrictEqual(cl.readers, 0, 'should have no readers after initialization'); + + const r1 = cl.reader(); + assert.deepStrictEqual(cl.readers, 1); + + const r2 = cl.reader(); + assert.deepStrictEqual(cl.readers, 2); + + r2.destroy(); + assert.deepStrictEqual(cl.readers, 1); + + r1.destroy(); + assert.deepStrictEqual(cl.readers, 0); }); - it('should rotate array', () => { - const cl = new CircularArray({ size: 4 }); - cl.push(1); cl.push(2); cl.push(3); cl.push(4); - cl.pop(); - cl.pop(); - cl.pop(); - cl.push(5); - cl.rebase(); - assert.deepStrictEqual(cl.storage(), [4, 5, undefined, undefined]); + it('should keep readers after calling .erase()', () => { + const cl = new CircularArrayMR({ size: 10 }); + const r1 = cl.reader(); + + cl.push(1); cl.push(2); cl.push(3); + assert.lengthOf(r1, 3); + assert.deepStrictEqual(r1.pop(), 1); + assert.lengthOf(r1, 2); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + + cl.erase({ keepReaders: true }); + assert.lengthOf(cl, 0); + assert.lengthOf(r1, 0); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + + cl.push(1); cl.push(2); cl.push(3); + + assert.lengthOf(r1, 3); + assert.deepStrictEqual(r1.pop(), 1); + assert.lengthOf(r1, 2); + + const r2 = cl.reader(); + assert.lengthOf(r2, 2); + assert.deepStrictEqual(r2.pop(), 2); + assert.lengthOf(r2, 1); + + cl.erase({ keepReaders: true }); + assert.lengthOf(cl, 0); + assert.lengthOf(r1, 0); + assert.lengthOf(r2, 0); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r2.startIdx, cl.startIdx); + + cl.push(1); cl.push(2); cl.push(3); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.deepStrictEqual(r2.endIdx, cl.endIdx); + assert.lengthOf(r2, 3); + assert.lengthOf(r1, 3); + assert.deepStrictEqual(r2.pop(), 1); + assert.lengthOf(r2, 2); + assert.deepStrictEqual(r1.pop(), 1); + assert.lengthOf(r1, 2); }); - }); - describe('.erase()', () => { - it('should re-use size if not defined', () => { - const cl = new CircularArray({ size: 4 }); - cl.push(1); cl.push(2); cl.push(3); cl.push(4); + it('should destroy readers after calling .erase()', () => { + const cl = new CircularArrayMR({ size: 10 }); + let r1 = cl.reader(); + + cl.push(1); cl.push(2); cl.push(3); + + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.lengthOf(r1, 3); + assert.deepStrictEqual(r1.pop(), 1); + assert.lengthOf(r1, 2); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); cl.erase(); - assert.deepStrictEqual(cl.size, 4); + assert.lengthOf(cl, 0); + assert.throws(() => r1.length, /length/); + + cl.push(1); cl.push(2); cl.push(3); + + assert.throws(() => r1.pop()); + + r1 = cl.reader(); + const r2 = cl.reader(); + assert.lengthOf(r2, 3); + assert.deepStrictEqual(r2.pop(), 1); + assert.lengthOf(r2, 2); + assert.lengthOf(r1, 3); + assert.deepStrictEqual(r1.pop(), 1); + assert.lengthOf(r1, 2); + + cl.erase({ keepReaders: false }); + 
cl.push(1); cl.push(2); cl.push(3); + + assert.throws(() => r1.length, /length/); + assert.throws(() => r1.pop()); + assert.throws(() => r2.length, /length/); + assert.throws(() => r2.pop()); }); - it('should re-use size if not defined', () => { - const cl = new CircularArray({ size: 4 }); - cl.push(1); cl.push(2); cl.push(3); cl.push(4); + it('should reset readers position after calling .fastErase()', () => { + const cl = new CircularArrayMR({ size: 10 }); + let r1 = cl.reader(); - cl.erase({ prealloc: true, fill: 0 }); - assert.deepStrictEqual(cl.size, 4); - assert.deepStrictEqual(cl.storage(), [0, 0, 0, 0]); + cl.push(1); cl.push(2); cl.push(3); + assert.lengthOf(r1, 3); + assert.deepStrictEqual(r1.pop(), 1); + assert.lengthOf(r1, 2); + + cl.fastErase(); + assert.lengthOf(cl, 0); + assert.lengthOf(r1, 0); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + + cl.push(1); cl.push(2); cl.push(3); + + r1 = cl.reader(); + const r2 = cl.reader(); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.deepStrictEqual(r2.endIdx, cl.endIdx); + assert.lengthOf(r2, 3); + assert.deepStrictEqual(r2.pop(), 1); + assert.lengthOf(r2, 2); + assert.lengthOf(r1, 3); + assert.deepStrictEqual(r1.pop(), 1); + assert.lengthOf(r1, 2); + + cl.fastErase(); + assert.lengthOf(cl, 0); + assert.lengthOf(r1, 0); + assert.lengthOf(r2, 0); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r2.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.deepStrictEqual(r2.endIdx, cl.endIdx); }); - it('should re-use "fill" if not defined', () => { - const cl = new CircularArray({ size: 4, fill: 0 }); - cl.push(1); cl.push(2); cl.push(3); cl.push(4); + it('should re-sync readers after calling array.pop()', () => { + const cl = new CircularArrayMR({ size: 10 }); + const r1 = cl.reader(); + + cl.push(1); cl.push(2); cl.push(3); + assert.lengthOf(r1, 3); + assert.deepStrictEqual(r1.pop(), 1); + assert.lengthOf(r1, 2); + + assert.deepStrictEqual(cl.pop(), 2); + assert.lengthOf(cl, 1); + assert.lengthOf(r1, 1); + + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + + const r2 = cl.reader(); + cl.push(4); cl.push(5); cl.push(6); + + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.deepStrictEqual(r2.startIdx, cl.startIdx); + assert.deepStrictEqual(r2.endIdx, cl.endIdx); + + assert.deepStrictEqual(cl.pop(), 3); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.deepStrictEqual(r2.startIdx, cl.startIdx); + assert.deepStrictEqual(r2.endIdx, cl.endIdx); - cl.erase({ prealloc: true }); - assert.deepStrictEqual(cl.size, 4); - assert.deepStrictEqual(cl.storage(), [0, 0, 0, 0]); + assert.deepStrictEqual(r1.pop(), 4); + assert.deepStrictEqual(r2.pop(), 4); }); - it('should not re-use "fill" if not defined at init', () => { - const cl = new CircularArray({ size: 4 }); - cl.push(1); cl.push(2); cl.push(3); cl.push(4); + it('should re-sync readers after calling array.push()', () => { + const cl = new CircularArrayMR({ size: 3 }); + const r1 = cl.reader(); + + cl.push(1); cl.push(2); cl.push(3); + assert.lengthOf(r1, 3); + assert.deepStrictEqual(r1.pop(), 1); + assert.lengthOf(r1, 2); + + cl.push(4); + assert.lengthOf(cl, 3); + assert.lengthOf(r1, 3); + assert.deepStrictEqual(cl.startIdx, 1); + assert.deepStrictEqual(cl.endIdx, 0); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); 
- cl.erase({ prealloc: true }); - assert.deepStrictEqual(cl.size, 4); - assert.deepStrictEqual(cl.storage(), [1, 2, 3, 4], 'should not overide prev values when "fill" not defined'); + cl.push(5); + assert.lengthOf(cl, 3); + assert.lengthOf(r1, 3); + assert.deepStrictEqual(cl.startIdx, 2); + assert.deepStrictEqual(cl.endIdx, 1); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + + const r2 = cl.reader(); + assert.lengthOf(r2, 3); + assert.deepStrictEqual(r2.pop(), 3); + assert.deepStrictEqual(r2.startIdx, 0); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + + cl.push(6); + assert.lengthOf(r2, 3); + assert.lengthOf(cl, 3); + assert.lengthOf(r1, 3); + assert.deepStrictEqual(r2.startIdx, 0); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.deepStrictEqual(cl.startIdx, 0); + assert.deepStrictEqual(cl.endIdx, 2); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + + cl.push(7); + assert.lengthOf(r2, 3); + assert.lengthOf(cl, 3); + assert.lengthOf(r1, 3); + assert.deepStrictEqual(cl.startIdx, 1); + assert.deepStrictEqual(cl.endIdx, 0); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.deepStrictEqual(r2.startIdx, cl.startIdx); + assert.deepStrictEqual(r2.endIdx, cl.endIdx); }); - }); - describe('.fastErase()', () => { - it('should erases all elems (case 1)', () => { - const cl = new CircularArray({ size: 4, fill: 0 }); + it('should re-sync readers after calling .rebase()', () => { + const cl = new CircularArrayMR({ size: 4 }); + const r1 = cl.reader(); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); + assert.lengthOf(cl, 4); + assert.lengthOf(r1, 4); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + + r1.pop(); r1.pop(); + assert.lengthOf(cl, 2); + assert.lengthOf(r1, 2); + assert.deepStrictEqual(r1.startIdx, 2); + assert.deepStrictEqual(r1.endIdx, 3); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); - cl.fastErase(); - assert.deepStrictEqual(cl.storage(), [0, 0, 0, 0]); + cl.rebase(); + assert.lengthOf(cl, 2); + assert.lengthOf(r1, 2); + assert.deepStrictEqual(r1.startIdx, 0); + assert.deepStrictEqual(r1.endIdx, 1); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + + const r2 = cl.reader(); + cl.push(5); cl.push(6); cl.push(7); cl.push(8); + assert.deepStrictEqual(r1.startIdx, 2); + assert.deepStrictEqual(r1.endIdx, 1); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.deepStrictEqual(r2.startIdx, cl.startIdx); + assert.deepStrictEqual(r2.endIdx, cl.endIdx); + + r1.pop(); r2.pop(); r2.pop(); + assert.deepStrictEqual(r1.startIdx, 3); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.deepStrictEqual(r2.startIdx, 0); + assert.deepStrictEqual(r2.endIdx, cl.endIdx); + assert.lengthOf(cl, 3); + assert.lengthOf(r1, 3); + assert.lengthOf(r2, 2); + + cl.rebase(true); + assert.lengthOf(cl, 3); + assert.lengthOf(r1, 3); + assert.lengthOf(r2, 2); + assert.deepStrictEqual(r1.startIdx, 0); + assert.deepStrictEqual(r1.endIdx, 2); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.deepStrictEqual(r2.startIdx, 1); + assert.deepStrictEqual(r2.endIdx, cl.endIdx); }); - it('should erases all elems (case 2)', () => { - const cl = new CircularArray({ size: 4, fill: 0 }); - 
cl.push(1); cl.push(2); cl.push(3); cl.push(4); cl.push(5); + it('should remove extra nodes on reader.destroy()', () => { + const cl = new CircularArrayMR({ size: 4 }); + let r1 = cl.reader(); - cl.fastErase(); - assert.deepStrictEqual(cl.storage(), [0, 0, 0, 0]); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); + assert.lengthOf(cl, 4); + assert.lengthOf(r1, 4); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + + r1.pop(); r1.pop(); + assert.lengthOf(cl, 2); + assert.lengthOf(r1, 2); + assert.deepStrictEqual(r1.startIdx, 2); + assert.deepStrictEqual(r1.endIdx, 3); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + + r1.destroy(); + assert.throws(() => r1.length); + + assert.lengthOf(cl, 2); + assert.deepStrictEqual(cl.startIdx, 2); + assert.deepStrictEqual(cl.endIdx, 3); + + r1 = cl.reader(); + let r2 = cl.reader(); + cl.push(5); cl.push(6); + assert.deepStrictEqual(r1.startIdx, 2); + assert.deepStrictEqual(r1.endIdx, 1); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.deepStrictEqual(r2.startIdx, cl.startIdx); + assert.deepStrictEqual(r2.endIdx, cl.endIdx); + + r2.pop(); r2.pop(); r2.pop(); + assert.deepStrictEqual(r2.startIdx, 1); + assert.deepStrictEqual(r2.endIdx, 1); + assert.deepStrictEqual(r1.startIdx, 2); + assert.deepStrictEqual(r1.endIdx, 1); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + + assert.lengthOf(cl, 4); + assert.lengthOf(r1, 4); + assert.lengthOf(r2, 1); + + r1.destroy(); + + assert.lengthOf(cl, 1); + assert.lengthOf(r2, 1); + assert.deepStrictEqual(r2.startIdx, 1); + assert.deepStrictEqual(r2.endIdx, 1); + assert.deepStrictEqual(r2.startIdx, cl.startIdx); + assert.deepStrictEqual(r2.endIdx, cl.endIdx); + + cl.push(7); cl.push(8); cl.push(9); + r1 = cl.reader(); + + assert.deepStrictEqual(r2.startIdx, 1); + assert.deepStrictEqual(r2.endIdx, 0); + assert.deepStrictEqual(r2.startIdx, cl.startIdx); + assert.deepStrictEqual(r2.endIdx, cl.endIdx); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + + r2.pop(); r2.pop(); r2.pop(); + assert.deepStrictEqual(r2.startIdx, 0); + assert.deepStrictEqual(r2.endIdx, 0); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.lengthOf(cl, 4); + assert.lengthOf(r1, 4); + assert.lengthOf(r2, 1); + + r2.destroy(); + assert.throws(() => r2.length); + assert.deepStrictEqual(r1.startIdx, cl.startIdx); + assert.deepStrictEqual(r1.endIdx, cl.endIdx); + assert.lengthOf(cl, 4); + assert.lengthOf(r1, 4); + + r2 = cl.reader(); + assert.lengthOf(r2, 4); + + assert.deepStrictEqual(cl.startIdx, 1); + assert.deepStrictEqual(r2.startIdx, 1); + assert.deepStrictEqual(r1.startIdx, 1); + + r2.pop(); r2.pop(); r2.pop(); r2.pop(); + assert.lengthOf(r2, 0); + r1.destroy(); + assert.lengthOf(r2, 0); + assert.lengthOf(cl, 0); + // should reset all indexes - all data removed from the list + assert.deepStrictEqual(cl.startIdx, 0); + assert.deepStrictEqual(r2.startIdx, 0); + + r1 = cl.reader(); + assert.lengthOf(r1, 0); + assert.lengthOf(r2, 0); + assert.lengthOf(cl, 0); + + cl.push(1); cl.push(2); cl.push(3); + assert.lengthOf(r1, 3); + assert.lengthOf(r2, 3); + assert.lengthOf(cl, 3); + + r2.pop(); r2.pop(); r2.pop(); + r1.pop(); + assert.lengthOf(r1, 2); + assert.lengthOf(r2, 0); + assert.lengthOf(cl, 2); + + cl.push(4); cl.push(5); + 
assert.lengthOf(r1, 4); + assert.lengthOf(r2, 2); + assert.lengthOf(cl, 4); + + r1.destroy(); + assert.throws(() => r1.length); + assert.lengthOf(r2, 2); + assert.lengthOf(cl, 2); }); - it('should erases all elems (case 3)', () => { - const cl = new CircularArray({ size: 4, fill: 0 }); - cl.push(1); cl.push(2); cl.push(3); cl.push(4); cl.push(5); cl.push(6); + it('reader.needCopy()', () => { + const cl = new CircularArrayMR({ size: 4 }); + const r1 = cl.reader(); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); + + assert.lengthOf(cl, 4); + assert.lengthOf(r1, 4); - cl.fastErase(); - assert.deepStrictEqual(cl.storage(), [0, 0, 0, 0]); - }); + while (r1.length) { + assert.isFalse(r1.needCopy()); + r1.pop(); + } - it('should erase all elems (size of 1)', () => { - const cl = new CircularArray({ size: 1, fill: 0 }); - cl.push(1); cl.push(2); cl.push(3); cl.push(4); cl.push(5); cl.push(6); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); + const r2 = cl.reader(); + + assert.lengthOf(cl, 4); + assert.lengthOf(r1, 4); + assert.lengthOf(r2, 4); + + while (r1.length) { + assert.isTrue(r1.needCopy()); + r1.pop(); + } + while (r2.length) { + assert.isFalse(r2.needCopy()); + assert.lengthOf(r1, 0); + r2.pop(); + assert.lengthOf(r1, 0); + } + + assert.lengthOf(cl, 0); + assert.lengthOf(r1, 0); + assert.lengthOf(r2, 0); - cl.fastErase(); - assert.deepStrictEqual(cl.storage(), [0]); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); + assert.lengthOf(cl, 4); + assert.lengthOf(r1, 4); + assert.lengthOf(r2, 4); + + assert.isTrue(r1.needCopy()); + assert.isTrue(r2.needCopy()); + + r1.pop(); r1.pop(); + assert.isTrue(r1.needCopy()); + + r2.destroy(); + assert.throws(() => r2.length); + assert.isFalse(r1.needCopy()); }); - it('should erases non-empty elems only', () => { - const cl = new CircularArray({ size: 4, fill: 0 }); - cl.push(1); cl.push(2); cl.push(3); cl.push(4); cl.push(5); - cl.pop(); cl.pop(); + it('should update reader when new data pushed/poped', () => { + const cl = new CircularArrayMR({ size: 4 }); + const r1 = cl.reader(); - cl._storage[1] = 10; cl._storage[2] = 11; + assert.lengthOf(r1, 0); + cl.push(1); + assert.lengthOf(r1, 1); + r1.pop(); + assert.lengthOf(r1, 0); + assert.lengthOf(cl, 0); - cl.fastErase(); - assert.deepStrictEqual(cl.storage(), [0, 10, 11, 0]); + cl.push(2); + assert.lengthOf(r1, 1); + assert.lengthOf(cl, 1); + + const r2 = cl.reader(); + assert.lengthOf(r2, 1); + + cl.push(3); cl.push(4); + + assert.lengthOf(r2, 3); + assert.lengthOf(r1, 3); + assert.lengthOf(cl, 3); + + r2.pop(); r2.pop(); r2.pop(); + + assert.lengthOf(r2, 0); + assert.lengthOf(r1, 3); + assert.lengthOf(cl, 3); + + r1.pop(); r1.pop(); + assert.lengthOf(r2, 0); + assert.lengthOf(r1, 1); + assert.lengthOf(cl, 1); + + cl.push(5); cl.push(6); cl.push(7); + + assert.lengthOf(r2, 3); + assert.lengthOf(r1, 4); + assert.lengthOf(cl, 4); + + r1.pop(); + assert.lengthOf(r2, 3); + assert.lengthOf(r1, 3); + assert.lengthOf(cl, 3); + + r1.pop(); + assert.lengthOf(r2, 2); + assert.lengthOf(r1, 2); + assert.lengthOf(cl, 2); + + r1.pop(); + assert.lengthOf(r2, 2); + assert.lengthOf(r1, 1); + assert.lengthOf(cl, 2); + + r1.pop(); + assert.lengthOf(r2, 2); + assert.lengthOf(r1, 0); + assert.lengthOf(cl, 2); + + r2.pop(); + assert.lengthOf(r2, 1); + assert.lengthOf(r1, 0); + assert.lengthOf(cl, 1); + + r2.pop(); + assert.lengthOf(r2, 0); + assert.lengthOf(r1, 0); + assert.lengthOf(cl, 0); }); - it('should erases non-empty elems only (size > 1, length = 1)', () => { - const cl = new CircularArray({ size: 4, fill: 0 }); 
- cl.push(1); cl.push(2); cl.push(3); cl.push(4); cl.push(5); cl.push(6); - cl.pop(); cl.pop(); cl.pop(); + it('edge case: size = 1', () => { + const cl = new CircularArrayMR({ size: 1 }); + cl.push(1); + const r1 = cl.reader(); + + assert.lengthOf(cl, 1); + assert.lengthOf(r1, 1); + + assert.deepStrictEqual(r1.pop(), 1); + assert.lengthOf(cl, 0); + assert.lengthOf(r1, 0); + + cl.push(1); + assert.lengthOf(cl, 1); + assert.lengthOf(r1, 1); + cl.push(2); + assert.lengthOf(cl, 1); + assert.lengthOf(r1, 1); + assert.deepStrictEqual(r1.pop(), 2); + assert.lengthOf(cl, 0); + assert.lengthOf(r1, 0); + + const r2 = cl.reader(); + assert.lengthOf(cl, 0); + assert.lengthOf(r1, 0); + assert.lengthOf(r2, 0); + + cl.push(3); + assert.lengthOf(cl, 1); + assert.lengthOf(r1, 1); + assert.lengthOf(r2, 1); + + cl.pop(); + assert.lengthOf(cl, 0); + assert.lengthOf(r1, 0); + assert.lengthOf(r2, 0); - cl._storage[0] = 10; cl._storage[2] = 11; cl._storage[3] = 12; + cl.push(4); + assert.lengthOf(cl, 1); + assert.lengthOf(r1, 1); + assert.lengthOf(r2, 1); - cl.fastErase(); - assert.deepStrictEqual(cl.storage(), [10, 0, 11, 12]); + assert.deepStrictEqual(r2.pop(), 4); + assert.lengthOf(cl, 1); + assert.lengthOf(r1, 1); + assert.lengthOf(r2, 0); + + assert.deepStrictEqual(r1.pop(), 4); + assert.lengthOf(cl, 0); + assert.lengthOf(r1, 0); + assert.lengthOf(r2, 0); + + cl.push(5); + assert.lengthOf(cl, 1); + assert.lengthOf(r1, 1); + assert.lengthOf(r2, 1); + + assert.deepStrictEqual(r2.pop(), 5); + assert.lengthOf(cl, 1); + assert.lengthOf(r1, 1); + assert.lengthOf(r2, 0); + + cl.pop(); + assert.lengthOf(cl, 0); + assert.lengthOf(r1, 0); + assert.lengthOf(r2, 0); + + cl.push(6); + assert.lengthOf(cl, 1); + assert.lengthOf(r1, 1); + assert.lengthOf(r2, 1); + + assert.deepStrictEqual(r2.pop(), 6); + assert.lengthOf(cl, 1); + assert.lengthOf(r1, 1); + assert.lengthOf(r2, 0); + + cl.push(7); + assert.lengthOf(cl, 1); + assert.lengthOf(r1, 1); + assert.lengthOf(r2, 1); + + assert.deepStrictEqual(r2.pop(), 7); + assert.lengthOf(cl, 1); + assert.lengthOf(r1, 1); + assert.lengthOf(r2, 0); + + assert.deepStrictEqual(r1.pop(), 7); + assert.lengthOf(cl, 0); + assert.lengthOf(r1, 0); + assert.lengthOf(r2, 0); }); }); }); diff --git a/test/unit/utils/structures/circularLinkedListTests.js b/test/unit/utils/structures/circularLinkedListTests.js index 6ccb2fcb..df62dcfc 100644 --- a/test/unit/utils/structures/circularLinkedListTests.js +++ b/test/unit/utils/structures/circularLinkedListTests.js @@ -23,408 +23,1051 @@ const assert = require('../../shared/assert'); const sourceCode = require('../../shared/sourceCode'); const CircularLinkedList = sourceCode('src/lib/utils/structures').CircularLinkedList; +const CircularLinkedListMR = sourceCode('src/lib/utils/structures').CircularLinkedListMR; moduleCache.remember(); describe('Structures / Circular Linked List', () => { - describe('initialization', () => { - it('should create list with default size equal Number.MAX_SAFE_INTEGER', () => { - const cl = new CircularLinkedList(); - assert.deepStrictEqual(cl.size, Number.MAX_SAFE_INTEGER, 'should use default size value'); - assert.lengthOf(cl, 0, 'should be empty'); - assert.isFalse(cl.ring, 'should have ring disabled by default'); + [ + CircularLinkedList, + CircularLinkedListMR + ].forEach((Cls) => describe(`${Cls.name}`, () => { + describe('initialization', () => { + it('should create list with default size equal Number.MAX_SAFE_INTEGER', () => { + const cl = new Cls(); + assert.deepStrictEqual(cl.size, Number.MAX_SAFE_INTEGER, 'should 
use default size value'); + assert.lengthOf(cl, 0, 'should be empty'); + assert.isFalse(cl.ring, 'should have ring disabled by default'); + }); + + it('should throw error on incorrect size', () => { + assert.throws(() => new Cls(-1)); + assert.throws(() => new Cls(0)); + assert.throws(() => new Cls(NaN)); + assert.throws(() => new Cls(false)); + assert.throws(() => new Cls(Number.MAX_VALUE)); + }); + + it('should allow to specify size', () => { + let cl = new Cls(10); + assert.deepStrictEqual(cl.size, 10, 'should use provided size'); + + cl = new Cls(1); + assert.deepStrictEqual(cl.size, 1, 'should use provided size'); + }); }); - it('should throw error on incorrect size', () => { - assert.throws(() => new CircularLinkedList(-1)); - assert.throws(() => new CircularLinkedList(0)); - assert.throws(() => new CircularLinkedList(NaN)); - assert.throws(() => new CircularLinkedList(false)); - assert.throws(() => new CircularLinkedList(Number.MAX_VALUE)); + describe('non-ring', () => { + it('should do basic operations', () => { + const cl = new Cls(); + assert.deepStrictEqual(cl.size, Number.MAX_SAFE_INTEGER, 'should use default size value'); + assert.lengthOf(cl, 0, 'should be empty'); + assert.isFalse(cl.ring, 'should have ring disabled by default'); + + cl.push(0); + assert.lengthOf(cl, 1, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 0); + assert.deepStrictEqual(cl.fpeak(), 0); + + cl.push(1); + assert.lengthOf(cl, 2, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 0); + assert.deepStrictEqual(cl.fpeak(), 1); + assert.deepStrictEqual(cl.size, Number.MAX_SAFE_INTEGER, 'should use default size value'); + + cl.push(2); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 0); + assert.deepStrictEqual(cl.fpeak(), 2); + + assert.deepStrictEqual(cl.pop(), 0, 'should pop element'); + assert.lengthOf(cl, 2, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 1); + assert.deepStrictEqual(cl.fpeak(), 2); + + assert.deepStrictEqual(cl.pop(), 1, 'should pop element'); + assert.lengthOf(cl, 1, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 2); + assert.deepStrictEqual(cl.fpeak(), 2); + assert.deepStrictEqual(cl.size, Number.MAX_SAFE_INTEGER, 'should use default size value'); + + assert.deepStrictEqual(cl.pop(), 2, 'should pop element'); + assert.lengthOf(cl, 0, 'should be empty'); + assert.deepStrictEqual(cl.size, Number.MAX_SAFE_INTEGER, 'should use default size value'); + + cl.push(0); + cl.push(1); + cl.push(2); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 0); + assert.deepStrictEqual(cl.fpeak(), 2); + + cl.erase(); + assert.lengthOf(cl, 0, 'should be empty'); + }); }); - it('should allow to specify size', () => { - let cl = new CircularLinkedList(10); - assert.deepStrictEqual(cl.size, 10, 'should use provided size'); + describe('ring', () => { + it('should be able to work with size = 1', () => { + const cl = new Cls(1); + + cl.push(0); + cl.push(1); + cl.push(2); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 0); + assert.deepStrictEqual(cl.fpeak(), 2); + + cl.enableRing(); + assert.isTrue(cl.ring); + + assert.deepStrictEqual(cl.push(3), 0); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 1); + assert.deepStrictEqual(cl.fpeak(), 3); + + assert.deepStrictEqual(cl.push(4), 1); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 2); + assert.deepStrictEqual(cl.fpeak(), 4); + 
+ assert.deepStrictEqual(cl.pop(), 2); + assert.lengthOf(cl, 2, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 3); + assert.deepStrictEqual(cl.fpeak(), 4); + + assert.deepStrictEqual(cl.push(5), 3); + assert.lengthOf(cl, 2, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 4); + assert.deepStrictEqual(cl.fpeak(), 5); + + assert.deepStrictEqual(cl.pop(), 4); + assert.lengthOf(cl, 1, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 5); + assert.deepStrictEqual(cl.fpeak(), 5); + + assert.deepStrictEqual(cl.push(6), 5); + assert.lengthOf(cl, 1, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 6); + assert.deepStrictEqual(cl.fpeak(), 6); + + assert.deepStrictEqual(cl.pop(), 6); + assert.lengthOf(cl, 0, 'should be empty'); + + assert.deepStrictEqual(cl.push(7), undefined); + assert.lengthOf(cl, 1, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 7); + assert.deepStrictEqual(cl.fpeak(), 7); + + assert.deepStrictEqual(cl.push(8), 7); + assert.lengthOf(cl, 1, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 8); + assert.deepStrictEqual(cl.fpeak(), 8); + + cl.disableRing(); + + assert.deepStrictEqual(cl.push(9), undefined); + assert.lengthOf(cl, 2, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 8); + assert.deepStrictEqual(cl.fpeak(), 9); + + cl.erase(); + assert.lengthOf(cl, 0, 'should be empty'); + assert.isFalse(cl.ring, 'should disable ring on erase'); + }); + + it('should be able to work with size = 2', () => { + const cl = new Cls(2); + + cl.push(0); + cl.push(1); + cl.push(2); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 0); + assert.deepStrictEqual(cl.fpeak(), 2); + + cl.enableRing(); + assert.isTrue(cl.ring); + + assert.deepStrictEqual(cl.push(3), 0); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 1); + assert.deepStrictEqual(cl.fpeak(), 3); + + assert.deepStrictEqual(cl.push(4), 1); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 2); + assert.deepStrictEqual(cl.fpeak(), 4); + + assert.deepStrictEqual(cl.pop(), 2); + assert.lengthOf(cl, 2, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 3); + assert.deepStrictEqual(cl.fpeak(), 4); + + assert.deepStrictEqual(cl.push(5), 3); + assert.lengthOf(cl, 2, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 4); + assert.deepStrictEqual(cl.fpeak(), 5); + + assert.deepStrictEqual(cl.pop(), 4); + assert.lengthOf(cl, 1, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 5); + assert.deepStrictEqual(cl.fpeak(), 5); + + assert.deepStrictEqual(cl.push(6), undefined); + assert.lengthOf(cl, 2, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 5); + assert.deepStrictEqual(cl.fpeak(), 6); + + assert.deepStrictEqual(cl.pop(), 5); + assert.lengthOf(cl, 1, 'should not be empty'); + + assert.deepStrictEqual(cl.pop(), 6); + assert.lengthOf(cl, 0, 'should be empty'); + + assert.deepStrictEqual(cl.push(7), undefined); + assert.lengthOf(cl, 1, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 7); + assert.deepStrictEqual(cl.fpeak(), 7); + + assert.deepStrictEqual(cl.push(8), undefined); + assert.lengthOf(cl, 2, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 7); + assert.deepStrictEqual(cl.fpeak(), 8); + + assert.deepStrictEqual(cl.push(9), 7); + assert.lengthOf(cl, 2, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 8); + assert.deepStrictEqual(cl.fpeak(), 9); + + cl.disableRing(); + + 
assert.deepStrictEqual(cl.push(10), undefined); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 8); + assert.deepStrictEqual(cl.fpeak(), 10); + + cl.erase(); + assert.lengthOf(cl, 0, 'should be empty'); + assert.isFalse(cl.ring, 'should disable ring on erase'); + }); + + it('should be able to work with size = 3', () => { + const cl = new Cls(3); + + cl.push(0); + cl.push(1); + cl.push(2); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 0); + assert.deepStrictEqual(cl.fpeak(), 2); + + cl.enableRing(); + assert.isTrue(cl.ring); + + assert.deepStrictEqual(cl.push(3), 0); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 1); + assert.deepStrictEqual(cl.fpeak(), 3); + + assert.deepStrictEqual(cl.push(4), 1); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 2); + assert.deepStrictEqual(cl.fpeak(), 4); + + assert.deepStrictEqual(cl.pop(), 2); + assert.lengthOf(cl, 2, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 3); + assert.deepStrictEqual(cl.fpeak(), 4); + + assert.deepStrictEqual(cl.push(5), undefined); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 3); + assert.deepStrictEqual(cl.fpeak(), 5); + + assert.deepStrictEqual(cl.pop(), 3); + assert.lengthOf(cl, 2, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 4); + assert.deepStrictEqual(cl.fpeak(), 5); + + assert.deepStrictEqual(cl.push(6), undefined); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 4); + assert.deepStrictEqual(cl.fpeak(), 6); + + assert.deepStrictEqual(cl.pop(), 4); + assert.lengthOf(cl, 2, 'should not be empty'); + + assert.deepStrictEqual(cl.pop(), 5); + assert.lengthOf(cl, 1, 'should not be empty'); + + assert.deepStrictEqual(cl.push(7), undefined); + assert.lengthOf(cl, 2, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 6); + assert.deepStrictEqual(cl.fpeak(), 7); + + assert.deepStrictEqual(cl.push(8), undefined); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 6); + assert.deepStrictEqual(cl.fpeak(), 8); + + assert.deepStrictEqual(cl.push(9), 6); + assert.lengthOf(cl, 3, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 7); + assert.deepStrictEqual(cl.fpeak(), 9); + + cl.disableRing(); + + assert.deepStrictEqual(cl.push(10), undefined); + assert.lengthOf(cl, 4, 'should not be empty'); + assert.deepStrictEqual(cl.bpeak(), 7); + assert.deepStrictEqual(cl.fpeak(), 10); + + cl.erase(); + assert.lengthOf(cl, 0, 'should be empty'); + assert.isFalse(cl.ring, 'should disable ring on erase'); + }); + + it('should set new size and restore old one', () => { + const cl = new Cls(1); + cl.push(1); + cl.push(2); + + assert.lengthOf(cl, 2); + + cl.enableRing(); + assert.deepStrictEqual(cl.push(3), 1); + assert.deepStrictEqual(cl.push(4), 2); + assert.deepStrictEqual(cl.pop(), 3); + assert.deepStrictEqual(cl.push(5), 4); + assert.deepStrictEqual(cl.push(6), 5); + + assert.deepStrictEqual(cl.size, 1); + + cl.enableRing(3); + assert.deepStrictEqual(cl.size, 1, 'should ignore new size while enabled'); + + cl.disableRing(); + cl.enableRing(2); + assert.deepStrictEqual(cl.size, 2, 'should set new size while enabled'); + + assert.deepStrictEqual(cl.push(7), undefined); + assert.deepStrictEqual(cl.push(8), 6); + assert.deepStrictEqual(cl.push(9), 7); + assert.lengthOf(cl, 2); + + cl.disableRing(); + assert.isFalse(cl.ring); + // should 
have no effect + cl.disableRing(); + assert.isFalse(cl.ring); + + assert.deepStrictEqual(cl.size, 1, 'should restore old value'); + + cl.enableRing(); + assert.deepStrictEqual(cl.push(10), 8); + assert.deepStrictEqual(cl.pop(), 9); + assert.deepStrictEqual(cl.push(11), 10); + assert.deepStrictEqual(cl.push(12), 11); + + cl.disableRing(); + cl.enableRing(3); + assert.deepStrictEqual(cl.size, 3, 'should set new value'); + + cl.disableRing(false); + assert.deepStrictEqual(cl.size, 3, 'should not restore prev value'); + + cl.enableRing(5); + assert.deepStrictEqual(cl.size, 5, 'should set new value'); - cl = new CircularLinkedList(1); - assert.deepStrictEqual(cl.size, 1, 'should use provided size'); + cl.disableRing(true); + assert.deepStrictEqual(cl.size, 3, 'should restore old value'); + }); + + it('should be able to enable/disable ring on empty list', () => { + const cl = new Cls(3); + cl.enableRing(); + + assert.deepStrictEqual(cl.size, 3); + assert.isTrue(cl.ring); + assert.lengthOf(cl, 0); + + cl.push(1); cl.push(2); cl.push(3); + assert.deepStrictEqual(cl.push(4), 1); + assert.lengthOf(cl, 3); + + assert.deepStrictEqual(cl.pop(), 2); + assert.deepStrictEqual(cl.pop(), 3); + assert.deepStrictEqual(cl.pop(), 4); + + assert.lengthOf(cl, 0); + cl.disableRing(); + + cl.push(5); + assert.lengthOf(cl, 1); + }); }); - }); - describe('non-ring', () => { - it('should do basic operations', () => { - const cl = new CircularLinkedList(); - assert.deepStrictEqual(cl.size, Number.MAX_SAFE_INTEGER, 'should use default size value'); - assert.lengthOf(cl, 0, 'should be empty'); - assert.isFalse(cl.ring, 'should have ring disabled by default'); + it('should provie access to nodes', () => { + const cl = new Cls(); - cl.push(0); - assert.lengthOf(cl, 1, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 0); - assert.deepStrictEqual(cl.fpeak(), 0); + assert.isNull(cl.back); + assert.isNull(cl.front); cl.push(1); - assert.lengthOf(cl, 2, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 0); - assert.deepStrictEqual(cl.fpeak(), 1); - assert.deepStrictEqual(cl.size, Number.MAX_SAFE_INTEGER, 'should use default size value'); + assert.deepStrictEqual(cl.back, { + next: null, + value: 1 + }); + assert.deepStrictEqual(cl.front, { + next: null, + value: 1 + }); cl.push(2); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 0); - assert.deepStrictEqual(cl.fpeak(), 2); - - assert.deepStrictEqual(cl.pop(), 0, 'should pop element'); - assert.lengthOf(cl, 2, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 1); - assert.deepStrictEqual(cl.fpeak(), 2); - - assert.deepStrictEqual(cl.pop(), 1, 'should pop element'); - assert.lengthOf(cl, 1, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 2); - assert.deepStrictEqual(cl.fpeak(), 2); - assert.deepStrictEqual(cl.size, Number.MAX_SAFE_INTEGER, 'should use default size value'); - - assert.deepStrictEqual(cl.pop(), 2, 'should pop element'); - assert.lengthOf(cl, 0, 'should be empty'); - assert.deepStrictEqual(cl.size, Number.MAX_SAFE_INTEGER, 'should use default size value'); - - cl.push(0); - cl.push(1); - cl.push(2); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 0); - assert.deepStrictEqual(cl.fpeak(), 2); + assert.deepStrictEqual(cl.back.value, 1); + assert.deepStrictEqual(cl.front.value, 2); - cl.erase(); - assert.lengthOf(cl, 0, 'should be empty'); + cl.push(3); + assert.deepStrictEqual(cl.back.value, 1); + assert.deepStrictEqual(cl.front.value, 3); }); - 
}); + })); - describe('ring', () => { - it('should be able to work with size = 1', () => { - const cl = new CircularLinkedList(1); + describe('CircularLinkedListMR', () => { + it('should create and destroy readers', () => { + const cl = new CircularLinkedListMR(); + assert.deepStrictEqual(cl.readers, 0, 'should have no readers after initialization'); - cl.push(0); - cl.push(1); - cl.push(2); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 0); - assert.deepStrictEqual(cl.fpeak(), 2); + const r1 = cl.reader(); + assert.deepStrictEqual(cl.readers, 1); - cl.enableRing(); - assert.isTrue(cl.ring); + const r2 = cl.reader(); + assert.deepStrictEqual(cl.readers, 2); - assert.deepStrictEqual(cl.push(3), 0); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 1); - assert.deepStrictEqual(cl.fpeak(), 3); + r2.destroy(); + assert.deepStrictEqual(cl.readers, 1); - assert.deepStrictEqual(cl.push(4), 1); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 2); - assert.deepStrictEqual(cl.fpeak(), 4); + r1.destroy(); + assert.deepStrictEqual(cl.readers, 0); + }); - assert.deepStrictEqual(cl.pop(), 2); - assert.lengthOf(cl, 2, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 3); - assert.deepStrictEqual(cl.fpeak(), 4); + it('should keep readers after calling .erase()', () => { + const cl = new CircularLinkedListMR(10); + const r1 = cl.reader(); - assert.deepStrictEqual(cl.push(5), 3); - assert.lengthOf(cl, 2, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 4); - assert.deepStrictEqual(cl.fpeak(), 5); + cl.push(1); cl.push(2); cl.push(3); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.deepStrictEqual(r1.pop(), 1); + assert.isTrue(r1.hasData()); + assert.lengthOf(cl, 2); - assert.deepStrictEqual(cl.pop(), 4); - assert.lengthOf(cl, 1, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 5); - assert.deepStrictEqual(cl.fpeak(), 5); + cl.erase(true); + assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); + assert.throws(() => r1.pop()); - assert.deepStrictEqual(cl.push(6), 5); - assert.lengthOf(cl, 1, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 6); - assert.deepStrictEqual(cl.fpeak(), 6); + cl.push(1); cl.push(2); cl.push(3); - assert.deepStrictEqual(cl.pop(), 6); - assert.lengthOf(cl, 0, 'should be empty'); + assert.lengthOf(cl, 3); + assert.deepStrictEqual(r1.pop(), 1); + assert.isTrue(r1.hasData()); - assert.deepStrictEqual(cl.push(7), undefined); - assert.lengthOf(cl, 1, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 7); - assert.deepStrictEqual(cl.fpeak(), 7); + const r2 = cl.reader(); + assert.isTrue(r2.hasData()); + assert.deepStrictEqual(r2.pop(), 2); + assert.isTrue(r2.hasData()); + assert.lengthOf(cl, 2); + + cl.erase(true); + assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); + assert.isFalse(r2.hasData()); + assert.throws(() => r1.pop()); + assert.throws(() => r2.pop()); + + cl.push(1); cl.push(2); cl.push(3); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + assert.deepStrictEqual(r2.pop(), 1); + assert.isTrue(r2.hasData()); + assert.deepStrictEqual(r1.pop(), 1); + assert.isTrue(r1.hasData()); + assert.lengthOf(cl, 2); + }); - assert.deepStrictEqual(cl.push(8), 7); - assert.lengthOf(cl, 1, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 8); - assert.deepStrictEqual(cl.fpeak(), 8); + it('should destroy readers after calling .erase()', () => { + const cl = 
new CircularLinkedListMR(10); + let r1 = cl.reader(); - cl.disableRing(); + cl.push(1); cl.push(2); cl.push(3); - assert.deepStrictEqual(cl.push(9), undefined); - assert.lengthOf(cl, 2, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 8); - assert.deepStrictEqual(cl.fpeak(), 9); + assert.isTrue(r1.hasData()); + assert.lengthOf(cl, 3); + assert.deepStrictEqual(r1.pop(), 1); + assert.isTrue(r1.hasData()); + assert.lengthOf(cl, 2); cl.erase(); - assert.lengthOf(cl, 0, 'should be empty'); - assert.isFalse(cl.ring, 'should disable ring on erase'); - }); + assert.lengthOf(cl, 0); + assert.throws(() => r1.hasData()); + assert.throws(() => r1.pop()); - it('should be able to work with size = 2', () => { - const cl = new CircularLinkedList(2); + cl.push(1); cl.push(2); cl.push(3); - cl.push(0); - cl.push(1); - cl.push(2); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 0); - assert.deepStrictEqual(cl.fpeak(), 2); + assert.throws(() => r1.pop()); - cl.enableRing(); - assert.isTrue(cl.ring); + r1 = cl.reader(); + const r2 = cl.reader(); + assert.lengthOf(cl, 3); + assert.isTrue(r2.hasData()); + assert.isTrue(r1.hasData()); + assert.deepStrictEqual(r2.pop(), 1); + assert.lengthOf(cl, 3); + assert.isTrue(r2.hasData()); + assert.isTrue(r1.hasData()); + assert.deepStrictEqual(r1.pop(), 1); + assert.lengthOf(cl, 2); + assert.isTrue(r2.hasData()); + assert.isTrue(r1.hasData()); - assert.deepStrictEqual(cl.push(3), 0); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 1); - assert.deepStrictEqual(cl.fpeak(), 3); + cl.erase(false); + cl.push(1); cl.push(2); cl.push(3); + + assert.throws(() => r1.hasData()); + assert.throws(() => r1.pop()); + assert.throws(() => r2.hasData()); + assert.throws(() => r2.pop()); + }); - assert.deepStrictEqual(cl.push(4), 1); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 2); - assert.deepStrictEqual(cl.fpeak(), 4); + it('should re-sync readers after calling array.pop()', () => { + const cl = new CircularLinkedListMR(10); + const r1 = cl.reader(); + + cl.push(1); cl.push(2); cl.push(3); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.deepStrictEqual(r1.pop(), 1); + assert.isTrue(r1.hasData()); + assert.lengthOf(cl, 2); assert.deepStrictEqual(cl.pop(), 2); - assert.lengthOf(cl, 2, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 3); - assert.deepStrictEqual(cl.fpeak(), 4); + assert.isTrue(r1.hasData()); + assert.lengthOf(cl, 1); - assert.deepStrictEqual(cl.push(5), 3); - assert.lengthOf(cl, 2, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 4); - assert.deepStrictEqual(cl.fpeak(), 5); + const r2 = cl.reader(); + cl.push(4); cl.push(5); cl.push(6); + assert.lengthOf(cl, 4); - assert.deepStrictEqual(cl.pop(), 4); - assert.lengthOf(cl, 1, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 5); - assert.deepStrictEqual(cl.fpeak(), 5); + assert.deepStrictEqual(cl.pop(), 3); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); - assert.deepStrictEqual(cl.push(6), undefined); - assert.lengthOf(cl, 2, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 5); - assert.deepStrictEqual(cl.fpeak(), 6); + assert.deepStrictEqual(r1.pop(), 4); + assert.deepStrictEqual(r2.pop(), 4); + }); - assert.deepStrictEqual(cl.pop(), 5); - assert.lengthOf(cl, 1, 'should not be empty'); + it('should re-sync readers after calling array.push()', () => { + const cl = new CircularLinkedListMR(3); + 
const r1 = cl.reader(); - assert.deepStrictEqual(cl.pop(), 6); - assert.lengthOf(cl, 0, 'should be empty'); + cl.push(1); cl.push(2); cl.push(3); + cl.enableRing(); - assert.deepStrictEqual(cl.push(7), undefined); - assert.lengthOf(cl, 1, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 7); - assert.deepStrictEqual(cl.fpeak(), 7); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.deepStrictEqual(r1.pop(), 1); + assert.lengthOf(cl, 2); + assert.isTrue(r1.hasData()); - assert.deepStrictEqual(cl.push(8), undefined); - assert.lengthOf(cl, 2, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 7); - assert.deepStrictEqual(cl.fpeak(), 8); + cl.push(4); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); - assert.deepStrictEqual(cl.push(9), 7); - assert.lengthOf(cl, 2, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 8); - assert.deepStrictEqual(cl.fpeak(), 9); + cl.push(5); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); - cl.disableRing(); + const r2 = cl.reader(); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + assert.deepStrictEqual(r2.pop(), 3); + assert.isTrue(r2.hasData()); - assert.deepStrictEqual(cl.push(10), undefined); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 8); - assert.deepStrictEqual(cl.fpeak(), 10); + cl.push(6); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); - cl.erase(); - assert.lengthOf(cl, 0, 'should be empty'); - assert.isFalse(cl.ring, 'should disable ring on erase'); - }); + cl.push(7); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); - it('should be able to work with size = 3', () => { - const cl = new CircularLinkedList(3); + assert.deepStrictEqual(r1.pop(), 5); + assert.deepStrictEqual(r2.pop(), 5); + }); - cl.push(0); - cl.push(1); - cl.push(2); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 0); - assert.deepStrictEqual(cl.fpeak(), 2); + it('should remove extra nodes on reader.destroy()', () => { + const cl = new CircularLinkedListMR(4); + let r1 = cl.reader(); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); cl.enableRing(); - assert.isTrue(cl.ring); + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); - assert.deepStrictEqual(cl.push(3), 0); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 1); - assert.deepStrictEqual(cl.fpeak(), 3); + r1.pop(); r1.pop(); + assert.lengthOf(cl, 2); + assert.isTrue(r1.hasData()); - assert.deepStrictEqual(cl.push(4), 1); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 2); - assert.deepStrictEqual(cl.fpeak(), 4); + r1.destroy(); + assert.throws(() => r1.hasData()); + assert.lengthOf(cl, 2); - assert.deepStrictEqual(cl.pop(), 2); - assert.lengthOf(cl, 2, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 3); - assert.deepStrictEqual(cl.fpeak(), 4); + r1 = cl.reader(); + let r2 = cl.reader(); + cl.push(5); cl.push(6); - assert.deepStrictEqual(cl.push(5), undefined); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 3); - assert.deepStrictEqual(cl.fpeak(), 5); + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); - assert.deepStrictEqual(cl.pop(), 3); - assert.lengthOf(cl, 2, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 4); - assert.deepStrictEqual(cl.fpeak(), 5); + r2.pop(); r2.pop(); r2.pop(); + 
assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + + r1.destroy(); - assert.deepStrictEqual(cl.push(6), undefined); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 4); - assert.deepStrictEqual(cl.fpeak(), 6); + assert.lengthOf(cl, 1); + assert.isTrue(r2.hasData()); - assert.deepStrictEqual(cl.pop(), 4); - assert.lengthOf(cl, 2, 'should not be empty'); + cl.push(7); cl.push(8); cl.push(9); + r1 = cl.reader(); - assert.deepStrictEqual(cl.pop(), 5); - assert.lengthOf(cl, 1, 'should not be empty'); + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); - assert.deepStrictEqual(cl.push(7), undefined); - assert.lengthOf(cl, 2, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 6); - assert.deepStrictEqual(cl.fpeak(), 7); + r2.pop(); r2.pop(); r2.pop(); + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); - assert.deepStrictEqual(cl.push(8), undefined); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 6); - assert.deepStrictEqual(cl.fpeak(), 8); + r2.destroy(); + assert.throws(() => r2.hasData()); + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); - assert.deepStrictEqual(cl.push(9), 6); - assert.lengthOf(cl, 3, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 7); - assert.deepStrictEqual(cl.fpeak(), 9); + r2 = cl.reader(); + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); - cl.disableRing(); + r2.pop(); r2.pop(); r2.pop(); r2.pop(); + assert.isFalse(r2.hasData()); + r1.destroy(); - assert.deepStrictEqual(cl.push(10), undefined); - assert.lengthOf(cl, 4, 'should not be empty'); - assert.deepStrictEqual(cl.bpeak(), 7); - assert.deepStrictEqual(cl.fpeak(), 10); + assert.lengthOf(cl, 0); + assert.isFalse(r2.hasData()); + assert.throws(() => r1.hasData()); - cl.erase(); - assert.lengthOf(cl, 0, 'should be empty'); - assert.isFalse(cl.ring, 'should disable ring on erase'); + r1 = cl.reader(); + assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); + assert.isFalse(r2.hasData()); + + cl.push(1); cl.push(2); cl.push(3); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + + r2.pop(); r2.pop(); r2.pop(); + r1.pop(); + assert.lengthOf(cl, 2); + assert.isTrue(r1.hasData()); + assert.isFalse(r2.hasData()); + + cl.push(4); cl.push(5); + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + + r1.destroy(); + assert.throws(() => r1.hasData()); + assert.lengthOf(cl, 2); + assert.isTrue(r2.hasData()); + + r1 = cl.reader(); + assert.lengthOf(cl, 2); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + + cl.push(6); cl.push(7); + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + + r2.pop(); r2.pop(); r2.pop(); r2.pop(); + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + assert.isFalse(r2.hasData()); + + r1.pop(); r1.pop(); r1.pop(); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + assert.isFalse(r2.hasData()); + + r1.destroy(); + assert.throws(() => r1.hasData()); + assert.lengthOf(cl, 0); + assert.isFalse(r2.hasData()); + + r1 = cl.reader(); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + + r2.pop(); r2.pop(); r2.pop(); r2.pop(); + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + assert.isFalse(r2.hasData()); + assert.throws(() => 
r2.pop()); + + r1.pop(); r1.pop(); r1.pop(); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + assert.isFalse(r2.hasData()); + + r2.destroy(); + assert.throws(() => r2.hasData()); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + + r1.pop(); + assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); }); - it('should set new size and restore old one', () => { - const cl = new CircularLinkedList(1); + it('should update reader when new data pushed/poped', () => { + const cl = new CircularLinkedListMR(4); + const r1 = cl.reader(); + + assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); + cl.push(1); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + + r1.pop(); + assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); + cl.push(2); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); - assert.lengthOf(cl, 2); + const r2 = cl.reader(); + assert.isTrue(r2.hasData()); - cl.enableRing(); - assert.deepStrictEqual(cl.push(3), 1); - assert.deepStrictEqual(cl.push(4), 2); - assert.deepStrictEqual(cl.pop(), 3); - assert.deepStrictEqual(cl.push(5), 4); - assert.deepStrictEqual(cl.push(6), 5); + cl.push(3); cl.push(4); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); - assert.deepStrictEqual(cl.size, 1); + r2.pop(); r2.pop(); r2.pop(); - cl.enableRing(3); - assert.deepStrictEqual(cl.size, 1, 'should ignore new size while enabled'); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.isFalse(r2.hasData()); + assert.throws(() => r2.pop()); - cl.disableRing(); - cl.enableRing(2); - assert.deepStrictEqual(cl.size, 2, 'should set new size while enabled'); + r1.pop(); r1.pop(); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + assert.isFalse(r2.hasData()); - assert.deepStrictEqual(cl.push(7), undefined); - assert.deepStrictEqual(cl.push(8), 6); - assert.deepStrictEqual(cl.push(9), 7); - assert.lengthOf(cl, 2); + cl.push(5); cl.push(6); cl.push(7); + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); - cl.disableRing(); - assert.isFalse(cl.ring); - // should have no effect - cl.disableRing(); - assert.isFalse(cl.ring); + r1.pop(); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); - assert.deepStrictEqual(cl.size, 1, 'should restore old value'); + r1.pop(); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); - cl.enableRing(); - assert.deepStrictEqual(cl.push(10), 8); - assert.deepStrictEqual(cl.pop(), 9); - assert.deepStrictEqual(cl.push(11), 10); - assert.deepStrictEqual(cl.push(12), 11); + r1.pop(); + assert.lengthOf(cl, 3); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); - cl.disableRing(); - cl.enableRing(3); - assert.deepStrictEqual(cl.size, 3, 'should set new value'); + r1.pop(); + assert.lengthOf(cl, 3); + assert.isFalse(r1.hasData()); + assert.isTrue(r2.hasData()); - cl.disableRing(false); - assert.deepStrictEqual(cl.size, 3, 'should not restore prev value'); + r2.pop(); + assert.lengthOf(cl, 2); + assert.isFalse(r1.hasData()); + assert.isTrue(r2.hasData()); - cl.enableRing(5); - assert.deepStrictEqual(cl.size, 5, 'should set new value'); + r2.pop(); + assert.lengthOf(cl, 1); + assert.isFalse(r1.hasData()); + assert.isTrue(r2.hasData()); - cl.disableRing(true); - assert.deepStrictEqual(cl.size, 3, 'should restore old value'); + r2.pop(); + assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); + assert.isFalse(r2.hasData()); }); - it('should be able to enable/disable ring on empty list', 
() => { - const cl = new CircularLinkedList(3); + it('edge case: size = 1', () => { + const cl = new CircularLinkedListMR(1); + cl.push(1); cl.enableRing(); - assert.deepStrictEqual(cl.size, 3); - assert.isTrue(cl.ring); + const r1 = cl.reader(); + + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + + assert.deepStrictEqual(r1.pop(), 1); assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); - cl.push(1); cl.push(2); cl.push(3); - assert.deepStrictEqual(cl.push(4), 1); - assert.lengthOf(cl, 3); + cl.push(1); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + cl.push(2); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + assert.deepStrictEqual(r1.pop(), 2); + assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); - assert.deepStrictEqual(cl.pop(), 2); - assert.deepStrictEqual(cl.pop(), 3); - assert.deepStrictEqual(cl.pop(), 4); + const r2 = cl.reader(); + assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); + assert.isFalse(r2.hasData()); + + cl.push(3); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + + cl.pop(); + assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); + assert.isFalse(r2.hasData()); + + cl.push(4); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + assert.deepStrictEqual(r2.pop(), 4); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + assert.isFalse(r2.hasData()); + + assert.deepStrictEqual(r1.pop(), 4); assert.lengthOf(cl, 0); - cl.disableRing(); + assert.isFalse(r1.hasData()); + assert.isFalse(r2.hasData()); cl.push(5); assert.lengthOf(cl, 1); - }); - }); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + + assert.deepStrictEqual(r2.pop(), 5); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + assert.isFalse(r2.hasData()); - it('should provie access to nodes', () => { - const cl = new CircularLinkedList(); + cl.pop(); + assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); + assert.isFalse(r2.hasData()); - assert.isNull(cl.back); - assert.isNull(cl.front); + cl.push(6); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); - cl.push(1); - assert.deepStrictEqual(cl.back, { - next: null, - value: 1 - }); - assert.deepStrictEqual(cl.front, { - next: null, - value: 1 + assert.deepStrictEqual(r2.pop(), 6); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + assert.isFalse(r2.hasData()); + + cl.push(7); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + + assert.deepStrictEqual(r2.pop(), 7); + assert.lengthOf(cl, 1); + assert.isTrue(r1.hasData()); + assert.isFalse(r2.hasData()); + + assert.deepStrictEqual(r1.pop(), 7); + assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); + assert.isFalse(r2.hasData()); }); - cl.push(2); - assert.deepStrictEqual(cl.back.value, 1); - assert.deepStrictEqual(cl.front.value, 2); + it('reader.needCopy()', () => { + const cl = new CircularLinkedListMR(4); + const r1 = cl.reader(); + cl.push(1); cl.push(2); cl.push(3); cl.push(4); + + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + + while (r1.hasData()) { + assert.isFalse(r1.needCopy()); + r1.pop(); + } + + cl.push(1); cl.push(2); cl.push(3); cl.push(4); + const r2 = cl.reader(); + + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + + while (r1.hasData()) { + assert.isTrue(r1.needCopy()); + r1.pop(); + } + while (r2.hasData()) { + assert.isFalse(r2.needCopy()); + assert.isFalse(r1.hasData()); + r2.pop(); + 
assert.isFalse(r1.hasData()); + } - cl.push(3); - assert.deepStrictEqual(cl.back.value, 1); - assert.deepStrictEqual(cl.front.value, 3); + assert.lengthOf(cl, 0); + assert.isFalse(r1.hasData()); + assert.isFalse(r2.hasData()); + + cl.push(1); cl.push(2); cl.push(3); cl.push(4); + assert.lengthOf(cl, 4); + assert.isTrue(r1.hasData()); + assert.isTrue(r2.hasData()); + + assert.isTrue(r1.needCopy()); + assert.isTrue(r2.needCopy()); + + r1.pop(); r1.pop(); + assert.isTrue(r1.needCopy()); + assert.isFalse(r2.needCopy()); + + r2.pop(); + assert.isFalse(r2.needCopy()); + r2.pop(); + assert.isTrue(r2.needCopy()); + r2.pop(); + assert.isTrue(r2.needCopy()); + + r2.destroy(); + assert.throws(() => r2.hasData()); + assert.isFalse(r1.needCopy()); + }); + + it('should support multiple readers', () => { + const cl = new CircularLinkedListMR(20); + + for (let i = 0; i < 20; i += 1) { + cl.push(i); + } + + assert.lengthOf(cl, 20); + + const r1 = cl.reader(); + assert.isTrue(r1.hasData()); + assert.deepStrictEqual(r1.pop(), 0); + assert.deepStrictEqual(r1.pop(), 1); + assert.lengthOf(cl, 18); + + const r2 = cl.reader(); + assert.isTrue(r2.hasData()); + assert.deepStrictEqual(r2.pop(), 2); + assert.deepStrictEqual(r2.pop(), 3); + assert.lengthOf(cl, 18); + assert.deepStrictEqual(r1.pop(), 2); + assert.deepStrictEqual(r1.pop(), 3); + assert.lengthOf(cl, 16); + + const r3 = cl.reader(); + assert.deepStrictEqual(r3.pop(), 4); + assert.deepStrictEqual(r3.pop(), 5); + assert.lengthOf(cl, 16); + assert.deepStrictEqual(r1.pop(), 4); + assert.deepStrictEqual(r1.pop(), 5); + assert.lengthOf(cl, 16); + + const r4 = cl.reader(); + assert.deepStrictEqual(r4.pop(), 4); + assert.deepStrictEqual(r4.pop(), 5); + assert.deepStrictEqual(r4.pop(), 6); + assert.lengthOf(cl, 16); + + const r5 = cl.reader(); + assert.deepStrictEqual(r5.pop(), 4); + assert.deepStrictEqual(r5.pop(), 5); + assert.deepStrictEqual(r5.pop(), 6); + assert.deepStrictEqual(r5.pop(), 7); + assert.lengthOf(cl, 16); + + const r6 = cl.reader(); + assert.deepStrictEqual(r6.pop(), 4); + assert.deepStrictEqual(r6.pop(), 5); + assert.deepStrictEqual(r6.pop(), 6); + assert.deepStrictEqual(r6.pop(), 7); + assert.lengthOf(cl, 16); + + assert.isFalse(r2.needCopy()); + assert.deepStrictEqual(r2.pop(), 4); + assert.isFalse(r2.needCopy()); + assert.deepStrictEqual(r2.pop(), 5); + assert.isTrue(r2.needCopy()); + assert.lengthOf(cl, 14); + + while (r1.hasData()) { + r1.pop(); + } + assert.isFalse(r1.hasData()); + assert.lengthOf(cl, 14); + + r1.destroy(); + assert.lengthOf(cl, 14); + + r2.destroy(); + assert.lengthOf(cl, 14); + + assert.deepStrictEqual(r6.fpeak(), 19); + assert.deepStrictEqual(r6.bpeak(), 8); + + assert.deepStrictEqual(r4.fpeak(), 19); + assert.deepStrictEqual(r4.bpeak(), 7); + + assert.deepStrictEqual(r3.fpeak(), 19); + assert.deepStrictEqual(r3.bpeak(), 6); + }); }); }); diff --git a/versions.json b/versions.json index 1a41b500..37f8894f 100644 --- a/versions.json +++ b/versions.json @@ -1,5 +1,5 @@ { - "versionMetaTimestamp": 1540928503, + "versionMetaTimestamp": 1540928506, "latestVersion": { "name": "1.34 (non-LTS)", "url": "/products/extensions/f5-telemetry-streaming/latest/"